VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATM.cpp@30572

Last change on this file since 30572 was 30572, checked in by vboxsync, 15 years ago

Removed PGMPhysGCPhys2R3Ptr usage from PATM.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 247.2 KB
1/* $Id: PATM.cpp 30572 2010-07-02 11:52:02Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/patm.h>
25#include <VBox/stam.h>
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/cpumdis.h>
29#include <VBox/iom.h>
30#include <VBox/mm.h>
31#include <VBox/ssm.h>
32#include <VBox/trpm.h>
33#include <VBox/cfgm.h>
34#include <VBox/param.h>
35#include <VBox/selm.h>
36#include <iprt/avl.h>
37#include "PATMInternal.h"
38#include "PATMPatch.h"
39#include <VBox/vm.h>
40#include <VBox/csam.h>
41#include <VBox/dbg.h>
42#include <VBox/err.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45#include <iprt/asm.h>
46#include <VBox/dis.h>
47#include <VBox/disopcode.h>
48#include <include/internal/pgm.h>
49
50#include <iprt/string.h>
51#include "PATMA.h"
52
53//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
54//#define PATM_DISABLE_ALL
55
56/*******************************************************************************
57* Internal Functions *
58*******************************************************************************/
59
60static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
61static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
62static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
63
64#ifdef LOG_ENABLED // keep gcc quiet
65static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
66#endif
67#ifdef VBOX_WITH_STATISTICS
68static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
69static void patmResetStat(PVM pVM, void *pvSample);
70static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
71#endif
72
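/* Translate between the ring-3 (HC) and raw-mode (GC) views of the patch memory block;
 * both map the same allocation, so only the base address differs. */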
73#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
74#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
75
76static int patmReinit(PVM pVM);
77static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
78
79#ifdef VBOX_WITH_DEBUGGER
80static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
81static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
82static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
83
84/** Command descriptors. */
85static const DBGCCMD g_aCmds[] =
86{
87 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, pszDescription */
88 { "patmon", 0, 0, NULL, 0, NULL, 0, patmr3CmdOn, "", "Enable patching." },
89 { "patmoff", 0, 0, NULL, 0, NULL, 0, patmr3CmdOff, "", "Disable patching." },
90};
91#endif
92
93/* Don't want to break saved states, so put it here as a global variable. */
94static unsigned int cIDTHandlersDisabled = 0;
95
96/**
97 * Initializes the PATM.
98 *
99 * @returns VBox status code.
100 * @param pVM The VM to operate on.
101 */
102VMMR3DECL(int) PATMR3Init(PVM pVM)
103{
104 int rc;
105
106 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
107
108 /* These values can't change as they are hardcoded in patch code (old saved states!) */
109 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
110 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
111 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
112 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
113
114 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
115 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
116
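    /* Layout of the single hypervisor allocation made below, matching the offset
     * calculations that follow: [patch memory][spare page][PATM stack][GC state page][statistics]. */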
117 /* Allocate patch memory and GC patch state memory. */
118 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
119 /* Add another page in case the generated code is much larger than expected. */
120 /** @todo bad safety precaution */
121 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
122 if (RT_FAILURE(rc))
123 {
124 Log(("MMHyperAlloc failed with %Rrc\n", rc));
125 return rc;
126 }
127 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
128
129 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address.) */
130 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
131 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
132
133 /*
134 * Hypervisor memory for GC status data (read/write)
135 *
136 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
137 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
138 *
139 */
140 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /** @note hardcoded dependencies on this exist. */
141 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
142 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
143
144 /* Hypervisor memory for patch statistics */
145 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
146 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
147
148 /* Memory for patch lookup trees. */
149 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
150 AssertRCReturn(rc, rc);
151 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
152
153#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
154 /* Check CFGM option. */
155 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
156 if (RT_FAILURE(rc))
157# ifdef PATM_DISABLE_ALL
158 pVM->fPATMEnabled = false;
159# else
160 pVM->fPATMEnabled = true;
161# endif
162#endif
163
164 rc = patmReinit(pVM);
165 AssertRC(rc);
166 if (RT_FAILURE(rc))
167 return rc;
168
169 /*
170 * Register save and load state notificators.
171 */
172 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
173 NULL, NULL, NULL,
174 NULL, patmR3Save, NULL,
175 NULL, patmR3Load, NULL);
176 AssertRCReturn(rc, rc);
177
178#ifdef VBOX_WITH_DEBUGGER
179 /*
180 * Debugger commands.
181 */
182 static bool s_fRegisteredCmds = false;
183 if (!s_fRegisteredCmds)
184 {
185 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
186 if (RT_SUCCESS(rc2))
187 s_fRegisteredCmds = true;
188 }
189#endif
190
191#ifdef VBOX_WITH_STATISTICS
192 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
193 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
194 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
195 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
196 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
197 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
198 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
199 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
200
201 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
202 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
203
204 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
205 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
206 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
207
208 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
209 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
210 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
211 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
212 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
213
214 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
215 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
216
217 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
218 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
219
220 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
221 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
222 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
223
224 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
225 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
226 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
227
228 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
229 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
230
231 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
232 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
233 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
234 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
235
236 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
237 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
238
239 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
240 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
241
242 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
243 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
244 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
245
246 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
247 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
248 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
249 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
250
251 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
252 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
253 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
254 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
255 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
256
257 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
258#endif /* VBOX_WITH_STATISTICS */
259
260 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
261 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
262 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
263 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
264 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
265 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
266 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
267 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
268
269 return rc;
270}
271
272/**
273 * Finalizes HMA page attributes.
274 *
275 * @returns VBox status code.
276 * @param pVM The VM handle.
277 */
278VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
279{
280 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
281 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
282 if (RT_FAILURE(rc))
283 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
284
285 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
286 if (RT_FAILURE(rc))
287 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
288
289 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
290 if (RT_FAILURE(rc))
291 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
292
293 return rc;
294}
295
296/**
297 * (Re)initializes PATM
298 *
299 * @param pVM The VM.
300 */
301static int patmReinit(PVM pVM)
302{
303 int rc;
304
305 /*
306 * Assert alignment and sizes.
307 */
308 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
309 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
310
311 /*
312 * Setup any fixed pointers and offsets.
313 */
314 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
315
316#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
317#ifndef PATM_DISABLE_ALL
318 pVM->fPATMEnabled = true;
319#endif
320#endif
321
322 Assert(pVM->patm.s.pGCStateHC);
323 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
324 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
325
326 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
327 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
328
329 Assert(pVM->patm.s.pGCStackHC);
330 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
331 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
332 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
333 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
334
335 Assert(pVM->patm.s.pStatsHC);
336 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
337 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
338
339 Assert(pVM->patm.s.pPatchMemHC);
340 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
341 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
342 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
343
344 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
345 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
346
347 Assert(pVM->patm.s.PatchLookupTreeHC);
348 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
349
350 /*
351 * (Re)Initialize PATM structure
352 */
353 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
354 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
355 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
356 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
357 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
358 pVM->patm.s.pvFaultMonitor = 0;
359 pVM->patm.s.deltaReloc = 0;
360
361 /* Lowest and highest patched instruction */
362 pVM->patm.s.pPatchedInstrGCLowest = ~0;
363 pVM->patm.s.pPatchedInstrGCHighest = 0;
364
365 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
366 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
367 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
368
369 pVM->patm.s.pfnSysEnterPatchGC = 0;
370 pVM->patm.s.pfnSysEnterGC = 0;
371
372 pVM->patm.s.fOutOfMemory = false;
373
374 pVM->patm.s.pfnHelperCallGC = 0;
375
376 /* Generate all global functions to be used by future patches. */
377 /* We generate a fake patch in order to use the existing code for relocation. */
378 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
379 if (RT_FAILURE(rc))
380 {
381 Log(("Out of memory!!!!\n"));
382 return VERR_NO_MEMORY;
383 }
384 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
385 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
386 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
387
388 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
389 AssertRC(rc);
390
391 /* Update free pointer in patch memory. */
392 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
393 /* Round to next 8 byte boundary. */
394 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
395 return rc;
396}
397
398
399/**
400 * Applies relocations to data and code managed by this
401 * component. This function will be called at init and
402 * whenever the VMM needs to relocate itself inside the GC.
403 *
404 * The PATM will update the addresses used by the switcher.
405 *
406 * @param pVM The VM.
407 */
408VMMR3DECL(void) PATMR3Relocate(PVM pVM)
409{
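    /* The hypervisor area may have been remapped in the guest context; derive the shift from
     * the new GC address of the GC state block versus the one cached at the last relocation. */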
410 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
411 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
412
413 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
414 if (delta)
415 {
416 PCPUMCTX pCtx;
417
418 /* Update CPUMCTX guest context pointer. */
419 pVM->patm.s.pCPUMCtxGC += delta;
420
421 pVM->patm.s.deltaReloc = delta;
422
423 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
424
425 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
426
427 /* If we are running patch code right now, then also adjust EIP. */
428 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
429 pCtx->eip += delta;
430
431 pVM->patm.s.pGCStateGC = GCPtrNew;
432 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
433
434 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
435
436 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
437
438 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
439
440 if (pVM->patm.s.pfnSysEnterPatchGC)
441 pVM->patm.s.pfnSysEnterPatchGC += delta;
442
443 /* Deal with the global patch functions. */
444 pVM->patm.s.pfnHelperCallGC += delta;
445 pVM->patm.s.pfnHelperRetGC += delta;
446 pVM->patm.s.pfnHelperIretGC += delta;
447 pVM->patm.s.pfnHelperJumpGC += delta;
448
449 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
450 }
451}
452
453
454/**
455 * Terminates the PATM.
456 *
457 * Termination means cleaning up and freeing all resources;
458 * the VM itself is at this point powered off or suspended.
459 *
460 * @returns VBox status code.
461 * @param pVM The VM to operate on.
462 */
463VMMR3DECL(int) PATMR3Term(PVM pVM)
464{
465 /* Memory was all allocated from the two MM heaps and requires no freeing. */
466 return VINF_SUCCESS;
467}
468
469
470/**
471 * PATM reset callback.
472 *
473 * @returns VBox status code.
474 * @param pVM The VM which is reset.
475 */
476VMMR3DECL(int) PATMR3Reset(PVM pVM)
477{
478 Log(("PATMR3Reset\n"));
479
480 /* Free all patches. */
481 while (true)
482 {
483 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
484 if (pPatchRec)
485 {
486 PATMRemovePatch(pVM, pPatchRec, true);
487 }
488 else
489 break;
490 }
491 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
492 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
493 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
494 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
495
496 int rc = patmReinit(pVM);
497 if (RT_SUCCESS(rc))
498 rc = PATMR3InitFinalize(pVM); /* paranoia */
499
500 return rc;
501}
502
503/**
504 * Read callback for disassembly function; supports reading bytes that cross a page boundary
505 *
506 * @returns VBox status code.
507 * @param pSrc GC source pointer
508 * @param pDest HC destination pointer
509 * @param size Number of bytes to read
510 * @param pvUserdata Callback specific user data (pCpu)
511 *
512 */
513int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
514{
515 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
516 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
517 int orgsize = size;
518
519 Assert(size);
520 if (size == 0)
521 return VERR_INVALID_PARAMETER;
522
523 /*
524 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
525 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
526 */
527 /** @todo could change in the future! */
528 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
529 {
530 for (int i=0;i<orgsize;i++)
531 {
532 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
533 if (RT_SUCCESS(rc))
534 {
535 pSrc++;
536 pDest++;
537 size--;
538 }
539 else break;
540 }
541 if (size == 0)
542 return VINF_SUCCESS;
543#ifdef VBOX_STRICT
544 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
545 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
546 {
547 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
548 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
549 }
550#endif
551 }
552
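    /* Either read straight from guest memory, or copy from the cached HC mapping when the
     * instruction page (or patch memory) is already mapped for this disassembly. */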
553 if ( !pDisInfo->pInstrHC
554 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1)
555 && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc)))
556 {
557 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, pSrc));
558 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
559 }
560 else
561 {
562 Assert(pDisInfo->pInstrHC);
563
564 uint8_t *pInstrHC = pDisInfo->pInstrHC;
565
566 Assert(pInstrHC);
567
568 /* pInstrHC is the base address; adjust according to the GC pointer. */
569 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
570
571 memcpy(pDest, (void *)pInstrHC, size);
572 }
573
574 return VINF_SUCCESS;
575}
576
577/**
578 * Callback function for RTAvloU32DoWithAll
579 *
580 * Updates all fixups in the patches
581 *
582 * @returns VBox status code.
583 * @param pNode Current node
584 * @param pParam The VM to operate on.
585 */
586static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
587{
588 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
589 PVM pVM = (PVM)pParam;
590 RTRCINTPTR delta;
591#ifdef LOG_ENABLED
592 DISCPUSTATE cpu;
593 char szOutput[256];
594 uint32_t opsize;
595 bool disret;
596#endif
597 int rc;
598
599 /* Nothing to do if the patch is not active. */
600 if (pPatch->patch.uState == PATCH_REFUSED)
601 return 0;
602
603#ifdef LOG_ENABLED
604 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
605 {
606 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
607 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
608 Log(("Org patch jump: %s", szOutput));
609 }
610#endif
611
612 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
613 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
614
615 /*
616 * Apply fixups
617 */
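    /* Three fixup kinds are processed here: FIXUP_ABSOLUTE (absolute addresses in patch code
     * or patched guest instructions), FIXUP_REL_JMPTOPATCH (the displacement of the jump written
     * into guest code, targeting patch memory) and FIXUP_REL_JMPTOGUEST (displacements in patch
     * code that point back into guest code). */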
618 PRELOCREC pRec = 0;
619 AVLPVKEY key = 0;
620
621 while (true)
622 {
623 /* Get the record that's closest from above */
624 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
625 if (pRec == 0)
626 break;
627
628 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
629
630 switch (pRec->uType)
631 {
632 case FIXUP_ABSOLUTE:
633 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
634 if ( !pRec->pSource
635 || PATMIsPatchGCAddr(pVM, pRec->pSource))
636 {
637 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
638 }
639 else
640 {
641 uint8_t curInstr[15];
642 uint8_t oldInstr[15];
643 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
644
645 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
646
647 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
648 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
649
650 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
651 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
652
653 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
654
655 if ( rc == VERR_PAGE_NOT_PRESENT
656 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
657 {
658 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
659
660 Log(("PATM: Patch page not present -> check later!\n"));
661 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
662 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
663 }
664 else
665 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
666 {
667 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
668 /*
669 * Disable patch; this is not a good solution
670 */
671 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
672 pPatch->patch.uState = PATCH_DISABLED;
673 }
674 else
675 if (RT_SUCCESS(rc))
676 {
677 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
678 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
679 AssertRC(rc);
680 }
681 }
682 break;
683
684 case FIXUP_REL_JMPTOPATCH:
685 {
686 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
687
688 if ( pPatch->patch.uState == PATCH_ENABLED
689 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
690 {
691 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
692 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
693 RTRCPTR pJumpOffGC;
694 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
695 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
696
697 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
698
699 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
700#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
701 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
702 {
703 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
704
705 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
706 oldJump[0] = pPatch->patch.aPrivInstr[0];
707 oldJump[1] = pPatch->patch.aPrivInstr[1];
708 *(RTRCUINTPTR *)&oldJump[2] = displOld;
709 }
710 else
711#endif
712 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
713 {
714 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
715 oldJump[0] = 0xE9;
716 *(RTRCUINTPTR *)&oldJump[1] = displOld;
717 }
718 else
719 {
720 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
721 continue; //this should never happen!!
722 }
723 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
724
725 /*
726 * Read old patch jump and compare it to the one we previously installed
727 */
728 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
729 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
730
731 if ( rc == VERR_PAGE_NOT_PRESENT
732 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
733 {
734 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
735
736 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
737 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
738 }
739 else
740 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
741 {
742 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
743 /*
744 * Disable patch; this is not a good solution
745 */
746 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
747 pPatch->patch.uState = PATCH_DISABLED;
748 }
749 else
750 if (RT_SUCCESS(rc))
751 {
752 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
753 AssertRC(rc);
754 }
755 else
756 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
757 }
758 else
759 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
760
761 pRec->pDest = pTarget;
762 break;
763 }
764
765 case FIXUP_REL_JMPTOGUEST:
766 {
767 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
768 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
769
770 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
771 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
772 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
773 pRec->pSource = pSource;
774 break;
775 }
776
777 default:
778 AssertMsg(0, ("Invalid fixup type!!\n"));
779 return VERR_INVALID_PARAMETER;
780 }
781 }
782
783#ifdef LOG_ENABLED
784 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
785 {
786 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
787 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
788 Log(("Rel patch jump: %s", szOutput));
789 }
790#endif
791 return 0;
792}
793
794/**
795 * \#PF Handler callback for virtual access handler ranges.
796 *
797 * Important to realize that a physical page in a range can have aliases, and
798 * for ALL and WRITE handlers these will also trigger.
799 *
800 * @returns VINF_SUCCESS if the handler has carried out the operation.
801 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
802 * @param pVM VM Handle.
803 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
804 * @param pvPtr The HC mapping of that address.
805 * @param pvBuf What the guest is reading/writing.
806 * @param cbBuf How much it's reading/writing.
807 * @param enmAccessType The access type.
808 * @param pvUser User argument.
809 */
810DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
811{
812 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
813 /** @todo could be the wrong virtual address (alias) */
814 pVM->patm.s.pvFaultMonitor = GCPtr;
815 PATMR3HandleMonitoredPage(pVM);
816 return VINF_PGM_HANDLER_DO_DEFAULT;
817}
818
819
820#ifdef VBOX_WITH_DEBUGGER
821/**
822 * Callback function for RTAvloU32DoWithAll
823 *
824 * Enables the patch that's being enumerated
825 *
826 * @returns 0 (continue enumeration).
827 * @param pNode Current node
828 * @param pVM The VM to operate on.
829 */
830static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
831{
832 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
833
834 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
835 return 0;
836}
837#endif /* VBOX_WITH_DEBUGGER */
838
839
840#ifdef VBOX_WITH_DEBUGGER
841/**
842 * Callback function for RTAvloU32DoWithAll
843 *
844 * Disables the patch that's being enumerated
845 *
846 * @returns 0 (continue enumeration).
847 * @param pNode Current node
848 * @param pVM The VM to operate on.
849 */
850static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
851{
852 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
853
854 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
855 return 0;
856}
857#endif
858
859/**
860 * Returns the host context pointer and size of the patch memory block
861 *
862 * @returns Host context pointer to the patch memory block.
863 * @param pVM The VM to operate on.
864 * @param pcb Size of the patch memory block
865 */
866VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
867{
868 if (pcb)
869 *pcb = pVM->patm.s.cbPatchMem;
870
871 return pVM->patm.s.pPatchMemHC;
872}
873
874
875/**
876 * Returns the guest context pointer and size of the patch memory block
877 *
878 * @returns Guest context pointer to the patch memory block.
879 * @param pVM The VM to operate on.
880 * @param pcb Size of the patch memory block
881 */
882VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
883{
884 if (pcb)
885 *pcb = pVM->patm.s.cbPatchMem;
886
887 return pVM->patm.s.pPatchMemGC;
888}
889
890
891/**
892 * Returns the host context pointer of the GC context structure
893 *
894 * @returns Host context pointer to the GC state structure.
895 * @param pVM The VM to operate on.
896 */
897VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
898{
899 return pVM->patm.s.pGCStateHC;
900}
901
902
903/**
904 * Checks whether the HC address is part of our patch region
905 *
906 * @returns true if the address lies within the patch memory block, false otherwise.
907 * @param pVM The VM to operate on.
908 * @param pAddrHC Host context address
909 */
910VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
911{
912 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
913}
914
915
916/**
917 * Allows or disallows patching of privileged instructions executed by the guest OS
918 *
919 * @returns VBox status code.
920 * @param pVM The VM to operate on.
921 * @param fAllowPatching Allow/disallow patching
922 */
923VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
924{
925 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
926 return VINF_SUCCESS;
927}
928
929/**
930 * Convert a GC patch block pointer to a HC patch pointer
931 *
932 * @returns HC pointer or NULL if it's not a GC patch pointer
933 * @param pVM The VM to operate on.
934 * @param pAddrGC GC pointer
935 */
936VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
937{
938 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
939 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
940 else
941 return NULL;
942}
943
944/**
945 * Query PATM state (enabled/disabled)
946 *
947 * @returns 0 - disabled, 1 - enabled
948 * @param pVM The VM to operate on.
949 */
950VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
951{
952 return pVM->fPATMEnabled;
953}
954
955
956/**
957 * Convert guest context address to host context pointer
958 *
959 * @returns Host context pointer or NULL in case of an error.
960 * @param pVM The VM to operate on.
961 * @param pCacheRec Address conversion cache record
962 * @param pGCPtr Guest context pointer
963 *
964 *
965 *
966 */
967R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
968{
969 int rc;
970 R3PTRTYPE(uint8_t *) pHCPtr;
971 uint32_t offset;
972
973 if (PATMIsPatchGCAddr(pVM, pGCPtr))
974 {
975 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
976 Assert(pPatch);
977 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
978 }
979
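    /* Fast path: if the address lies in the same guest page as the previous lookup, reuse the
     * mapping we still hold a lock on instead of mapping the page again. */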
980 offset = pGCPtr & PAGE_OFFSET_MASK;
981 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
982 return pCacheRec->pPageLocStartHC + offset;
983
984 /* Release previous lock if any. */
985 if (pCacheRec->Lock.pvMap)
986 {
987 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
988 pCacheRec->Lock.pvMap = NULL;
989 }
990
991 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
992 if (rc != VINF_SUCCESS)
993 {
994 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2CCPtrReadOnly failed for %08X\n", pGCPtr));
995 return NULL;
996 }
997 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
998 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
999 return pHCPtr;
1000}
1001
1002
1003/* Calculates and fills in all branch targets
1004 *
1005 * @returns VBox status code.
1006 * @param pVM The VM to operate on.
1007 * @param pPatch Current patch block pointer
1008 *
1009 */
1010static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1011{
1012 int32_t displ;
1013
1014 PJUMPREC pRec = 0;
1015 int nrJumpRecs = 0;
1016
1017 /*
1018 * Set all branch targets inside the patch block.
1019 * We remove all jump records as they are no longer needed afterwards.
1020 */
1021 while (true)
1022 {
1023 RCPTRTYPE(uint8_t *) pInstrGC;
1024 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1025
1026 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1027 if (pRec == 0)
1028 break;
1029
1030 nrJumpRecs++;
1031
1032 /* HC in patch block to GC in patch block. */
1033 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1034
1035 if (pRec->opcode == OP_CALL)
1036 {
1037 /* Special case: call function replacement patch from this patch block.
1038 */
1039 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1040 if (!pFunctionRec)
1041 {
1042 int rc;
1043
1044 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1045 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1046 else
1047 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1048
1049 if (RT_FAILURE(rc))
1050 {
1051 uint8_t *pPatchHC;
1052 RTRCPTR pPatchGC;
1053 RTRCPTR pOrgInstrGC;
1054
1055 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1056 Assert(pOrgInstrGC);
1057
1058 /* Failure for some reason -> mark exit point with int 3. */
1059 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1060
1061 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1062 Assert(pPatchGC);
1063
1064 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1065
1066 /* Set a breakpoint at the very beginning of the recompiled instruction */
1067 *pPatchHC = 0xCC;
1068
1069 continue;
1070 }
1071 }
1072 else
1073 {
1074 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1075 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1076 }
1077
1078 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1079 }
1080 else
1081 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1082
1083 if (pBranchTargetGC == 0)
1084 {
1085 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1086 return VERR_PATCHING_REFUSED;
1087 }
1088 /* Our jumps *always* have a dword displacement (to make things easier). */
1089 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
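        /* The displacement is relative to the first byte after the 32-bit displacement field. */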
1090 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1091 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1092 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1093 }
1094 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1095 Assert(pPatch->JumpTree == 0);
1096 return VINF_SUCCESS;
1097}
1098
1099/* Add an illegal instruction record
1100 *
1101 * @param pVM The VM to operate on.
1102 * @param pPatch Patch structure ptr
1103 * @param pInstrGC Guest context pointer to privileged instruction
1104 *
1105 */
1106static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1107{
1108 PAVLPVNODECORE pRec;
1109
1110 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1111 Assert(pRec);
1112 pRec->Key = (AVLPVKEY)pInstrGC;
1113
1114 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1115 Assert(ret); NOREF(ret);
1116 pPatch->pTempInfo->nrIllegalInstr++;
1117}
1118
1119static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1120{
1121 PAVLPVNODECORE pRec;
1122
1123 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1124 if (pRec)
1125 return true;
1126 else
1127 return false;
1128}
1129
1130/**
1131 * Add a patch to guest lookup record
1132 *
1133 * @param pVM The VM to operate on.
1134 * @param pPatch Patch structure ptr
1135 * @param pPatchInstrHC Host context pointer to patch block
1136 * @param pInstrGC Guest context pointer to privileged instruction
1137 * @param enmType Lookup type
1138 * @param fDirty Dirty flag
1139 *
1140 */
1141 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
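/* Records are keyed by the offset into patch memory; for bidirectional lookups a guest-to-patch
 * node (keyed on the guest address) is carved out of the same heap block, directly after the
 * patch-to-guest node. */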
1142void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1143{
1144 bool ret;
1145 PRECPATCHTOGUEST pPatchToGuestRec;
1146 PRECGUESTTOPATCH pGuestToPatchRec;
1147 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1148
1149 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1150 {
1151 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1152 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1153 return; /* already there */
1154
1155 Assert(!pPatchToGuestRec);
1156 }
1157#ifdef VBOX_STRICT
1158 else
1159 {
1160 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1161 Assert(!pPatchToGuestRec);
1162 }
1163#endif
1164
1165 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1166 Assert(pPatchToGuestRec);
1167 pPatchToGuestRec->Core.Key = PatchOffset;
1168 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1169 pPatchToGuestRec->enmType = enmType;
1170 pPatchToGuestRec->fDirty = fDirty;
1171
1172 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1173 Assert(ret);
1174
1175 /* GC to patch address */
1176 if (enmType == PATM_LOOKUP_BOTHDIR)
1177 {
1178 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1179 if (!pGuestToPatchRec)
1180 {
1181 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1182 pGuestToPatchRec->Core.Key = pInstrGC;
1183 pGuestToPatchRec->PatchOffset = PatchOffset;
1184
1185 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1186 Assert(ret);
1187 }
1188 }
1189
1190 pPatch->nrPatch2GuestRecs++;
1191}
1192
1193
1194/**
1195 * Removes a patch to guest lookup record
1196 *
1197 * @param pVM The VM to operate on.
1198 * @param pPatch Patch structure ptr
1199 * @param pPatchInstrGC Guest context pointer to patch block
1200 */
1201void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1202{
1203 PAVLU32NODECORE pNode;
1204 PAVLU32NODECORE pNode2;
1205 PRECPATCHTOGUEST pPatchToGuestRec;
1206 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1207
1208 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1209 Assert(pPatchToGuestRec);
1210 if (pPatchToGuestRec)
1211 {
1212 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1213 {
1214 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1215
1216 Assert(pGuestToPatchRec->Core.Key);
1217 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1218 Assert(pNode2);
1219 }
1220 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1221 Assert(pNode);
1222
1223 MMR3HeapFree(pPatchToGuestRec);
1224 pPatch->nrPatch2GuestRecs--;
1225 }
1226}
1227
1228
1229/**
1230 * RTAvlPVDestroy callback.
1231 */
1232static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1233{
1234 MMR3HeapFree(pNode);
1235 return 0;
1236}
1237
1238/**
1239 * Empty the specified tree (PV tree, MMR3 heap)
1240 *
1241 * @param pVM The VM to operate on.
1242 * @param ppTree Tree to empty
1243 */
1244void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1245{
1246 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1247}
1248
1249
1250/**
1251 * RTAvlU32Destroy callback.
1252 */
1253static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1254{
1255 MMR3HeapFree(pNode);
1256 return 0;
1257}
1258
1259/**
1260 * Empty the specified tree (U32 tree, MMR3 heap)
1261 *
1262 * @param pVM The VM to operate on.
1263 * @param ppTree Tree to empty
1264 */
1265void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1266{
1267 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1268}
1269
1270
1271/**
1272 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1273 *
1274 * @returns VBox status code.
1275 * @param pVM The VM to operate on.
1276 * @param pCpu CPU disassembly state
1277 * @param pInstrGC Guest context pointer to privileged instruction
1278 * @param pCurInstrGC Guest context pointer to the current instruction
1279 * @param pCacheRec Cache record ptr
1280 *
1281 */
1282static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1283{
1284 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1285 bool fIllegalInstr = false;
1286
1287 //Preliminary heuristics:
1288 //- no call instructions without a fixed displacement between cli and sti/popf
1289 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1290 //- no nested pushf/cli
1291 //- sti/popf should be the (eventual) target of all branches
1292 //- no near or far returns; no int xx, no into
1293 //
1294 // Note: Later on we can impose less strict guidelines if the need arises
1295
1296 /* Bail out if the patch gets too big. */
1297 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1298 {
1299 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1300 fIllegalInstr = true;
1301 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1302 }
1303 else
1304 {
1305 /* No unconditional jumps or calls without fixed displacements. */
1306 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1307 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1308 )
1309 {
1310 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1311 if ( pCpu->param1.size == 6 /* far call/jmp */
1312 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1313 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1314 )
1315 {
1316 fIllegalInstr = true;
1317 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1318 }
1319 }
1320
1321 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1322 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1323 {
1324 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1325 {
1326 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1327 /* We turn this one into an int 3 callable patch. */
1328 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1329 }
1330 }
1331 else
1332 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1333 if (pPatch->opcode == OP_PUSHF)
1334 {
1335 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1336 {
1337 fIllegalInstr = true;
1338 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1339 }
1340 }
1341
1342 // no far returns
1343 if (pCpu->pCurInstr->opcode == OP_RETF)
1344 {
1345 pPatch->pTempInfo->nrRetInstr++;
1346 fIllegalInstr = true;
1347 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1348 }
1349 else
1350 // no int xx or into either
1351 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1352 {
1353 fIllegalInstr = true;
1354 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1355 }
1356 }
1357
1358 pPatch->cbPatchBlockSize += pCpu->opsize;
1359
1360 /* Illegal instruction -> end of analysis phase for this code block */
1361 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1362 return VINF_SUCCESS;
1363
1364 /* Check for exit points. */
1365 switch (pCpu->pCurInstr->opcode)
1366 {
1367 case OP_SYSEXIT:
1368 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1369
1370 case OP_SYSENTER:
1371 case OP_ILLUD2:
1372 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1373 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1374 return VINF_SUCCESS;
1375
1376 case OP_STI:
1377 case OP_POPF:
1378 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1379 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1380 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1381 {
1382 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1383 return VERR_PATCHING_REFUSED;
1384 }
1385 if (pPatch->opcode == OP_PUSHF)
1386 {
1387 if (pCpu->pCurInstr->opcode == OP_POPF)
1388 {
1389 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1390 return VINF_SUCCESS;
1391
1392 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1393 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1394 pPatch->flags |= PATMFL_CHECK_SIZE;
1395 }
1396 break; //sti doesn't mark the end of a pushf block; only popf does
1397 }
1398 //else no break
1399 case OP_RETN: /* exit point for function replacement */
1400 return VINF_SUCCESS;
1401
1402 case OP_IRET:
1403 return VINF_SUCCESS; /* exitpoint */
1404
1405 case OP_CPUID:
1406 case OP_CALL:
1407 case OP_JMP:
1408 break;
1409
1410 default:
1411 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1412 {
1413 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1414 return VINF_SUCCESS; /* exit point */
1415 }
1416 break;
1417 }
1418
1419 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1420 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1421 {
1422 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1423 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1424 return VINF_SUCCESS;
1425 }
1426
1427 return VWRN_CONTINUE_ANALYSIS;
1428}
1429
1430/**
1431 * Analyses the instructions inside a function for compliance
1432 *
1433 * @returns VBox status code.
1434 * @param pVM The VM to operate on.
1435 * @param pCpu CPU disassembly state
1436 * @param pInstrGC Guest context pointer to privileged instruction
1437 * @param pCurInstrGC Guest context pointer to the current instruction
1438 * @param pCacheRec Cache record ptr
1439 *
1440 */
1441static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1442{
1443 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1444 bool fIllegalInstr = false;
1445
1446 //Preliminary heuristics:
1447 //- no call instructions
1448 //- ret ends a block
1449
1450 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1451
1452 // bail out if the patch gets too big
1453 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1454 {
1455 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1456 fIllegalInstr = true;
1457 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1458 }
1459 else
1460 {
1461 // no unconditional jumps or calls without fixed displacements
1462 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1463 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1464 )
1465 {
1466 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1467 if ( pCpu->param1.size == 6 /* far call/jmp */
1468 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1469 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1470 )
1471 {
1472 fIllegalInstr = true;
1473 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1474 }
1475 }
1476 else /* no far returns */
1477 if (pCpu->pCurInstr->opcode == OP_RETF)
1478 {
1479 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1480 fIllegalInstr = true;
1481 }
1482 else /* no int xx or into either */
1483 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1484 {
1485 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1486 fIllegalInstr = true;
1487 }
1488
1489 #if 0
1490 ///@todo we can handle certain in/out and privileged instructions in the guest context
1491 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1492 {
1493 Log(("Illegal instructions for function patch!!\n"));
1494 return VERR_PATCHING_REFUSED;
1495 }
1496 #endif
1497 }
1498
1499 pPatch->cbPatchBlockSize += pCpu->opsize;
1500
1501 /* Illegal instruction -> end of analysis phase for this code block */
1502 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1503 {
1504 return VINF_SUCCESS;
1505 }
1506
1507 // Check for exit points
1508 switch (pCpu->pCurInstr->opcode)
1509 {
1510 case OP_ILLUD2:
1511 //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1512 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1513 return VINF_SUCCESS;
1514
1515 case OP_IRET:
1516 case OP_SYSEXIT: /* will fault or emulated in GC */
1517 case OP_RETN:
1518 return VINF_SUCCESS;
1519
1520 case OP_POPF:
1521 case OP_STI:
1522 return VWRN_CONTINUE_ANALYSIS;
1523 default:
1524 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1525 {
1526 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1527 return VINF_SUCCESS; /* exit point */
1528 }
1529 return VWRN_CONTINUE_ANALYSIS;
1530 }
1531
1532 return VWRN_CONTINUE_ANALYSIS;
1533}
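/*
 * Illustrative sketch (not part of PATM; the array and its byte values are made-up
 * examples): the param1.size == 6 checks in the analysis callbacks above match far
 * ptr16:32 operands, i.e. a 32-bit offset followed by a 16-bit selector, which is why
 * far jumps and far calls are refused here.
 */
#if 0 /* documentation-only example */
static const uint8_t s_abExampleFarJmp[] =
{
    0xEA,                   /* jmp far ptr16:32 */
    0x78, 0x56, 0x34, 0x12, /* 32-bit offset 0x12345678 (example value) */
    0x08, 0x00              /* 16-bit code selector 0x0008 (example value) */
};
#endif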
1534
1535/**
1536 * Recompiles the instructions in a code block
1537 *
1538 * @returns VBox status code.
1539 * @param pVM The VM to operate on.
1540 * @param pCpu CPU disassembly state
1541 * @param pInstrGC Guest context pointer to privileged instruction
1542 * @param pCurInstrGC Guest context pointer to the current instruction
1543 * @param pCacheRec Cache record ptr
1544 *
1545 */
1546static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1547{
1548 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1549 int rc = VINF_SUCCESS;
1550 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1551
1552 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1553
1554 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1555 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1556 {
1557 /*
1558 * Been there, done that; so insert a jump (we don't want to duplicate code)
1559 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1560 */
1561 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1562 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1563 }
1564
1565 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1566 {
1567 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1568 }
1569 else
1570 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1571
1572 if (RT_FAILURE(rc))
1573 return rc;
1574
1575 /** @note Never do a direct return unless a failure is encountered! */
1576
1577 /* Clear recompilation of next instruction flag; we are doing that right here. */
1578 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1579 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1580
1581 /* Add lookup record for patch to guest address translation */
1582 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1583
1584 /* Update lowest and highest instruction address for this patch */
1585 if (pCurInstrGC < pPatch->pInstrGCLowest)
1586 pPatch->pInstrGCLowest = pCurInstrGC;
1587 else
1588 if (pCurInstrGC > pPatch->pInstrGCHighest)
1589 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1590
1591 /* Illegal instruction -> end of recompile phase for this code block. */
1592 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1593 {
1594 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1595 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1596 goto end;
1597 }
1598
1599 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1600 * Indirect calls are handled below.
1601 */
1602 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1603 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1604 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1605 {
1606 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1607 if (pTargetGC == 0)
1608 {
1609 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1610 return VERR_PATCHING_REFUSED;
1611 }
1612
1613 if (pCpu->pCurInstr->opcode == OP_CALL)
1614 {
1615 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1616 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1617 if (RT_FAILURE(rc))
1618 goto end;
1619 }
1620 else
1621 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1622
1623 if (RT_SUCCESS(rc))
1624 rc = VWRN_CONTINUE_RECOMPILE;
1625
1626 goto end;
1627 }
1628
1629 switch (pCpu->pCurInstr->opcode)
1630 {
1631 case OP_CLI:
1632 {
1633 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1634 * until we've found the proper exit point(s).
1635 */
1636 if ( pCurInstrGC != pInstrGC
1637 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1638 )
1639 {
1640 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1641 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1642 }
1643 /* Set by irq inhibition; no longer valid now. */
1644 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1645
1646 rc = patmPatchGenCli(pVM, pPatch);
1647 if (RT_SUCCESS(rc))
1648 rc = VWRN_CONTINUE_RECOMPILE;
1649 break;
1650 }
1651
1652 case OP_MOV:
1653 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1654 {
1655 /* mov ss, src? */
1656 if ( (pCpu->param1.flags & USE_REG_SEG)
1657 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1658 {
1659 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1660 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1661 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1662 }
1663#if 0 /* necessary for Haiku */
1664 else
1665 if ( (pCpu->param2.flags & USE_REG_SEG)
1666 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1667 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1668 {
1669 /* mov GPR, ss */
1670 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1671 if (RT_SUCCESS(rc))
1672 rc = VWRN_CONTINUE_RECOMPILE;
1673 break;
1674 }
1675#endif
1676 }
1677 goto duplicate_instr;
1678
1679 case OP_POP:
1680 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1681 {
1682 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1683
1684 Log(("Force recompilation of next instruction for OP_POP SS at %RRv\n", pCurInstrGC));
1685 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1686 }
1687 goto duplicate_instr;
1688
1689 case OP_STI:
1690 {
1691 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1692
1693 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1694 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1695 {
1696 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1697 fInhibitIRQInstr = true;
1698 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1699 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1700 }
1701 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1702
1703 if (RT_SUCCESS(rc))
1704 {
1705 DISCPUSTATE cpu = *pCpu;
1706 unsigned opsize;
1707 int disret;
1708 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1709
1710 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1711
1712 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1713 { /* Force pNextInstrHC out of scope after using it */
1714 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1715 if (pNextInstrHC == NULL)
1716 {
1717 AssertFailed();
1718 return VERR_PATCHING_REFUSED;
1719 }
1720
1721 // Disassemble the next instruction
1722 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1723 }
1724 if (disret == false)
1725 {
1726 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1727 return VERR_PATCHING_REFUSED;
1728 }
1729 pReturnInstrGC = pNextInstrGC + opsize;
1730
1731 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1732 || pReturnInstrGC <= pInstrGC
1733 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1734 )
1735 {
1736 /* Not an exit point for function duplication patches */
1737 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1738 && RT_SUCCESS(rc))
1739 {
1740 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1741 rc = VWRN_CONTINUE_RECOMPILE;
1742 }
1743 else
1744 rc = VINF_SUCCESS; //exit point
1745 }
1746 else {
1747 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1748 rc = VERR_PATCHING_REFUSED; //not allowed!!
1749 }
1750 }
1751 break;
1752 }
1753
1754 case OP_POPF:
1755 {
1756 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1757
1758 /* Not an exit point for IDT handler or function replacement patches */
1759 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1760 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1761 fGenerateJmpBack = false;
1762
1763 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1764 if (RT_SUCCESS(rc))
1765 {
1766 if (fGenerateJmpBack == false)
1767 {
1768 /* Not an exit point for IDT handler or function replacement patches */
1769 rc = VWRN_CONTINUE_RECOMPILE;
1770 }
1771 else
1772 {
1773 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1774 rc = VINF_SUCCESS; /* exit point! */
1775 }
1776 }
1777 break;
1778 }
1779
1780 case OP_PUSHF:
1781 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1782 if (RT_SUCCESS(rc))
1783 rc = VWRN_CONTINUE_RECOMPILE;
1784 break;
1785
1786 case OP_PUSH:
1787 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1788 {
1789 rc = patmPatchGenPushCS(pVM, pPatch);
1790 if (RT_SUCCESS(rc))
1791 rc = VWRN_CONTINUE_RECOMPILE;
1792 break;
1793 }
1794 goto duplicate_instr;
1795
1796 case OP_IRET:
1797 Log(("IRET at %RRv\n", pCurInstrGC));
1798 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1799 if (RT_SUCCESS(rc))
1800 {
1801 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1802 rc = VINF_SUCCESS; /* exit point by definition */
1803 }
1804 break;
1805
1806 case OP_ILLUD2:
1807 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1808 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1809 if (RT_SUCCESS(rc))
1810 rc = VINF_SUCCESS; /* exit point by definition */
1811 Log(("Illegal opcode (0xf 0xb)\n"));
1812 break;
1813
1814 case OP_CPUID:
1815 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1816 if (RT_SUCCESS(rc))
1817 rc = VWRN_CONTINUE_RECOMPILE;
1818 break;
1819
1820 case OP_STR:
1821 case OP_SLDT:
1822 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1823 if (RT_SUCCESS(rc))
1824 rc = VWRN_CONTINUE_RECOMPILE;
1825 break;
1826
1827 case OP_SGDT:
1828 case OP_SIDT:
1829 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1830 if (RT_SUCCESS(rc))
1831 rc = VWRN_CONTINUE_RECOMPILE;
1832 break;
1833
1834 case OP_RETN:
1835 /* retn is an exit point for function patches */
1836 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1837 if (RT_SUCCESS(rc))
1838 rc = VINF_SUCCESS; /* exit point by definition */
1839 break;
1840
1841 case OP_SYSEXIT:
1842 /* Duplicate it, so it can be emulated in GC (or fault). */
1843 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1844 if (RT_SUCCESS(rc))
1845 rc = VINF_SUCCESS; /* exit point by definition */
1846 break;
1847
1848 case OP_CALL:
1849 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1850 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1851 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1852 */
1853 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1854 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1855 {
1856 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1857 if (RT_SUCCESS(rc))
1858 {
1859 rc = VWRN_CONTINUE_RECOMPILE;
1860 }
1861 break;
1862 }
1863 goto gen_illegal_instr;
1864
1865 case OP_JMP:
1866 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1867 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1868 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1869 */
1870 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1871 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1872 {
1873 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1874 if (RT_SUCCESS(rc))
1875 rc = VINF_SUCCESS; /* end of branch */
1876 break;
1877 }
1878 goto gen_illegal_instr;
1879
1880 case OP_INT3:
1881 case OP_INT:
1882 case OP_INTO:
1883 goto gen_illegal_instr;
1884
1885 case OP_MOV_DR:
1886 /** @note Currently we let DRx writes cause a trap 0d; our trap handler will decide whether to interpret it or not. */
1887 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1888 {
1889 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1890 if (RT_SUCCESS(rc))
1891 rc = VWRN_CONTINUE_RECOMPILE;
1892 break;
1893 }
1894 goto duplicate_instr;
1895
1896 case OP_MOV_CR:
1897 /** @note Currently we let CRx writes cause a trap 0d; our trap handler will decide whether to interpret it or not. */
1898 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1899 {
1900 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1901 if (RT_SUCCESS(rc))
1902 rc = VWRN_CONTINUE_RECOMPILE;
1903 break;
1904 }
1905 goto duplicate_instr;
1906
1907 default:
1908 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1909 {
1910gen_illegal_instr:
1911 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1912 if (RT_SUCCESS(rc))
1913 rc = VINF_SUCCESS; /* exit point by definition */
1914 }
1915 else
1916 {
1917duplicate_instr:
1918 Log(("patmPatchGenDuplicate\n"));
1919 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1920 if (RT_SUCCESS(rc))
1921 rc = VWRN_CONTINUE_RECOMPILE;
1922 }
1923 break;
1924 }
1925
1926end:
1927
1928 if ( !fInhibitIRQInstr
1929 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1930 {
1931 int rc2;
1932 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1933
1934 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1935 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1936 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1937 {
1938 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1939
1940 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1941 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1942 rc = VINF_SUCCESS; /* end of the line */
1943 }
1944 else
1945 {
1946 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1947 }
1948 if (RT_FAILURE(rc2))
1949 rc = rc2;
1950 }
1951
1952 if (RT_SUCCESS(rc))
1953 {
1954 // If this is a single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1955 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1956 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1957 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1958 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1959 )
1960 {
1961 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1962
1963 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1964 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1965
1966 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1967 AssertRC(rc);
1968 }
1969 }
1970 return rc;
1971}
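/*
 * Illustrative sketch (not part of PATM; examplePatchCoversJump is an example-only
 * helper): the PATMFL_CHECK_SIZE handling above keeps recompiling until at least
 * SIZEOF_NEARJUMP32 (5) bytes of original guest code have been covered, because the
 * jump later written into the guest (0xE9 opcode + 32-bit displacement) overwrites
 * exactly that many bytes.
 */
#if 0 /* documentation-only example */
static bool examplePatchCoversJump(uint32_t GCPtrFirstInstr, uint32_t GCPtrCurInstr, uint32_t cbCurInstr)
{
    /* true once the duplicated instructions span the 5 bytes the guest patch jump will clobber */
    return GCPtrCurInstr + cbCurInstr - GCPtrFirstInstr >= 5 /* SIZEOF_NEARJUMP32 */;
}
#endif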
1972
1973
1974#ifdef LOG_ENABLED
1975
1976/** Adds a disasm jump record (temporary; used to prevent duplicate analysis of a jump target)
1977 *
1978 * @param pVM The VM to operate on.
1979 * @param pPatch Patch structure ptr
1980 * @param pInstrGC Guest context pointer to privileged instruction
1981 *
1982 */
1983static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1984{
1985 PAVLPVNODECORE pRec;
1986
1987 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1988 Assert(pRec);
1989 pRec->Key = (AVLPVKEY)pInstrGC;
1990
1991 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
1992 Assert(ret);
1993}
1994
1995/**
1996 * Checks if jump target has been analysed before.
1997 *
1998 * @returns true if the jump target was disassembled before, false otherwise.
1999 * @param pPatch Patch struct
2000 * @param pInstrGC Jump target
2001 *
2002 */
2003static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2004{
2005 PAVLPVNODECORE pRec;
2006
2007 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2008 if (pRec)
2009 return true;
2010 return false;
2011}
2012
2013/**
2014 * For proper disassembly of the final patch block
2015 *
2016 * @returns VBox status code.
2017 * @param pVM The VM to operate on.
2018 * @param pCpu CPU disassembly state
2019 * @param pInstrGC Guest context pointer to privileged instruction
2020 * @param pCurInstrGC Guest context pointer to the current instruction
2021 * @param pCacheRec Cache record ptr
2022 *
2023 */
2024int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2025{
2026 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2027
2028 if (pCpu->pCurInstr->opcode == OP_INT3)
2029 {
2030 /* Could be an int3 inserted in a call patch. Check to be sure */
2031 DISCPUSTATE cpu;
2032 RTRCPTR pOrgJumpGC;
2033 uint32_t dummy;
2034
2035 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2036 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2037
2038 { /* Force pOrgJumpHC out of scope after using it */
2039 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2040
2041 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2042 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2043 return VINF_SUCCESS;
2044 }
2045 return VWRN_CONTINUE_ANALYSIS;
2046 }
2047
2048 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2049 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2050 {
2051 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2052 return VWRN_CONTINUE_ANALYSIS;
2053 }
2054
2055 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2056 || pCpu->pCurInstr->opcode == OP_INT
2057 || pCpu->pCurInstr->opcode == OP_IRET
2058 || pCpu->pCurInstr->opcode == OP_RETN
2059 || pCpu->pCurInstr->opcode == OP_RETF
2060 )
2061 {
2062 return VINF_SUCCESS;
2063 }
2064
2065 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2066 return VINF_SUCCESS;
2067
2068 return VWRN_CONTINUE_ANALYSIS;
2069}
2070
2071
2072/**
2073 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2074 *
2075 * @returns VBox status code.
2076 * @param pVM The VM to operate on.
2077 * @param pInstrGC Guest context pointer to the initial privileged instruction
2078 * @param pCurInstrGC Guest context pointer to the current instruction
2079 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2080 * @param pCacheRec Cache record ptr
2081 *
2082 */
2083int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2084{
2085 DISCPUSTATE cpu;
2086 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2087 int rc = VWRN_CONTINUE_ANALYSIS;
2088 uint32_t opsize, delta;
2089 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2090 bool disret;
2091 char szOutput[256];
2092
2093 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2094
2095 /* We need this to determine branch targets (and for disassembling). */
2096 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2097
2098 while(rc == VWRN_CONTINUE_ANALYSIS)
2099 {
2100 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2101
2102 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2103 if (pCurInstrHC == NULL)
2104 {
2105 rc = VERR_PATCHING_REFUSED;
2106 goto end;
2107 }
2108
2109 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2110 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2111 {
2112 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2113
2114 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2115 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2116 else
2117 Log(("DIS %s", szOutput));
2118
2119 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2120 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2121 {
2122 rc = VINF_SUCCESS;
2123 goto end;
2124 }
2125 }
2126 else
2127 Log(("DIS: %s", szOutput));
2128
2129 if (disret == false)
2130 {
2131 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2132 rc = VINF_SUCCESS;
2133 goto end;
2134 }
2135
2136 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2137 if (rc != VWRN_CONTINUE_ANALYSIS) {
2138 break; //done!
2139 }
2140
2141 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2142 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2143 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2144 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2145 )
2146 {
2147 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2148 RTRCPTR pOrgTargetGC;
2149
2150 if (pTargetGC == 0)
2151 {
2152 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2153 rc = VERR_PATCHING_REFUSED;
2154 break;
2155 }
2156
2157 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2158 {
2159 //jump back to guest code
2160 rc = VINF_SUCCESS;
2161 goto end;
2162 }
2163 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2164
2165 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2166 {
2167 rc = VINF_SUCCESS;
2168 goto end;
2169 }
2170
2171 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2172 {
2173 /* New jump, let's check it. */
2174 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2175
2176 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2177 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2178 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2179
2180 if (rc != VINF_SUCCESS) {
2181 break; //done!
2182 }
2183 }
2184 if (cpu.pCurInstr->opcode == OP_JMP)
2185 {
2186 /* Unconditional jump; return to caller. */
2187 rc = VINF_SUCCESS;
2188 goto end;
2189 }
2190
2191 rc = VWRN_CONTINUE_ANALYSIS;
2192 }
2193 pCurInstrGC += opsize;
2194 }
2195end:
2196 return rc;
2197}
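/*
 * Illustrative sketch (not part of PATM; PATMResolveBranch is the real helper and its
 * implementation may differ): a relative near branch encodes a signed displacement that
 * is added to the address of the instruction *following* the branch, so the target used
 * by the recursion above is conceptually computed like this.
 */
#if 0 /* documentation-only example */
static uint32_t exampleResolveRelativeBranch(uint32_t GCPtrInstr, uint32_t cbInstr, int32_t i32Disp)
{
    /* target = end of the branch instruction + sign-extended displacement */
    return GCPtrInstr + cbInstr + (uint32_t)i32Disp;
}
#endif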
2198
2199/**
2200 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable; frees all temporary disasm jump records afterwards
2201 *
2202 * @returns VBox status code.
2203 * @param pVM The VM to operate on.
2204 * @param pInstrGC Guest context pointer to the initial privileged instruction
2205 * @param pCurInstrGC Guest context pointer to the current instruction
2206 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2207 * @param pCacheRec Cache record ptr
2208 *
2209 */
2210int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2211{
2212 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2213
2214 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2215 /* Free all disasm jump records. */
2216 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2217 return rc;
2218}
2219
2220#endif /* LOG_ENABLED */
2221
2222/**
2223 * Detects whether the specified address falls within a 5 byte jump generated for an active patch.
2224 * If so, this patch is permanently disabled.
2225 *
2226 * @param pVM The VM to operate on.
2227 * @param pInstrGC Guest context pointer to instruction
2228 * @param pConflictGC Guest context pointer to check
2229 *
2230 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2231 *
2232 */
2233VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2234{
2235 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2236 if (pTargetPatch)
2237 {
2238 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2239 }
2240 return VERR_PATCH_NO_CONFLICT;
2241}
2242
2243/**
2244 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2245 *
2246 * @returns VBox status code.
2247 * @param pVM The VM to operate on.
2248 * @param pInstrGC Guest context pointer to privileged instruction
2249 * @param pCurInstrGC Guest context pointer to the current instruction
2250 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2251 * @param pCacheRec Cache record ptr
2252 *
2253 */
2254static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2255{
2256 DISCPUSTATE cpu;
2257 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2258 int rc = VWRN_CONTINUE_ANALYSIS;
2259 uint32_t opsize;
2260 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2261 bool disret;
2262#ifdef LOG_ENABLED
2263 char szOutput[256];
2264#endif
2265
2266 while (rc == VWRN_CONTINUE_RECOMPILE)
2267 {
2268 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2269
2270 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2271 if (pCurInstrHC == NULL)
2272 {
2273 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2274 goto end;
2275 }
2276#ifdef LOG_ENABLED
2277 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2278 Log(("Recompile: %s", szOutput));
2279#else
2280 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2281#endif
2282 if (disret == false)
2283 {
2284 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2285
2286 /* Add lookup record for patch to guest address translation */
2287 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2288 patmPatchGenIllegalInstr(pVM, pPatch);
2289 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2290 goto end;
2291 }
2292
2293 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2294 if (rc != VWRN_CONTINUE_RECOMPILE)
2295 {
2296 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2297 if ( rc == VINF_SUCCESS
2298 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2299 {
2300 DISCPUSTATE cpunext;
2301 uint32_t opsizenext;
2302 uint8_t *pNextInstrHC;
2303 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2304
2305 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2306
2307 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2308 * Recompile the next instruction as well
2309 */
2310 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2311 if (pNextInstrHC == NULL)
2312 {
2313 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2314 goto end;
2315 }
2316 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2317 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2318 if (disret == false)
2319 {
2320 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2321 goto end;
2322 }
2323 switch(cpunext.pCurInstr->opcode)
2324 {
2325 case OP_IRET: /* inhibit cleared in generated code */
2326 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2327 case OP_HLT:
2328 break; /* recompile these */
2329
2330 default:
2331 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2332 {
2333 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2334
2335 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2336 AssertRC(rc);
2337 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2338 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2339 }
2340 break;
2341 }
2342
2343 /** @note after a cli we must continue to a proper exit point */
2344 if (cpunext.pCurInstr->opcode != OP_CLI)
2345 {
2346 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2347 if (RT_SUCCESS(rc))
2348 {
2349 rc = VINF_SUCCESS;
2350 goto end;
2351 }
2352 break;
2353 }
2354 else
2355 rc = VWRN_CONTINUE_RECOMPILE;
2356 }
2357 else
2358 break; /* done! */
2359 }
2360
2361 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2362
2363
2364 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2365 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2366 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2367 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2368 )
2369 {
2370 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2371 if (addr == 0)
2372 {
2373 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2374 rc = VERR_PATCHING_REFUSED;
2375 break;
2376 }
2377
2378 Log(("Jump encountered target %RRv\n", addr));
2379
2380 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2381 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2382 {
2383 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2384 /* First we need to finish this linear code stream until the next exit point. */
2385 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pCacheRec);
2386 if (RT_FAILURE(rc))
2387 {
2388 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2389 break; //fatal error
2390 }
2391 }
2392
2393 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2394 {
2395 /* New code; let's recompile it. */
2396 Log(("patmRecompileCodeStream continue with jump\n"));
2397
2398 /*
2399 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2400 * this patch so we can continue our analysis
2401 *
2402 * We rely on CSAM to detect and resolve conflicts
2403 */
2404 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2405 if(pTargetPatch)
2406 {
2407 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2408 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2409 }
2410
2411 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2412 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2413 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2414
2415 if(pTargetPatch)
2416 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2417
2418 if (RT_FAILURE(rc))
2419 {
2420 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2421 break; //done!
2422 }
2423 }
2424 /* Always return to caller here; we're done! */
2425 rc = VINF_SUCCESS;
2426 goto end;
2427 }
2428 else
2429 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2430 {
2431 rc = VINF_SUCCESS;
2432 goto end;
2433 }
2434 pCurInstrGC += opsize;
2435 }
2436end:
2437 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2438 return rc;
2439}
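/*
 * Illustrative note (not part of PATM; exampleStartsInterruptShadow is an example-only
 * helper and its inputs are hypothetical): sti, mov ss and pop ss create a one
 * instruction "interrupt shadow" on x86, i.e. the following instruction must execute
 * before an interrupt can be taken. That is why the loop above, when PATMFL_INHIBIT_IRQS
 * is set, disassembles and recompiles one extra instruction instead of stopping at the
 * normal exit point.
 */
#if 0 /* documentation-only example */
static bool exampleStartsInterruptShadow(unsigned uOpcode, bool fLoadsSS)
{
    if (uOpcode == OP_STI)
        return true;                /* interrupts are enabled only after the next instruction */
    if ((uOpcode == OP_MOV || uOpcode == OP_POP) && fLoadsSS)
        return true;                /* SS loads inhibit interrupts for one instruction */
    return false;
}
#endif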
2440
2441
2442/**
2443 * Generate the jump from guest to patch code
2444 *
2445 * @returns VBox status code.
2446 * @param pVM The VM to operate on.
2447 * @param pPatch Patch record
2448 * @param pCacheRec Guest translation lookup cache record
2449 */
2450static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2451{
2452 uint8_t temp[8];
2453 uint8_t *pPB;
2454 int rc;
2455
2456 Assert(pPatch->cbPatchJump <= sizeof(temp));
2457 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2458
2459 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2460 Assert(pPB);
2461
2462#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2463 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2464 {
2465 Assert(pPatch->pPatchJumpDestGC);
2466
2467 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2468 {
2469 // jmp [PatchCode]
2470 if (fAddFixup)
2471 {
2472 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2473 {
2474 Log(("Relocation failed for the jump in the guest code!!\n"));
2475 return VERR_PATCHING_REFUSED;
2476 }
2477 }
2478
2479 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2480 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative displacement
2481 }
2482 else
2483 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2484 {
2485 // jmp [PatchCode]
2486 if (fAddFixup)
2487 {
2488 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2489 {
2490 Log(("Relocation failed for the jump in the guest code!!\n"));
2491 return VERR_PATCHING_REFUSED;
2492 }
2493 }
2494
2495 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2496 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2497 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative displacement
2498 }
2499 else
2500 {
2501 Assert(0);
2502 return VERR_PATCHING_REFUSED;
2503 }
2504 }
2505 else
2506#endif
2507 {
2508 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2509
2510 // jmp [PatchCode]
2511 if (fAddFixup)
2512 {
2513 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2514 {
2515 Log(("Relocation failed for the jump in the guest code!!\n"));
2516 return VERR_PATCHING_REFUSED;
2517 }
2518 }
2519 temp[0] = 0xE9; //jmp
2520 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement
2521 }
2522 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2523 AssertRC(rc);
2524
2525 if (rc == VINF_SUCCESS)
2526 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2527
2528 return rc;
2529}
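/*
 * Illustrative sketch (not part of PATM; exampleEncodeNearJmp32 is an example-only
 * helper): the 5 byte jump written into the guest above is a plain near jmp rel32;
 * byte 0 is the 0xE9 opcode and bytes 1..4 hold the displacement relative to the end
 * of the jump instruction itself.
 */
#if 0 /* documentation-only example */
static void exampleEncodeNearJmp32(uint8_t pbDst[5], uint32_t GCPtrJmp, uint32_t GCPtrTarget)
{
    pbDst[0] = 0xE9;                                        /* jmp rel32 */
    *(uint32_t *)&pbDst[1] = GCPtrTarget - (GCPtrJmp + 5);  /* rel32 = target - (jmp address + 5) */
}
#endif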
2530
2531/**
2532 * Remove the jump from guest to patch code
2533 *
2534 * @returns VBox status code.
2535 * @param pVM The VM to operate on.
2536 * @param pPatch Patch record
2537 */
2538static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2539{
2540#ifdef DEBUG
2541 DISCPUSTATE cpu;
2542 char szOutput[256];
2543 uint32_t opsize, i = 0;
2544 bool disret;
2545
2546 while (i < pPatch->cbPrivInstr)
2547 {
2548 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2549 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
2550 if (disret == false)
2551 break;
2552
2553 Log(("Org patch jump: %s", szOutput));
2554 Assert(opsize);
2555 i += opsize;
2556 }
2557#endif
2558
2559 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2560 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2561#ifdef DEBUG
2562 if (rc == VINF_SUCCESS)
2563 {
2564 i = 0;
2565 while(i < pPatch->cbPrivInstr)
2566 {
2567 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2568 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
2569 if (disret == false)
2570 break;
2571
2572 Log(("Org instr: %s", szOutput));
2573 Assert(opsize);
2574 i += opsize;
2575 }
2576 }
2577#endif
2578 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2579 return rc;
2580}
2581
2582/**
2583 * Generate the call from guest to patch code
2584 *
2585 * @returns VBox status code.
2586 * @param pVM The VM to operate on.
2587 * @param pPatch Patch record
2588 * @param pTargetGC Guest context address of the patch code to call
2589 * @param pCacheRec Guest translation cache record
2590 */
2591static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2592{
2593 uint8_t temp[8];
2594 uint8_t *pPB;
2595 int rc;
2596
2597 Assert(pPatch->cbPatchJump <= sizeof(temp));
2598
2599 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2600 Assert(pPB);
2601
2602 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2603
2604 // jmp [PatchCode]
2605 if (fAddFixup)
2606 {
2607 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2608 {
2609 Log(("Relocation failed for the jump in the guest code!!\n"));
2610 return VERR_PATCHING_REFUSED;
2611 }
2612 }
2613
2614 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2615 temp[0] = pPatch->aPrivInstr[0];
2616 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement
2617
2618 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2619 AssertRC(rc);
2620
2621 return rc;
2622}
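/*
 * Illustrative sketch (not part of PATM; exampleRetargetNearBranch is an example-only
 * helper): call rel32 (0xE8) and jmp rel32 (0xE9) share the same 32-bit displacement
 * encoding, which is why the code above keeps the original opcode byte and merely
 * recomputes the displacement for the new target.
 */
#if 0 /* documentation-only example */
static void exampleRetargetNearBranch(uint8_t pbInstr[5], uint32_t GCPtrInstr, uint32_t GCPtrNewTarget)
{
    Assert(pbInstr[0] == 0xE8 || pbInstr[0] == 0xE9);               /* call rel32 or jmp rel32 */
    *(uint32_t *)&pbInstr[1] = GCPtrNewTarget - (GCPtrInstr + 5);   /* displacement relative to the next instruction */
}
#endif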
2623
2624
2625/**
2626 * Patch cli/sti pushf/popf instruction block at specified location
2627 *
2628 * @returns VBox status code.
2629 * @param pVM The VM to operate on.
2630 * @param pInstrGC Guest context pointer to privileged instruction
2631 * @param pInstrHC Host context pointer to privileged instruction
2632 * @param uOpcode Instruction opcode
2633 * @param uOpSize Size of starting instruction
2634 * @param pPatchRec Patch record
2635 *
2636 * @note Returns failure if patching is not allowed or not possible.
2637 *
2638 */
2639VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2640 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2641{
2642 PPATCHINFO pPatch = &pPatchRec->patch;
2643 int rc = VERR_PATCHING_REFUSED;
2644 DISCPUSTATE cpu;
2645 uint32_t orgOffsetPatchMem = ~0;
2646 RTRCPTR pInstrStart;
2647#ifdef LOG_ENABLED
2648 uint32_t opsize;
2649 char szOutput[256];
2650 bool disret;
2651#endif
2652
2653 /* Save original offset (in case of failures later on) */
2654 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2655 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2656
2657 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2658 switch (uOpcode)
2659 {
2660 case OP_MOV:
2661 break;
2662
2663 case OP_CLI:
2664 case OP_PUSHF:
2665 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2666 /** @note special precautions are taken when disabling and enabling such patches. */
2667 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2668 break;
2669
2670 default:
2671 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2672 {
2673 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2674 return VERR_INVALID_PARAMETER;
2675 }
2676 }
2677
2678 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2679 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2680
2681 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2682 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2683 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2684 )
2685 {
2686 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2687 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2688 rc = VERR_PATCHING_REFUSED;
2689 goto failure;
2690 }
2691
2692 pPatch->nrPatch2GuestRecs = 0;
2693 pInstrStart = pInstrGC;
2694
2695#ifdef PATM_ENABLE_CALL
2696 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2697#endif
2698
2699 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2700 pPatch->uCurPatchOffset = 0;
2701
2702 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2703
2704 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2705 {
2706 Assert(pPatch->flags & PATMFL_INTHANDLER);
2707
2708 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2709 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2710 if (RT_FAILURE(rc))
2711 goto failure;
2712 }
2713
2714 /***************************************************************************************************************************/
2715 /** @note We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2716 /***************************************************************************************************************************/
2717#ifdef VBOX_WITH_STATISTICS
2718 if (!(pPatch->flags & PATMFL_SYSENTER))
2719 {
2720 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2721 if (RT_FAILURE(rc))
2722 goto failure;
2723 }
2724#endif
2725
2726 PATMP2GLOOKUPREC cacheRec;
2727 RT_ZERO(cacheRec);
2728 cacheRec.pPatch = pPatch;
2729
2730 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2731 /* Free leftover lock if any. */
2732 if (cacheRec.Lock.pvMap)
2733 {
2734 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2735 cacheRec.Lock.pvMap = NULL;
2736 }
2737 if (rc != VINF_SUCCESS)
2738 {
2739 Log(("PATMR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2740 goto failure;
2741 }
2742
2743 /* Calculated during analysis. */
2744 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2745 {
2746 /* Most likely cause: we encountered an illegal instruction very early on. */
2747 /** @todo could turn it into an int3 callable patch. */
2748 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2749 rc = VERR_PATCHING_REFUSED;
2750 goto failure;
2751 }
2752
2753 /* size of patch block */
2754 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2755
2756
2757 /* Update free pointer in patch memory. */
2758 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2759 /* Round to next 8 byte boundary. */
2760 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2761
2762 /*
2763 * Insert into patch to guest lookup tree
2764 */
2765 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2766 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2767 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2768 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2769 if (!rc)
2770 {
2771 rc = VERR_PATCHING_REFUSED;
2772 goto failure;
2773 }
2774
2775 /* Note that patmr3SetBranchTargets can install additional patches!! */
2776 rc = patmr3SetBranchTargets(pVM, pPatch);
2777 if (rc != VINF_SUCCESS)
2778 {
2779 Log(("PATMR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2780 goto failure;
2781 }
2782
2783#ifdef LOG_ENABLED
2784 Log(("Patch code ----------------------------------------------------------\n"));
2785 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2786 /* Free leftover lock if any. */
2787 if (cacheRec.Lock.pvMap)
2788 {
2789 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2790 cacheRec.Lock.pvMap = NULL;
2791 }
2792 Log(("Patch code ends -----------------------------------------------------\n"));
2793#endif
2794
2795 /* make a copy of the guest code bytes that will be overwritten */
2796 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2797
2798 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2799 AssertRC(rc);
2800
2801 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2802 {
2803 /*uint8_t ASMInt3 = 0xCC; - unused */
2804
2805 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2806 /* Replace first opcode byte with 'int 3'. */
2807 rc = patmActivateInt3Patch(pVM, pPatch);
2808 if (RT_FAILURE(rc))
2809 goto failure;
2810
2811 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2812 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2813
2814 pPatch->flags &= ~PATMFL_INSTR_HINT;
2815 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2816 }
2817 else
2818 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2819 {
2820 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2821 /* now insert a jump in the guest code */
2822 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2823 AssertRC(rc);
2824 if (RT_FAILURE(rc))
2825 goto failure;
2826
2827 }
2828
2829#ifdef LOG_ENABLED
2830 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2831 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
2832 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2833#endif
2834
2835 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2836 pPatch->pTempInfo->nrIllegalInstr = 0;
2837
2838 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2839
2840 pPatch->uState = PATCH_ENABLED;
2841 return VINF_SUCCESS;
2842
2843failure:
2844 if (pPatchRec->CoreOffset.Key)
2845 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2846
2847 patmEmptyTree(pVM, &pPatch->FixupTree);
2848 pPatch->nrFixups = 0;
2849
2850 patmEmptyTree(pVM, &pPatch->JumpTree);
2851 pPatch->nrJumpRecs = 0;
2852
2853 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2854 pPatch->pTempInfo->nrIllegalInstr = 0;
2855
2856 /* Turn this patch into a dummy. */
2857 pPatch->uState = PATCH_REFUSED;
2858 pPatch->pPatchBlockOffset = 0;
2859
2860 // Give back the patch memory we no longer need
2861 Assert(orgOffsetPatchMem != (uint32_t)~0);
2862 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2863
2864 return rc;
2865}
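/*
 * Illustrative sketch (not part of PATM; exampleAlignUp8 is an example-only helper,
 * RT_ALIGN_32 is the real IPRT macro used above): rounding the patch memory offset up
 * to the next 8 byte boundary after each generated block boils down to the usual
 * add-and-mask trick.
 */
#if 0 /* documentation-only example */
static uint32_t exampleAlignUp8(uint32_t off)
{
    return (off + 7) & ~(uint32_t)7; /* smallest multiple of 8 that is >= off */
}
#endif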
2866
2867/**
2868 * Patch IDT handler
2869 *
2870 * @returns VBox status code.
2871 * @param pVM The VM to operate on.
2872 * @param pInstrGC Guest context pointer to privileged instruction
2873 * @param uOpSize Size of starting instruction
2874 * @param pPatchRec Patch record
2875 * @param pCacheRec Cache record ptr
2876 *
2877 * @note Returns failure if patching is not allowed or not possible.
2878 *
2879 */
2880static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
2881{
2882 PPATCHINFO pPatch = &pPatchRec->patch;
2883 bool disret;
2884 DISCPUSTATE cpuPush, cpuJmp;
2885 uint32_t opsize;
2886 RTRCPTR pCurInstrGC = pInstrGC;
2887 uint8_t *pCurInstrHC, *pInstrHC;
2888 uint32_t orgOffsetPatchMem = ~0;
2889
2890 pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2891 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
2892
2893 /*
2894 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2895 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2896 * condition here and only patch the common entrypoint once.
2897 */
2898 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2899 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2900 Assert(disret);
2901 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2902 {
2903 RTRCPTR pJmpInstrGC;
2904 int rc;
2905 pCurInstrGC += opsize;
2906
2907 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2908 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2909 if ( disret
2910 && cpuJmp.pCurInstr->opcode == OP_JMP
2911 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2912 )
2913 {
2914 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2915 if (pJmpPatch == 0)
2916 {
2917 /* Patch it first! */
2918 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2919 if (rc != VINF_SUCCESS)
2920 goto failure;
2921 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2922 Assert(pJmpPatch);
2923 }
2924 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2925 goto failure;
2926
2927 /* save original offset (in case of failures later on) */
2928 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2929
2930 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2931 pPatch->uCurPatchOffset = 0;
2932 pPatch->nrPatch2GuestRecs = 0;
2933
2934#ifdef VBOX_WITH_STATISTICS
2935 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2936 if (RT_FAILURE(rc))
2937 goto failure;
2938#endif
2939
2940 /* Install fake cli patch (to clear the virtual IF) */
2941 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2942 if (RT_FAILURE(rc))
2943 goto failure;
2944
2945 /* Add lookup record for patch to guest address translation (for the push) */
2946 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2947
2948 /* Duplicate push. */
2949 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2950 if (RT_FAILURE(rc))
2951 goto failure;
2952
2953 /* Generate jump to common entrypoint. */
2954 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2955 if (RT_FAILURE(rc))
2956 goto failure;
2957
2958 /* size of patch block */
2959 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2960
2961 /* Update free pointer in patch memory. */
2962 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2963 /* Round to next 8 byte boundary */
2964 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2965
2966 /* There's no jump from guest to patch code. */
2967 pPatch->cbPatchJump = 0;
2968
2969
2970#ifdef LOG_ENABLED
2971 Log(("Patch code ----------------------------------------------------------\n"));
2972 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
2973 Log(("Patch code ends -----------------------------------------------------\n"));
2974#endif
2975 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2976
2977 /*
2978 * Insert into patch to guest lookup tree
2979 */
2980 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2981 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2982 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2983 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2984
2985 pPatch->uState = PATCH_ENABLED;
2986
2987 return VINF_SUCCESS;
2988 }
2989 }
2990failure:
2991 /* Give back the patch memory we no longer need */
2992 if (orgOffsetPatchMem != (uint32_t)~0)
2993 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2994
2995 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
2996}
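/*
 * Illustrative sketch (not part of PATM; the byte values are made-up examples): the
 * shared-entrypoint detection above matches the common "push <constant>; jmp <common
 * handler>" pattern used by e.g. Linux interrupt stubs, so the common handler is
 * patched only once and each stub merely gets a duplicated push plus a jump into that
 * patch.
 */
#if 0 /* documentation-only example */
static const uint8_t s_abExampleIdtStub[] =
{
    0x6A, 0xFF,                     /* push 0xffffffff (push imm8, sign-extended; example value) */
    0xE9, 0x00, 0x10, 0x00, 0x00    /* jmp rel32 to the common entrypoint (example displacement) */
};
#endif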
2997
2998/**
2999 * Install a trampoline to call a guest trap handler directly
3000 *
3001 * @returns VBox status code.
3002 * @param pVM The VM to operate on.
3003 * @param pInstrGC Guest context pointer to privileged instruction
3004 * @param pPatchRec Patch record
3005 * @param pCacheRec Cache record ptr
3006 *
3007 */
3008static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3009{
3010 PPATCHINFO pPatch = &pPatchRec->patch;
3011 int rc = VERR_PATCHING_REFUSED;
3012 uint32_t orgOffsetPatchMem = ~0;
3013#ifdef LOG_ENABLED
3014 bool disret;
3015 DISCPUSTATE cpu;
3016 uint32_t opsize;
3017 char szOutput[256];
3018#endif
3019
3020 // save original offset (in case of failures later on)
3021 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3022
3023 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3024 pPatch->uCurPatchOffset = 0;
3025 pPatch->nrPatch2GuestRecs = 0;
3026
3027#ifdef VBOX_WITH_STATISTICS
3028 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3029 if (RT_FAILURE(rc))
3030 goto failure;
3031#endif
3032
3033 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3034 if (RT_FAILURE(rc))
3035 goto failure;
3036
3037 /* size of patch block */
3038 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3039
3040 /* Update free pointer in patch memory. */
3041 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3042 /* Round to next 8 byte boundary */
3043 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3044
3045 /* There's no jump from guest to patch code. */
3046 pPatch->cbPatchJump = 0;
3047
3048#ifdef LOG_ENABLED
3049 Log(("Patch code ----------------------------------------------------------\n"));
3050 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3051 Log(("Patch code ends -----------------------------------------------------\n"));
3052#endif
3053
3054#ifdef LOG_ENABLED
3055 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3056 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3057 Log(("TRAP handler patch: %s", szOutput));
3058#endif
3059 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3060
3061 /*
3062 * Insert into patch to guest lookup tree
3063 */
3064 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3065 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3066 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3067 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3068
3069 pPatch->uState = PATCH_ENABLED;
3070 return VINF_SUCCESS;
3071
3072failure:
3073 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3074
3075 /* Turn this patch into a dummy. */
3076 pPatch->uState = PATCH_REFUSED;
3077 pPatch->pPatchBlockOffset = 0;
3078
3079 /* Give back the patch memory we no longer need */
3080 Assert(orgOffsetPatchMem != (uint32_t)~0);
3081 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3082
3083 return rc;
3084}
3085
3086
3087#ifdef LOG_ENABLED
3088/**
3089 * Check if the instruction is patched as a common idt handler
3090 *
3091 * @returns true or false
3092 * @param pVM The VM to operate on.
3093 * @param pInstrGC Guest context pointer to the instruction
3094 *
3095 */
3096static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3097{
3098 PPATMPATCHREC pRec;
3099
3100 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3101 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3102 return true;
3103 return false;
3104}
3105#endif /* LOG_ENABLED */
3106
3107
3108/**
3109 * Duplicates a complete function
3110 *
3111 * @returns VBox status code.
3112 * @param pVM The VM to operate on.
3113 * @param pInstrGC Guest context pointer to the privileged instruction
3114 * @param pPatchRec Patch record
3115 * @param pCacheRec Cache record ptr
3116 *
3117 */
3118static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3119{
3120 PPATCHINFO pPatch = &pPatchRec->patch;
3121 int rc = VERR_PATCHING_REFUSED;
3122 DISCPUSTATE cpu;
3123 uint32_t orgOffsetPatchMem = ~0;
3124
3125 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3126 /* Save original offset (in case of failures later on). */
3127 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3128
3129 /* We will not go on indefinitely with call instruction handling. */
3130 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3131 {
3132 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3133 return VERR_PATCHING_REFUSED;
3134 }
3135
3136 pVM->patm.s.ulCallDepth++;
3137
3138#ifdef PATM_ENABLE_CALL
3139 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3140#endif
3141
3142 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3143
3144 pPatch->nrPatch2GuestRecs = 0;
3145 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3146 pPatch->uCurPatchOffset = 0;
3147
3148 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3149
3150 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3151 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3152 if (RT_FAILURE(rc))
3153 goto failure;
3154
3155#ifdef VBOX_WITH_STATISTICS
3156 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3157 if (RT_FAILURE(rc))
3158 goto failure;
3159#endif
3160
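     /* Recompile the guest function into patch memory, instruction by instruction, starting at the entry point. */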
3161 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3162 if (rc != VINF_SUCCESS)
3163 {
3164 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3165 goto failure;
3166 }
3167
3168 //size of patch block
3169 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3170
3171 //update free pointer in patch memory
3172 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3173 /* Round to next 8 byte boundary. */
3174 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3175
3176 pPatch->uState = PATCH_ENABLED;
3177
3178 /*
3179 * Insert into patch to guest lookup tree
3180 */
3181 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3182 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3183 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3184 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3185 if (!rc)
3186 {
3187 rc = VERR_PATCHING_REFUSED;
3188 goto failure;
3189 }
3190
3191 /* Note that patmr3SetBranchTargets can install additional patches!! */
3192 rc = patmr3SetBranchTargets(pVM, pPatch);
3193 if (rc != VINF_SUCCESS)
3194 {
3195 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3196 goto failure;
3197 }
3198
3199#ifdef LOG_ENABLED
3200 Log(("Patch code ----------------------------------------------------------\n"));
3201 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3202 Log(("Patch code ends -----------------------------------------------------\n"));
3203#endif
3204
3205 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3206
3207 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3208 pPatch->pTempInfo->nrIllegalInstr = 0;
3209
3210 pVM->patm.s.ulCallDepth--;
3211 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3212 return VINF_SUCCESS;
3213
3214failure:
3215 if (pPatchRec->CoreOffset.Key)
3216 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3217
3218 patmEmptyTree(pVM, &pPatch->FixupTree);
3219 pPatch->nrFixups = 0;
3220
3221 patmEmptyTree(pVM, &pPatch->JumpTree);
3222 pPatch->nrJumpRecs = 0;
3223
3224 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3225 pPatch->pTempInfo->nrIllegalInstr = 0;
3226
3227 /* Turn this cli patch into a dummy. */
3228 pPatch->uState = PATCH_REFUSED;
3229 pPatch->pPatchBlockOffset = 0;
3230
3231 // Give back the patch memory we no longer need
3232 Assert(orgOffsetPatchMem != (uint32_t)~0);
3233 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3234
3235 pVM->patm.s.ulCallDepth--;
3236 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3237 return rc;
3238}
3239
3240/**
3241 * Creates trampoline code to jump inside an existing patch
3242 *
3243 * @returns VBox status code.
3244 * @param pVM The VM to operate on.
3245 * @param pInstrGC Guest context pointer to the privileged instruction
3246 * @param pPatchRec Patch record
3247 *
3248 */
3249static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3250{
3251 PPATCHINFO pPatch = &pPatchRec->patch;
3252 RTRCPTR pPage, pPatchTargetGC = 0;
3253 uint32_t orgOffsetPatchMem = ~0;
3254 int rc = VERR_PATCHING_REFUSED;
3255
3256 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3257 /* Save original offset (in case of failures later on). */
3258 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3259
3260 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3261 /** @todo we already checked this before */
3262 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3263
3264 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3265 if (pPatchPage)
3266 {
3267 uint32_t i;
3268
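         /* Look for an enabled function-duplicate patch on this page that already contains recompiled
            code for pInstrGC; if one exists, the trampoline can jump straight into that patch block. */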
3269 for (i=0;i<pPatchPage->cCount;i++)
3270 {
3271 if (pPatchPage->aPatch[i])
3272 {
3273 PPATCHINFO pPatch2 = pPatchPage->aPatch[i];
3274
3275 if ( (pPatch2->flags & PATMFL_DUPLICATE_FUNCTION)
3276 && pPatch2->uState == PATCH_ENABLED)
3277 {
3278 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch2, pInstrGC);
3279 if (pPatchTargetGC)
3280 {
3281 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3282 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch2->Patch2GuestAddrTree, offsetPatch, false);
3283 Assert(pPatchToGuestRec);
3284
3285 pPatchToGuestRec->fJumpTarget = true;
3286 Assert(pPatchTargetGC != pPatch2->pPrivInstrGC);
3287 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv\n", pPatch2->pPrivInstrGC));
3288 pPatch2->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3289 break;
3290 }
3291 }
3292 }
3293 }
3294 }
3295 AssertReturn(pPatchPage && pPatchTargetGC, VERR_PATCHING_REFUSED);
3296
3297 pPatch->nrPatch2GuestRecs = 0;
3298 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3299 pPatch->uCurPatchOffset = 0;
3300
3301 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3302 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3303 if (RT_FAILURE(rc))
3304 goto failure;
3305
3306#ifdef VBOX_WITH_STATISTICS
3307 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3308 if (RT_FAILURE(rc))
3309 goto failure;
3310#endif
3311
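     /* Generate the jump from this trampoline into the existing patch block at the resolved target address. */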
3312 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3313 if (RT_FAILURE(rc))
3314 goto failure;
3315
3316 /*
3317 * Insert into patch to guest lookup tree
3318 */
3319 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3320 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3321 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3322 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3323 if (!rc)
3324 {
3325 rc = VERR_PATCHING_REFUSED;
3326 goto failure;
3327 }
3328
3329 /* size of patch block */
3330 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3331
3332 /* Update free pointer in patch memory. */
3333 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3334 /* Round to next 8 byte boundary */
3335 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3336
3337 /* There's no jump from guest to patch code. */
3338 pPatch->cbPatchJump = 0;
3339
3340 /* Enable the patch. */
3341 pPatch->uState = PATCH_ENABLED;
3342 /* We allow this patch to be called as a function. */
3343 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3344 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3345 return VINF_SUCCESS;
3346
3347failure:
3348 if (pPatchRec->CoreOffset.Key)
3349 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3350
3351 patmEmptyTree(pVM, &pPatch->FixupTree);
3352 pPatch->nrFixups = 0;
3353
3354 patmEmptyTree(pVM, &pPatch->JumpTree);
3355 pPatch->nrJumpRecs = 0;
3356
3357 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3358 pPatch->pTempInfo->nrIllegalInstr = 0;
3359
3360 /* Turn this cli patch into a dummy. */
3361 pPatch->uState = PATCH_REFUSED;
3362 pPatch->pPatchBlockOffset = 0;
3363
3364 // Give back the patch memory we no longer need
3365 Assert(orgOffsetPatchMem != (uint32_t)~0);
3366 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3367
3368 return rc;
3369}
3370
3371
3372/**
3373 * Patch branch target function for call/jump at specified location.
3374 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3375 *
3376 * @returns VBox status code.
3377 * @param pVM The VM to operate on.
3378 * @param pCtx Guest context
3379 *
3380 */
3381VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3382{
3383 RTRCPTR pBranchTarget, pPage;
3384 int rc;
3385 RTRCPTR pPatchTargetGC = 0;
3386
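     /* Register interface with the patch code (see the code below): EDX holds the branch target, EDI the
        address of the patch's lookup cache slot, and EAX receives the patch address relative to the start
        of patch memory (0 if no patch could be created). */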
3387 pBranchTarget = pCtx->edx;
3388 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3389
3390 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3391 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3392
3393 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3394 if (pPatchPage)
3395 {
3396 uint32_t i;
3397
3398 for (i=0;i<pPatchPage->cCount;i++)
3399 {
3400 if (pPatchPage->aPatch[i])
3401 {
3402 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3403
3404 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3405 && pPatch->uState == PATCH_ENABLED)
3406 {
3407 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3408 if (pPatchTargetGC)
3409 {
3410 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3411 break;
3412 }
3413 }
3414 }
3415 }
3416 }
3417
3418 if (pPatchTargetGC)
3419 {
3420 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3421 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3422 }
3423 else
3424 {
3425 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3426 }
3427
3428 if (rc == VINF_SUCCESS)
3429 {
3430 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3431 Assert(pPatchTargetGC);
3432 }
3433
3434 if (pPatchTargetGC)
3435 {
3436 pCtx->eax = pPatchTargetGC;
3437 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3438 }
3439 else
3440 {
3441 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3442 pCtx->eax = 0;
3443 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3444 }
3445 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3446 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3447 AssertRC(rc);
3448
3449 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3450 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3451 return VINF_SUCCESS;
3452}
3453
3454/**
3455 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3456 *
3457 * @returns VBox status code.
3458 * @param pVM The VM to operate on.
3459 * @param pCpu Disassembly CPU structure ptr
3460 * @param pInstrGC Guest context pointer to the privileged instruction
3461 * @param pCacheRec Cache record ptr
3462 *
3463 */
3464static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3465{
3466 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3467 int rc = VERR_PATCHING_REFUSED;
3468 DISCPUSTATE cpu;
3469 RTRCPTR pTargetGC;
3470 PPATMPATCHREC pPatchFunction;
3471 uint32_t opsize;
3472 bool disret;
3473#ifdef LOG_ENABLED
3474 char szOutput[256];
3475#endif
3476
3477 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3478 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3479
3480 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3481 {
3482 rc = VERR_PATCHING_REFUSED;
3483 goto failure;
3484 }
3485
3486 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3487 if (pTargetGC == 0)
3488 {
3489 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3490 rc = VERR_PATCHING_REFUSED;
3491 goto failure;
3492 }
3493
3494 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3495 if (pPatchFunction == NULL)
3496 {
3497 for(;;)
3498 {
3499 /* It could be an indirect call (call -> jmp dest).
3500 * Note that it's dangerous to assume the jump will never change...
3501 */
3502 uint8_t *pTmpInstrHC;
3503
3504 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3505 Assert(pTmpInstrHC);
3506 if (pTmpInstrHC == 0)
3507 break;
3508
3509 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3510 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3511 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3512 break;
3513
3514 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3515 if (pTargetGC == 0)
3516 {
3517 break;
3518 }
3519
3520 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3521 break;
3522 }
3523 if (pPatchFunction == 0)
3524 {
3525 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3526 rc = VERR_PATCHING_REFUSED;
3527 goto failure;
3528 }
3529 }
3530
3531 // make a copy of the guest code bytes that will be overwritten
3532 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3533
3534 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3535 AssertRC(rc);
3536
3537 /* Now replace the original call in the guest code */
3538 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3539 AssertRC(rc);
3540 if (RT_FAILURE(rc))
3541 goto failure;
3542
3543 /* Lowest and highest address for write monitoring. */
3544 pPatch->pInstrGCLowest = pInstrGC;
3545 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3546
3547#ifdef LOG_ENABLED
3548 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3549 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3550 Log(("Call patch: %s", szOutput));
3551#endif
3552
3553 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3554
3555 pPatch->uState = PATCH_ENABLED;
3556 return VINF_SUCCESS;
3557
3558failure:
3559 /* Turn this patch into a dummy. */
3560 pPatch->uState = PATCH_REFUSED;
3561
3562 return rc;
3563}
3564
3565/**
3566 * Replace the address in an MMIO instruction with the cached version.
3567 *
3568 * @returns VBox status code.
3569 * @param pVM The VM to operate on.
3570 * @param pInstrGC Guest context pointer to the privileged instruction
3571 * @param pCpu Disassembly CPU structure ptr
3572 * @param pCacheRec Cache record ptr
3573 *
3574 * @note returns failure if patching is not allowed or possible
3575 *
3576 */
3577static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3578{
3579 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3580 uint8_t *pPB;
3581 int rc = VERR_PATCHING_REFUSED;
3582#ifdef LOG_ENABLED
3583 DISCPUSTATE cpu;
3584 uint32_t opsize;
3585 bool disret;
3586 char szOutput[256];
3587#endif
3588
3589 Assert(pVM->patm.s.mmio.pCachedData);
3590 if (!pVM->patm.s.mmio.pCachedData)
3591 goto failure;
3592
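     /* Only instructions whose second operand is a 32-bit displacement (the MMIO address, stored in the
        last 4 bytes of the instruction) can be handled here. */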
3593 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3594 goto failure;
3595
3596 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3597 if (pPB == 0)
3598 goto failure;
3599
3600 /* Add relocation record for cached data access. */
3601 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3602 {
3603 Log(("Relocation failed for cached mmio address!!\n"));
3604 return VERR_PATCHING_REFUSED;
3605 }
3606#ifdef LOG_ENABLED
3607 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3608 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3609 Log(("MMIO patch old instruction: %s", szOutput));
3610#endif
3611
3612 /* Save original instruction. */
3613 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3614 AssertRC(rc);
3615
3616 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3617
3618 /* Replace address with that of the cached item. */
3619 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3620 AssertRC(rc);
3621 if (RT_FAILURE(rc))
3622 {
3623 goto failure;
3624 }
3625
3626#ifdef LOG_ENABLED
3627 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3628 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3629 Log(("MMIO patch: %s", szOutput));
3630#endif
3631 pVM->patm.s.mmio.pCachedData = 0;
3632 pVM->patm.s.mmio.GCPhys = 0;
3633 pPatch->uState = PATCH_ENABLED;
3634 return VINF_SUCCESS;
3635
3636failure:
3637 /* Turn this patch into a dummy. */
3638 pPatch->uState = PATCH_REFUSED;
3639
3640 return rc;
3641}
3642
3643
3644/**
3645 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3646 *
3647 * @returns VBox status code.
3648 * @param pVM The VM to operate on.
3649 * @param pInstrGC Guest context pointer to the privileged instruction
3650 * @param pPatch Patch record
3651 *
3652 * @note returns failure if patching is not allowed or possible
3653 *
3654 */
3655static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3656{
3657 DISCPUSTATE cpu;
3658 uint32_t opsize;
3659 bool disret;
3660 uint8_t *pInstrHC;
3661#ifdef LOG_ENABLED
3662 char szOutput[256];
3663#endif
3664
3665 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3666
3667 /* Convert GC to HC address. */
3668 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3669 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3670
3671 /* Disassemble mmio instruction. */
3672 cpu.mode = pPatch->uOpMode;
3673 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3674 if (disret == false)
3675 {
3676 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3677 return VERR_PATCHING_REFUSED;
3678 }
3679
3680 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3681 if (opsize > MAX_INSTR_SIZE)
3682 return VERR_PATCHING_REFUSED;
3683 if (cpu.param2.flags != USE_DISPLACEMENT32)
3684 return VERR_PATCHING_REFUSED;
3685
3686 /* Add relocation record for cached data access. */
3687 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3688 {
3689 Log(("Relocation failed for cached mmio address!!\n"));
3690 return VERR_PATCHING_REFUSED;
3691 }
3692 /* Replace address with that of the cached item. */
3693 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3694
3695 /* Lowest and highest address for write monitoring. */
3696 pPatch->pInstrGCLowest = pInstrGC;
3697 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3698
3699#ifdef LOG_ENABLED
3700 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3701 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3702 Log(("MMIO patch: %s", szOutput));
3703#endif
3704
3705 pVM->patm.s.mmio.pCachedData = 0;
3706 pVM->patm.s.mmio.GCPhys = 0;
3707 return VINF_SUCCESS;
3708}
3709
3710/**
3711 * Activates an int3 patch
3712 *
3713 * @returns VBox status code.
3714 * @param pVM The VM to operate on.
3715 * @param pPatch Patch record
3716 */
3717static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3718{
3719 uint8_t ASMInt3 = 0xCC;
3720 int rc;
3721
3722 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3723 Assert(pPatch->uState != PATCH_ENABLED);
3724
3725 /* Replace first opcode byte with 'int 3'. */
3726 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3727 AssertRC(rc);
3728
3729 pPatch->cbPatchJump = sizeof(ASMInt3);
3730
3731 return rc;
3732}
3733
3734/**
3735 * Deactivates an int3 patch
3736 *
3737 * @returns VBox status code.
3738 * @param pVM The VM to operate on.
3739 * @param pPatch Patch record
3740 */
3741static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3742{
3743 uint8_t ASMInt3 = 0xCC;
3744 int rc;
3745
3746 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3747 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3748
3749 /* Restore first opcode byte. */
3750 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3751 AssertRC(rc);
3752 return rc;
3753}
3754
3755/**
3756 * Replace an instruction with a breakpoint (0xCC) that is handled dynamically in the guest context.
3757 *
3758 * @returns VBox status code.
3759 * @param pVM The VM to operate on.
3760 * @param pInstrGC Guest context pointer to the privileged instruction
3761 * @param pInstrHC Host context pointer to the privileged instruction
3762 * @param pCpu Disassembly CPU structure ptr
3763 * @param pPatch Patch record
3764 *
3765 * @note returns failure if patching is not allowed or possible
3766 *
3767 */
3768VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3769{
3770 uint8_t ASMInt3 = 0xCC;
3771 int rc;
3772
3773 /** @note Do not use patch memory here! It might be called during patch installation too. */
3774
3775#ifdef LOG_ENABLED
3776 DISCPUSTATE cpu;
3777 char szOutput[256];
3778 uint32_t opsize;
3779
3780 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3781 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3782 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3783#endif
3784
3785 /* Save the original instruction. */
3786 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3787 AssertRC(rc);
3788 pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3789
3790 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3791
3792 /* Replace first opcode byte with 'int 3'. */
3793 rc = patmActivateInt3Patch(pVM, pPatch);
3794 if (RT_FAILURE(rc))
3795 goto failure;
3796
3797 /* Lowest and highest address for write monitoring. */
3798 pPatch->pInstrGCLowest = pInstrGC;
3799 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3800
3801 pPatch->uState = PATCH_ENABLED;
3802 return VINF_SUCCESS;
3803
3804failure:
3805 /* Turn this patch into a dummy. */
3806 return VERR_PATCHING_REFUSED;
3807}
3808
3809#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3810/**
3811 * Patch a jump instruction at specified location
3812 *
3813 * @returns VBox status code.
3814 * @param pVM The VM to operate on.
3815 * @param pInstrGC Guest context pointer to the privileged instruction
3816 * @param pInstrHC Host context pointer to the privileged instruction
3817 * @param pCpu Disassembly CPU structure ptr
3818 * @param pPatchRec Patch record
3819 *
3820 * @note returns failure if patching is not allowed or possible
3821 *
3822 */
3823int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3824{
3825 PPATCHINFO pPatch = &pPatchRec->patch;
3826 int rc = VERR_PATCHING_REFUSED;
3827#ifdef LOG_ENABLED
3828 bool disret;
3829 DISCPUSTATE cpu;
3830 uint32_t opsize;
3831 char szOutput[256];
3832#endif
3833
3834 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3835 pPatch->uCurPatchOffset = 0;
3836 pPatch->cbPatchBlockSize = 0;
3837 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3838
3839 /*
3840 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3841 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3842 */
3843 switch (pCpu->pCurInstr->opcode)
3844 {
3845 case OP_JO:
3846 case OP_JNO:
3847 case OP_JC:
3848 case OP_JNC:
3849 case OP_JE:
3850 case OP_JNE:
3851 case OP_JBE:
3852 case OP_JNBE:
3853 case OP_JS:
3854 case OP_JNS:
3855 case OP_JP:
3856 case OP_JNP:
3857 case OP_JL:
3858 case OP_JNL:
3859 case OP_JLE:
3860 case OP_JNLE:
3861 case OP_JMP:
3862 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3863 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3864 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3865 goto failure;
3866
3867 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3868 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3869 goto failure;
3870
3871 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3872 {
3873 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3874 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3875 rc = VERR_PATCHING_REFUSED;
3876 goto failure;
3877 }
3878
3879 break;
3880
3881 default:
3882 goto failure;
3883 }
3884
3885 // make a copy of the guest code bytes that will be overwritten
3886 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3887 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3888 pPatch->cbPatchJump = pCpu->opsize;
3889
3890 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3891 AssertRC(rc);
3892
3893 /* Now insert a jump in the guest code. */
3894 /*
3895 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3896 * references the target instruction in the conflict patch.
3897 */
3898 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3899
3900 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3901 pPatch->pPatchJumpDestGC = pJmpDest;
3902
3903 PATMP2GLOOKUPREC cacheRec;
3904 RT_ZERO(cacheRec);
3905 cacheRec.pPatch = pPatch;
3906
3907 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
3908 /* Free leftover lock if any. */
3909 if (cacheRec.Lock.pvMap)
3910 {
3911 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3912 cacheRec.Lock.pvMap = NULL;
3913 }
3914 AssertRC(rc);
3915 if (RT_FAILURE(rc))
3916 goto failure;
3917
3918 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3919
3920#ifdef LOG_ENABLED
3921 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3922 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3923 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3924#endif
3925
3926 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3927
3928 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3929
3930 /* Lowest and highest address for write monitoring. */
3931 pPatch->pInstrGCLowest = pInstrGC;
3932 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3933
3934 pPatch->uState = PATCH_ENABLED;
3935 return VINF_SUCCESS;
3936
3937failure:
3938 /* Turn this cli patch into a dummy. */
3939 pPatch->uState = PATCH_REFUSED;
3940
3941 return rc;
3942}
3943#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3944
3945
3946/**
3947 * Gives a hint to PATM about supervisor guest instructions
3948 *
3949 * @returns VBox status code.
3950 * @param pVM The VM to operate on.
3951 * @param pInstrGC Guest context pointer to the privileged instruction
3952 * @param flags Patch flags
3953 */
3954VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
3955{
3956 Assert(pInstrGC);
3957 Assert(flags == PATMFL_CODE32);
3958
3959 Log(("PATMR3AddHint %RRv\n", pInstrGC));
3960 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
3961}
3962
3963/**
3964 * Patch privileged instruction at specified location
3965 *
3966 * @returns VBox status code.
3967 * @param pVM The VM to operate on.
3968 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
3969 * @param flags Patch flags
3970 *
3971 * @note returns failure if patching is not allowed or possible
3972 */
3973VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
3974{
3975 DISCPUSTATE cpu;
3976 R3PTRTYPE(uint8_t *) pInstrHC;
3977 uint32_t opsize;
3978 PPATMPATCHREC pPatchRec;
3979 PCPUMCTX pCtx = 0;
3980 bool disret;
3981 int rc;
3982 PVMCPU pVCpu = VMMGetCpu0(pVM);
3983
3984 if ( !pVM
3985 || pInstrGC == 0
3986 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
3987 {
3988 AssertFailed();
3989 return VERR_INVALID_PARAMETER;
3990 }
3991
3992 if (PATMIsEnabled(pVM) == false)
3993 return VERR_PATCHING_REFUSED;
3994
3995 /* Test for patch conflict only with patches that actually change guest code. */
3996 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
3997 {
3998 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
3999 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4000 if (pConflictPatch != 0)
4001 return VERR_PATCHING_REFUSED;
4002 }
4003
4004 if (!(flags & PATMFL_CODE32))
4005 {
4006 /** @todo Only 32-bit code right now */
4007 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at this moment!!\n"));
4008 return VERR_NOT_IMPLEMENTED;
4009 }
4010
4011 /* We ran out of patch memory; don't bother anymore. */
4012 if (pVM->patm.s.fOutOfMemory == true)
4013 return VERR_PATCHING_REFUSED;
4014
4015 /* Make sure the code selector is wide open; otherwise refuse. */
4016 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4017 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4018 {
4019 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4020 if (pInstrGCFlat != pInstrGC)
4021 {
4022 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4023 return VERR_PATCHING_REFUSED;
4024 }
4025 }
4026
4027 /** @note the OpenBSD-specific check will break if we allow additional patches to be installed (int 3) */
4028 if (!(flags & PATMFL_GUEST_SPECIFIC))
4029 {
4030 /* New code. Make sure CSAM has a go at it first. */
4031 CSAMR3CheckCode(pVM, pInstrGC);
4032 }
4033
4034 /** @note obsolete */
4035 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4036 && (flags & PATMFL_MMIO_ACCESS))
4037 {
4038 RTRCUINTPTR offset;
4039 void *pvPatchCoreOffset;
4040
4041 /* Find the patch record. */
4042 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4043 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4044 if (pvPatchCoreOffset == NULL)
4045 {
4046 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4047 return VERR_PATCH_NOT_FOUND; //fatal error
4048 }
4049 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4050
4051 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4052 }
4053
4054 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4055
4056 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4057 if (pPatchRec)
4058 {
4059 Assert(!(flags & PATMFL_TRAMPOLINE));
4060
4061 /* Hints about existing patches are ignored. */
4062 if (flags & PATMFL_INSTR_HINT)
4063 return VERR_PATCHING_REFUSED;
4064
4065 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4066 {
4067 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4068 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4069 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4070 }
4071
4072 if (pPatchRec->patch.uState == PATCH_DISABLED)
4073 {
4074 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4075 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4076 {
4077 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4078 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4079 }
4080 else
4081 Log(("Enabling patch %RRv again\n", pInstrGC));
4082
4083 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4084 rc = PATMR3EnablePatch(pVM, pInstrGC);
4085 if (RT_SUCCESS(rc))
4086 return VWRN_PATCH_ENABLED;
4087
4088 return rc;
4089 }
4090 if ( pPatchRec->patch.uState == PATCH_ENABLED
4091 || pPatchRec->patch.uState == PATCH_DIRTY)
4092 {
4093 /*
4094 * The patch might have been overwritten.
4095 */
4096 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4097 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4098 {
4099 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4100 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4101 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4102 {
4103 if (flags & PATMFL_IDTHANDLER)
4104 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4105
4106 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4107 }
4108 }
4109 rc = PATMR3RemovePatch(pVM, pInstrGC);
4110 if (RT_FAILURE(rc))
4111 return VERR_PATCHING_REFUSED;
4112 }
4113 else
4114 {
4115 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4116 /* already tried it once! */
4117 return VERR_PATCHING_REFUSED;
4118 }
4119 }
4120
4121 RTGCPHYS GCPhys;
4122 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4123 if (rc != VINF_SUCCESS)
4124 {
4125 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4126 return rc;
4127 }
4128 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4129 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4130 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4131 {
4132 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4133 return VERR_PATCHING_REFUSED;
4134 }
4135
4136 /* Initialize cache record for guest address translations. */
4137 PATMP2GLOOKUPREC cacheRec;
4138 RT_ZERO(cacheRec);
4139
4140 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4141 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4142
4143 /* Allocate patch record. */
4144 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4145 if (RT_FAILURE(rc))
4146 {
4147 Log(("Out of memory!!!!\n"));
4148 return VERR_NO_MEMORY;
4149 }
4150 pPatchRec->Core.Key = pInstrGC;
4151 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4152 /* Insert patch record into the lookup tree. */
4153 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4154 Assert(rc);
4155
4156 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4157 pPatchRec->patch.flags = flags;
4158 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4159
4160 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4161 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4162
4163 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4164 {
4165 /*
4166 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4167 */
4168 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4169 if (pPatchNear)
4170 {
4171 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4172 {
4173 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4174
4175 pPatchRec->patch.uState = PATCH_UNUSABLE;
4176 /*
4177 * Leave the new patch record in place, marked unusable, so we don't check it over and over again.
4178 */
4179 return VERR_PATCHING_REFUSED;
4180 }
4181 }
4182 }
4183
4184 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4185 if (pPatchRec->patch.pTempInfo == 0)
4186 {
4187 Log(("Out of memory!!!!\n"));
4188 return VERR_NO_MEMORY;
4189 }
4190
4191 cpu.mode = pPatchRec->patch.uOpMode;
4192 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, NULL, &opsize, NULL);
4193 if (disret == false)
4194 {
4195 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4196 return VERR_PATCHING_REFUSED;
4197 }
4198
4199 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4200 if (opsize > MAX_INSTR_SIZE)
4201 return VERR_PATCHING_REFUSED;
4202
4203 pPatchRec->patch.cbPrivInstr = opsize;
4204 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4205
4206 /* Restricted hinting for now. */
4207 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4208
4209 /* Initialize cache record patch pointer. */
4210 cacheRec.pPatch = &pPatchRec->patch;
4211
4212 /* Allocate statistics slot */
4213 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4214 {
4215 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4216 }
4217 else
4218 {
4219 Log(("WARNING: Patch index wrap around!!\n"));
4220 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4221 }
4222
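     /* Dispatch to the appropriate patch installer based on the requested patch type. */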
4223 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4224 {
4225 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4226 }
4227 else
4228 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4229 {
4230 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4231 }
4232 else
4233 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4234 {
4235 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4236 }
4237 else
4238 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4239 {
4240 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4241 }
4242 else
4243 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4244 {
4245 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4246 }
4247 else
4248 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4249 {
4250 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4251 }
4252 else
4253 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4254 {
4255 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4256 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4257
4258 rc = patmIdtHandler(pVM, pInstrGC, opsize, pPatchRec, &cacheRec);
4259#ifdef VBOX_WITH_STATISTICS
4260 if ( rc == VINF_SUCCESS
4261 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4262 {
4263 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4264 }
4265#endif
4266 }
4267 else
4268 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4269 {
4270 switch (cpu.pCurInstr->opcode)
4271 {
4272 case OP_SYSENTER:
4273 case OP_PUSH:
4274 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4275 if (rc == VINF_SUCCESS)
4276 {
4277 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n",
4278 patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4279 return rc;
4280 }
4281 break;
4282
4283 default:
4284 rc = VERR_NOT_IMPLEMENTED;
4285 break;
4286 }
4287 }
4288 else
4289 {
4290 switch (cpu.pCurInstr->opcode)
4291 {
4292 case OP_SYSENTER:
4293 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4294 if (rc == VINF_SUCCESS)
4295 {
4296 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4297 return VINF_SUCCESS;
4298 }
4299 break;
4300
4301#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4302 case OP_JO:
4303 case OP_JNO:
4304 case OP_JC:
4305 case OP_JNC:
4306 case OP_JE:
4307 case OP_JNE:
4308 case OP_JBE:
4309 case OP_JNBE:
4310 case OP_JS:
4311 case OP_JNS:
4312 case OP_JP:
4313 case OP_JNP:
4314 case OP_JL:
4315 case OP_JNL:
4316 case OP_JLE:
4317 case OP_JNLE:
4318 case OP_JECXZ:
4319 case OP_LOOP:
4320 case OP_LOOPNE:
4321 case OP_LOOPE:
4322 case OP_JMP:
4323 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4324 {
4325 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4326 break;
4327 }
4328 return VERR_NOT_IMPLEMENTED;
4329#endif
4330
4331 case OP_PUSHF:
4332 case OP_CLI:
4333 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4334 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4335 break;
4336
4337 case OP_STR:
4338 case OP_SGDT:
4339 case OP_SLDT:
4340 case OP_SIDT:
4341 case OP_CPUID:
4342 case OP_LSL:
4343 case OP_LAR:
4344 case OP_SMSW:
4345 case OP_VERW:
4346 case OP_VERR:
4347 case OP_IRET:
4348 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4349 break;
4350
4351 default:
4352 return VERR_NOT_IMPLEMENTED;
4353 }
4354 }
4355
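     /* Installation failed: throw away any patch-to-guest translation records and give back the statistics index. */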
4356 if (rc != VINF_SUCCESS)
4357 {
4358 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4359 {
4360 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4361 pPatchRec->patch.nrPatch2GuestRecs = 0;
4362 }
4363 pVM->patm.s.uCurrentPatchIdx--;
4364 }
4365 else
4366 {
4367 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4368 AssertRCReturn(rc, rc);
4369
4370 /* Keep track of the lowest and highest addresses of patched instructions. */
4371 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4372 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4373 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4374 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4375
4376 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4377 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4378
4379 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4380 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4381
4382 rc = VINF_SUCCESS;
4383
4384 /* Patch hints are not enabled by default; they are only enabled when actually encountered. */
4385 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4386 {
4387 rc = PATMR3DisablePatch(pVM, pInstrGC);
4388 AssertRCReturn(rc, rc);
4389 }
4390
4391#ifdef VBOX_WITH_STATISTICS
4392 /* Register statistics counter */
4393 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4394 {
4395 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4396 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4397#ifndef DEBUG_sandervl
4398 /* Full breakdown for the GUI. */
4399 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4400 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4401 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4402 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4403 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4404 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4405 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4406 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4407 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4408 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4409 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4410 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4411 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4412 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4413 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4414 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4415#endif
4416 }
4417#endif
4418 }
4419 /* Free leftover lock if any. */
4420 if (cacheRec.Lock.pvMap)
4421 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4422 return rc;
4423}
4424
4425/**
4426 * Query instruction size
4427 *
4428 * @returns VBox status code.
4429 * @param pVM The VM to operate on.
4430 * @param pPatch Patch record
4431 * @param pInstrGC Instruction address
4432 */
4433static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4434{
4435 uint8_t *pInstrHC;
4436 PGMPAGEMAPLOCK Lock;
4437
4438 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4439 if (rc == VINF_SUCCESS)
4440 {
4441 DISCPUSTATE cpu;
4442 bool disret;
4443 uint32_t opsize;
4444
4445 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4446 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4447 PGMPhysReleasePageMappingLock(pVM, &Lock);
4448 if (disret)
4449 return opsize;
4450 }
4451 return 0;
4452}
4453
4454/**
4455 * Add patch to page record
4456 *
4457 * @returns VBox status code.
4458 * @param pVM The VM to operate on.
4459 * @param pPage Page address
4460 * @param pPatch Patch record
4461 */
4462int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4463{
4464 PPATMPATCHPAGE pPatchPage;
4465 int rc;
4466
4467 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4468
4469 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4470 if (pPatchPage)
4471 {
4472 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
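         /* The preallocated patch pointer array is full; grow it by PATMPATCHPAGE_PREALLOC_INCREMENT
            entries and copy the existing pointers over. */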
4473 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4474 {
4475 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4476 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4477
4478 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4479 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4480 if (RT_FAILURE(rc))
4481 {
4482 Log(("Out of memory!!!!\n"));
4483 return VERR_NO_MEMORY;
4484 }
4485 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4486 MMHyperFree(pVM, paPatchOld);
4487 }
4488 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4489 pPatchPage->cCount++;
4490 }
4491 else
4492 {
4493 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4494 if (RT_FAILURE(rc))
4495 {
4496 Log(("Out of memory!!!!\n"));
4497 return VERR_NO_MEMORY;
4498 }
4499 pPatchPage->Core.Key = pPage;
4500 pPatchPage->cCount = 1;
4501 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4502
4503 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4504 if (RT_FAILURE(rc))
4505 {
4506 Log(("Out of memory!!!!\n"));
4507 MMHyperFree(pVM, pPatchPage);
4508 return VERR_NO_MEMORY;
4509 }
4510 pPatchPage->aPatch[0] = pPatch;
4511
4512 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4513 Assert(rc);
4514 pVM->patm.s.cPageRecords++;
4515
4516 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4517 }
4518 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4519
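     /* Track the lowest and highest patched guest addresses inside this page, so PATMR3PatchWrite can
        quickly reject writes that don't touch the monitored range. */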
4520 /* Get the closest guest instruction (from below) */
4521 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4522 Assert(pGuestToPatchRec);
4523 if (pGuestToPatchRec)
4524 {
4525 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4526 if ( pPatchPage->pLowestAddrGC == 0
4527 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4528 {
4529 RTRCUINTPTR offset;
4530
4531 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4532
4533 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4534 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4535 if (offset && offset < MAX_INSTR_SIZE)
4536 {
4537 /* Get the closest guest instruction (from above) */
4538 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4539
4540 if (pGuestToPatchRec)
4541 {
4542 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4543 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4544 {
4545 pPatchPage->pLowestAddrGC = pPage;
4546 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4547 }
4548 }
4549 }
4550 }
4551 }
4552
4553 /* Get the closest guest instruction (from above) */
4554 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4555 Assert(pGuestToPatchRec);
4556 if (pGuestToPatchRec)
4557 {
4558 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4559 if ( pPatchPage->pHighestAddrGC == 0
4560 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4561 {
4562 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4563 /* Increase by instruction size. */
4564 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4565//// Assert(size);
4566 pPatchPage->pHighestAddrGC += size;
4567 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4568 }
4569 }
4570
4571 return VINF_SUCCESS;
4572}
4573
4574/**
4575 * Remove patch from page record
4576 *
4577 * @returns VBox status code.
4578 * @param pVM The VM to operate on.
4579 * @param pPage Page address
4580 * @param pPatch Patch record
4581 */
4582int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4583{
4584 PPATMPATCHPAGE pPatchPage;
4585 int rc;
4586
4587 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4588 Assert(pPatchPage);
4589
4590 if (!pPatchPage)
4591 return VERR_INVALID_PARAMETER;
4592
4593 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4594
4595 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4596 if (pPatchPage->cCount > 1)
4597 {
4598 uint32_t i;
4599
4600 /* Used by multiple patches */
4601 for (i=0;i<pPatchPage->cCount;i++)
4602 {
4603 if (pPatchPage->aPatch[i] == pPatch)
4604 {
4605 pPatchPage->aPatch[i] = 0;
4606 break;
4607 }
4608 }
4609 /* close the gap between the remaining pointers. */
4610 if (i < pPatchPage->cCount - 1)
4611 {
4612 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4613 }
4614 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4615
4616 pPatchPage->cCount--;
4617 }
4618 else
4619 {
4620 PPATMPATCHPAGE pPatchNode;
4621
4622 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4623
4624 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4625 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4626 Assert(pPatchNode && pPatchNode == pPatchPage);
4627
4628 Assert(pPatchPage->aPatch);
4629 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4630 AssertRC(rc);
4631 rc = MMHyperFree(pVM, pPatchPage);
4632 AssertRC(rc);
4633 pVM->patm.s.cPageRecords--;
4634 }
4635 return VINF_SUCCESS;
4636}
4637
4638/**
4639 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4640 *
4641 * @returns VBox status code.
4642 * @param pVM The VM to operate on.
4643 * @param pPatch Patch record
4644 */
4645int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4646{
4647 int rc;
4648 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4649
4650 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4651 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4652 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4653
4654 /** @todo optimize better (large gaps between current and next used page) */
4655 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4656 {
4657 /* Get the closest guest instruction (from above) */
4658 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4659 if ( pGuestToPatchRec
4660 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4661 )
4662 {
4663 /* Code in page really patched -> add record */
4664 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4665 AssertRC(rc);
4666 }
4667 }
4668 pPatch->flags |= PATMFL_CODE_MONITORED;
4669 return VINF_SUCCESS;
4670}
4671
4672/**
4673 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4674 *
4675 * @returns VBox status code.
4676 * @param pVM The VM to operate on.
4677 * @param pPatch Patch record
4678 */
4679int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4680{
4681 int rc;
4682 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4683
4684 /* Remove the page records of the pages that contain patched instructions (lookup tree used for detecting self-modifying code). */
4685 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4686 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4687
4688 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4689 {
4690 /* Get the closest guest instruction (from above) */
4691 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4692 if ( pGuestToPatchRec
4693 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4694 )
4695 {
4696 /* Code in page really patched -> remove record */
4697 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4698 AssertRC(rc);
4699 }
4700 }
4701 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4702 return VINF_SUCCESS;
4703}
4704
4705/**
4706 * Notifies PATM about a (potential) write to code that has been patched.
4707 *
4708 * @returns VBox status code.
4709 * @param pVM The VM to operate on.
4710 * @param GCPtr GC pointer to write address
4711 * @param cbWrite Number of bytes to write
4712 *
4713 */
4714VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4715{
4716 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4717
4718 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4719
4720 Assert(VM_IS_EMT(pVM));
4721
4722 /* Quick boundary check */
4723 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4724 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4725 )
4726 return VINF_SUCCESS;
4727
4728 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4729
4730 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4731 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4732
4733 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4734 {
4735loop_start:
4736 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4737 if (pPatchPage)
4738 {
4739 uint32_t i;
4740 bool fValidPatchWrite = false;
4741
4742 /* Quick check to see if the write is in the patched part of the page */
4743 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4744 || pPatchPage->pHighestAddrGC < GCPtr)
4745 {
4746 break;
4747 }
4748
4749 for (i=0;i<pPatchPage->cCount;i++)
4750 {
4751 if (pPatchPage->aPatch[i])
4752 {
4753 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4754 RTRCPTR pPatchInstrGC;
4755 //unused: bool fForceBreak = false;
4756
4757 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4758 /** @todo inefficient and includes redundant checks for multiple pages. */
4759 for (uint32_t j=0; j<cbWrite; j++)
4760 {
4761 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4762
4763 if ( pPatch->cbPatchJump
4764 && pGuestPtrGC >= pPatch->pPrivInstrGC
4765 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4766 {
4767 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4768 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4769 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4770 if (rc == VINF_SUCCESS)
4771 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4772 goto loop_start;
4773
4774 continue;
4775 }
4776
4777 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4778 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4779 if (!pPatchInstrGC)
4780 {
4781 RTRCPTR pClosestInstrGC;
4782 uint32_t size;
4783
4784 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4785 if (pPatchInstrGC)
4786 {
4787 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4788 Assert(pClosestInstrGC <= pGuestPtrGC);
4789 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4790 /* Check if this is not a write into a gap between two patches */
4791 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4792 pPatchInstrGC = 0;
4793 }
4794 }
4795 if (pPatchInstrGC)
4796 {
4797 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4798
4799 fValidPatchWrite = true;
4800
4801 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4802 Assert(pPatchToGuestRec);
4803 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4804 {
4805 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4806
4807 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4808 {
4809 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4810
4811 PATMR3MarkDirtyPatch(pVM, pPatch);
4812
4813 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4814 goto loop_start;
4815 }
4816 else
4817 {
4818 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4819 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4820
4821 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4822 pPatchToGuestRec->fDirty = true;
4823
4824 *pInstrHC = 0xCC;
4825
4826 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4827 }
4828 }
4829 /* else already marked dirty */
4830 }
4831 }
4832 }
4833 } /* for each patch */
4834
4835 if (fValidPatchWrite == false)
4836 {
4837 /* Write to a part of the page that either:
4838 * - doesn't contain any code (shared code/data); rather unlikely, or
4839 * - belongs to an old code page that's no longer in active use.
4840 */
4841invalid_write_loop_start:
4842 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4843
4844 if (pPatchPage)
4845 {
4846 for (i=0;i<pPatchPage->cCount;i++)
4847 {
4848 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4849
4850 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4851 {
4852 /** @note possibly dangerous assumption that all future writes will be harmless. */
4853 if (pPatch->flags & PATMFL_IDTHANDLER)
4854 {
4855 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4856
4857 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4858 int rc = patmRemovePatchPages(pVM, pPatch);
4859 AssertRC(rc);
4860 }
4861 else
4862 {
4863 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4864 PATMR3MarkDirtyPatch(pVM, pPatch);
4865 }
4866 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4867 goto invalid_write_loop_start;
4868 }
4869 } /* for */
4870 }
4871 }
4872 }
4873 }
4874 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4875 return VINF_SUCCESS;
4876
4877}
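/*
 * Usage sketch (hypothetical caller, not part of the original source): a guest write
 * handler that has already established the write range would forward it to PATM like
 * this; GCPtrWrite and cbWrite are assumed names for the write address and size.
 * PATMR3PatchWrite performs its own quick range check, so calling it on the EMT for
 * every monitored write is cheap when no patched code is affected.
 *
 *     if (PATMIsEnabled(pVM))
 *     {
 *         int rc = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
 *         AssertRC(rc);
 *     }
 */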
4878
4879/**
4880 * Disable all patches in a flushed page
4881 *
4882 * @returns VBox status code
4883 * @param pVM The VM to operate on.
4884 * @param addr GC address of the page to flush
4885 */
4886/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4887 */
4888VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4889{
4890 addr &= PAGE_BASE_GC_MASK;
4891
4892 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4893 if (pPatchPage)
4894 {
4895 int i;
4896
4897 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4898 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4899 {
4900 if (pPatchPage->aPatch[i])
4901 {
4902 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4903
4904 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4905 PATMR3MarkDirtyPatch(pVM, pPatch);
4906 }
4907 }
4908 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4909 }
4910 return VINF_SUCCESS;
4911}
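/*
 * Usage sketch (hypothetical, not in the original source): CSAM notifies PATM when it
 * flushes a page so that every patch in that page gets marked dirty; GCPtrPage is an
 * assumed name for the guest address of the flushed page (any address within the page
 * works, as the function masks it down to the page base).
 *
 *     PATMR3FlushPage(pVM, GCPtrPage);
 */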
4912
4913/**
4914 * Checks whether the instruction at the specified address has already been patched.
4915 *
4916 * @returns boolean, patched or not
4917 * @param pVM The VM to operate on.
4918 * @param pInstrGC Guest context pointer to instruction
4919 */
4920VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4921{
4922 PPATMPATCHREC pPatchRec;
4923 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4924 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4925 return true;
4926 return false;
4927}
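/*
 * Usage sketch (hypothetical): a quick existence check before attempting to install a
 * new patch for a privileged instruction; pInstrGC is an assumed name.
 *
 *     if (!PATMR3HasBeenPatched(pVM, pInstrGC))
 *         rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32);
 */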
4928
4929/**
4930 * Query the opcode of the original code that was overwritten by the 5 byte patch jump
4931 *
4932 * @returns VBox status code.
4933 * @param pVM The VM to operate on.
4934 * @param pInstrGC GC address of instr
4935 * @param pByte opcode byte pointer (OUT)
4936 *
4937 */
4938VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4939{
4940 PPATMPATCHREC pPatchRec;
4941
4942 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4943
4944 /* Shortcut. */
4945 if ( !PATMIsEnabled(pVM)
4946 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4947 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4948 {
4949 return VERR_PATCH_NOT_FOUND;
4950 }
4951
4952 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
4953 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
4954 if ( pPatchRec
4955 && pPatchRec->patch.uState == PATCH_ENABLED
4956 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
4957 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
4958 {
4959 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
4960 *pByte = pPatchRec->patch.aPrivInstr[offset];
4961
4962 if (pPatchRec->patch.cbPatchJump == 1)
4963 {
4964 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
4965 }
4966 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
4967 return VINF_SUCCESS;
4968 }
4969 return VERR_PATCH_NOT_FOUND;
4970}
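/*
 * Usage sketch (hypothetical): code that reads guest instruction bytes can ask PATM for
 * the original opcode first and fall back to a normal guest read when the address is not
 * covered by a patch jump; pInstrGC and bOpcode are assumed names.
 *
 *     uint8_t bOpcode;
 *     int rc = PATMR3QueryOpcode(pVM, pInstrGC, &bOpcode);
 *     if (rc == VERR_PATCH_NOT_FOUND)
 *         rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), &bOpcode, pInstrGC, sizeof(bOpcode));
 */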
4971
4972/**
4973 * Disable patch for privileged instruction at specified location
4974 *
4975 * @returns VBox status code.
4976 * @param pVM The VM to operate on.
4977 * @param pInstrGC Guest context pointer to the privileged instruction
4978 *
4979 * @note Returns failure if patching is not allowed or not possible.
4980 *
4981 */
4982VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
4983{
4984 PPATMPATCHREC pPatchRec;
4985 PPATCHINFO pPatch;
4986
4987 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
4988 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4989 if (pPatchRec)
4990 {
4991 int rc = VINF_SUCCESS;
4992
4993 pPatch = &pPatchRec->patch;
4994
4995 /* Already disabled? */
4996 if (pPatch->uState == PATCH_DISABLED)
4997 return VINF_SUCCESS;
4998
4999 /* Clear the IDT entries for the patch we're disabling. */
5000 /** @note very important as we clear IF in the patch itself */
5001 /** @todo this needs to be changed */
5002 if (pPatch->flags & PATMFL_IDTHANDLER)
5003 {
5004 uint32_t iGate;
5005
5006 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5007 if (iGate != (uint32_t)~0)
5008 {
5009 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5010 if (++cIDTHandlersDisabled < 256)
5011 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5012 }
5013 }
5014
5015 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, trampoline or idt patches) */
5016 if ( pPatch->pPatchBlockOffset
5017 && pPatch->uState == PATCH_ENABLED)
5018 {
5019 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5020 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5021 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5022 }
5023
5024 /* IDT or function patches haven't changed any guest code. */
5025 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5026 {
5027 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5028 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5029
5030 if (pPatch->uState != PATCH_REFUSED)
5031 {
5032 uint8_t temp[16];
5033
5034 Assert(pPatch->cbPatchJump < sizeof(temp));
5035
5036 /* Let's first check if the guest code is still the same. */
5037 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5038 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5039 if (rc == VINF_SUCCESS)
5040 {
5041 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5042
5043 if ( temp[0] != 0xE9 /* jmp opcode */
5044 || *(RTRCINTPTR *)(&temp[1]) != displ
5045 )
5046 {
5047 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5048 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5049 /* Remove it completely */
5050 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5051 rc = PATMR3RemovePatch(pVM, pInstrGC);
5052 AssertRC(rc);
5053 return VWRN_PATCH_REMOVED;
5054 }
5055 patmRemoveJumpToPatch(pVM, pPatch);
5056 }
5057 else
5058 {
5059 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5060 pPatch->uState = PATCH_DISABLE_PENDING;
5061 }
5062 }
5063 else
5064 {
5065 AssertMsgFailed(("Patch was refused!\n"));
5066 return VERR_PATCH_ALREADY_DISABLED;
5067 }
5068 }
5069 else
5070 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5071 {
5072 uint8_t temp[16];
5073
5074 Assert(pPatch->cbPatchJump < sizeof(temp));
5075
5076 /* Let's first check if the guest code is still the same. */
5077 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5078 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5079 if (rc == VINF_SUCCESS)
5080 {
5081 if (temp[0] != 0xCC)
5082 {
5083 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5084 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5085 /* Remove it completely */
5086 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5087 rc = PATMR3RemovePatch(pVM, pInstrGC);
5088 AssertRC(rc);
5089 return VWRN_PATCH_REMOVED;
5090 }
5091 patmDeactivateInt3Patch(pVM, pPatch);
5092 }
5093 }
5094
5095 if (rc == VINF_SUCCESS)
5096 {
5097 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5098 if (pPatch->uState == PATCH_DISABLE_PENDING)
5099 {
5100 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5101 pPatch->uState = PATCH_UNUSABLE;
5102 }
5103 else
5104 if (pPatch->uState != PATCH_DIRTY)
5105 {
5106 pPatch->uOldState = pPatch->uState;
5107 pPatch->uState = PATCH_DISABLED;
5108 }
5109 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5110 }
5111
5112 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5113 return VINF_SUCCESS;
5114 }
5115 Log(("Patch not found!\n"));
5116 return VERR_PATCH_NOT_FOUND;
5117}
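/*
 * Usage sketch (hypothetical): taking a patch out of service and interpreting the
 * possible outcomes; pInstrGC is the guest address of the patched privileged instruction.
 *
 *     int rc = PATMR3DisablePatch(pVM, pInstrGC);
 *     if (rc == VWRN_PATCH_REMOVED)
 *         Log(("Patch at %RRv was removed instead; the guest code had changed\n", pInstrGC));
 *     else if (RT_FAILURE(rc))
 *         Log(("Disabling the patch at %RRv failed with %Rrc\n", pInstrGC, rc));
 */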
5118
5119/**
5120 * Permanently disable patch for privileged instruction at specified location
5121 *
5122 * @returns VBox status code.
5123 * @param pVM The VM to operate on.
5124 * @param pInstrGC Guest context pointer to the privileged instruction
5125 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5126 * @param pConflictPatch Conflicting patch
5127 *
5128 */
5129static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5130{
5131#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5132 PATCHINFO patch;
5133 DISCPUSTATE cpu;
5134 R3PTRTYPE(uint8_t *) pInstrHC;
5135 uint32_t opsize;
5136 bool disret;
5137 int rc;
5138
5139 RT_ZERO(patch);
5140 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5141 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5142 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5143 /*
5144 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5145 * with one that jumps right into the conflict patch.
5146 * Otherwise we must disable the conflicting patch to avoid serious problems.
5147 */
5148 if ( disret == true
5149 && (pConflictPatch->flags & PATMFL_CODE32)
5150 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5151 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5152 {
5153 /* Hint patches must be enabled first. */
5154 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5155 {
5156 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5157 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5158 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5159 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5160 /* Enabling might fail if the patched code has changed in the meantime. */
5161 if (rc != VINF_SUCCESS)
5162 return rc;
5163 }
5164
5165 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5166 if (RT_SUCCESS(rc))
5167 {
5168 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5169 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5170 return VINF_SUCCESS;
5171 }
5172 }
5173#endif
5174
5175 if (pConflictPatch->opcode == OP_CLI)
5176 {
5177 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5178 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5179 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5180 if (rc == VWRN_PATCH_REMOVED)
5181 return VINF_SUCCESS;
5182 if (RT_SUCCESS(rc))
5183 {
5184 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5185 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5186 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5187 if (rc == VERR_PATCH_NOT_FOUND)
5188 return VINF_SUCCESS; /* removed already */
5189
5190 AssertRC(rc);
5191 if (RT_SUCCESS(rc))
5192 {
5193 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5194 return VINF_SUCCESS;
5195 }
5196 }
5197 /* else turned into unusable patch (see below) */
5198 }
5199 else
5200 {
5201 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5202 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5203 if (rc == VWRN_PATCH_REMOVED)
5204 return VINF_SUCCESS;
5205 }
5206
5207 /* No need to monitor the code anymore. */
5208 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5209 {
5210 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5211 AssertRC(rc);
5212 }
5213 pConflictPatch->uState = PATCH_UNUSABLE;
5214 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5215 return VERR_PATCH_DISABLED;
5216}
5217
5218/**
5219 * Enable patch for privileged instruction at specified location
5220 *
5221 * @returns VBox status code.
5222 * @param pVM The VM to operate on.
5223 * @param pInstrGC Guest context pointer to the privileged instruction
5224 *
5225 * @note Returns failure if patching is not allowed or not possible.
5226 *
5227 */
5228VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5229{
5230 PPATMPATCHREC pPatchRec;
5231 PPATCHINFO pPatch;
5232
5233 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5234 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5235 if (pPatchRec)
5236 {
5237 int rc = VINF_SUCCESS;
5238
5239 pPatch = &pPatchRec->patch;
5240
5241 if (pPatch->uState == PATCH_DISABLED)
5242 {
5243 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5244 {
5245 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5246 uint8_t temp[16];
5247
5248 Assert(pPatch->cbPatchJump < sizeof(temp));
5249
5250 /* Let's first check if the guest code is still the same. */
5251 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5252 AssertRC(rc2);
5253 if (rc2 == VINF_SUCCESS)
5254 {
5255 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5256 {
5257 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5258 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5259 /* Remove it completely */
5260 rc = PATMR3RemovePatch(pVM, pInstrGC);
5261 AssertRC(rc);
5262 return VERR_PATCH_NOT_FOUND;
5263 }
5264
5265 PATMP2GLOOKUPREC cacheRec;
5266 RT_ZERO(cacheRec);
5267 cacheRec.pPatch = pPatch;
5268
5269 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5270 /* Free leftover lock if any. */
5271 if (cacheRec.Lock.pvMap)
5272 {
5273 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5274 cacheRec.Lock.pvMap = NULL;
5275 }
5276 AssertRC(rc2);
5277 if (RT_FAILURE(rc2))
5278 return rc2;
5279
5280#ifdef DEBUG
5281 {
5282 DISCPUSTATE cpu;
5283 char szOutput[256];
5284 uint32_t opsize, i = 0;
5285 bool disret;
5286 i = 0;
5287 while(i < pPatch->cbPatchJump)
5288 {
5289 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5290 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
5291 Log(("Renewed patch instr: %s", szOutput));
5292 i += opsize;
5293 }
5294 }
5295#endif
5296 }
5297 }
5298 else
5299 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5300 {
5301 uint8_t temp[16];
5302
5303 Assert(pPatch->cbPatchJump < sizeof(temp));
5304
5305 /* Let's first check if the guest code is still the same. */
5306 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5307 AssertRC(rc2);
5308
5309 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5310 {
5311 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5312 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5313 rc = PATMR3RemovePatch(pVM, pInstrGC);
5314 AssertRC(rc);
5315 return VERR_PATCH_NOT_FOUND;
5316 }
5317
5318 rc2 = patmActivateInt3Patch(pVM, pPatch);
5319 if (RT_FAILURE(rc2))
5320 return rc2;
5321 }
5322
5323 pPatch->uState = pPatch->uOldState; //restore state
5324
5325 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5326 if (pPatch->pPatchBlockOffset)
5327 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5328
5329 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5330 }
5331 else
5332 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5333
5334 return rc;
5335 }
5336 return VERR_PATCH_NOT_FOUND;
5337}
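/*
 * Usage sketch (hypothetical): re-arming a previously disabled patch, e.g. after the
 * conflict that forced the disable has been resolved; pInstrGC is an assumed name.
 *
 *     int rc = PATMR3EnablePatch(pVM, pInstrGC);
 *     if (rc == VERR_PATCH_NOT_FOUND)
 *         Log(("No patch exists at %RRv; it may have been removed in the meantime\n", pInstrGC));
 */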
5338
5339/**
5340 * Remove patch for privileged instruction at specified location
5341 *
5342 * @returns VBox status code.
5343 * @param pVM The VM to operate on.
5344 * @param pPatchRec Patch record
5345 * @param fForceRemove Force removal even of patches that other code may depend on (duplicated functions, referenced code)
5346 */
5347int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5348{
5349 PPATCHINFO pPatch;
5350
5351 pPatch = &pPatchRec->patch;
5352
5353 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5354 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5355 {
5356 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5357 return VERR_ACCESS_DENIED;
5358 }
5359 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5360
5361 /** @note NEVER EVER REUSE PATCH MEMORY */
5362 /** @note PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5363
5364 if (pPatchRec->patch.pPatchBlockOffset)
5365 {
5366 PAVLOU32NODECORE pNode;
5367
5368 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5369 Assert(pNode);
5370 }
5371
5372 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5373 {
5374 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5375 AssertRC(rc);
5376 }
5377
5378#ifdef VBOX_WITH_STATISTICS
5379 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5380 {
5381 STAMR3Deregister(pVM, &pPatchRec->patch);
5382#ifndef DEBUG_sandervl
5383 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5384 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5385 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5386 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5387 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5388 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5389 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5390 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5391 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5392 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5393 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5394 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5395 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5396 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5397#endif
5398 }
5399#endif
5400
5401 /** @note no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5402 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5403 pPatch->nrPatch2GuestRecs = 0;
5404 Assert(pPatch->Patch2GuestAddrTree == 0);
5405
5406 patmEmptyTree(pVM, &pPatch->FixupTree);
5407 pPatch->nrFixups = 0;
5408 Assert(pPatch->FixupTree == 0);
5409
5410 if (pPatchRec->patch.pTempInfo)
5411 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5412
5413 /** @note might fail, because it has already been removed (e.g. during reset). */
5414 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5415
5416 /* Free the patch record */
5417 MMHyperFree(pVM, pPatchRec);
5418 return VINF_SUCCESS;
5419}
5420
5421/**
5422 * Attempt to refresh the patch by recompiling its entire code block
5423 *
5424 * @returns VBox status code.
5425 * @param pVM The VM to operate on.
5426 * @param pPatchRec Patch record
5427 */
5428int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5429{
5430 PPATCHINFO pPatch;
5431 int rc;
5432 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5433
5434 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5435
5436 pPatch = &pPatchRec->patch;
5437 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5438 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5439 {
5440 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist\n"));
5441 return VERR_PATCHING_REFUSED;
5442 }
5443
5444 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5445
5446 rc = PATMR3DisablePatch(pVM, pInstrGC);
5447 AssertRC(rc);
5448
5449 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5450 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5451#ifdef VBOX_WITH_STATISTICS
5452 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5453 {
5454 STAMR3Deregister(pVM, &pPatchRec->patch);
5455#ifndef DEBUG_sandervl
5456 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5457 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5458 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5459 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5460 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5461 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5462 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5463 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5464 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5465 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5466 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5467 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5468 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5469 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5470#endif
5471 }
5472#endif
5473
5474 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5475
5476 /* Attempt to install a new patch. */
5477 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5478 if (RT_SUCCESS(rc))
5479 {
5480 RTRCPTR pPatchTargetGC;
5481 PPATMPATCHREC pNewPatchRec;
5482
5483 /* Determine target address in new patch */
5484 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5485 Assert(pPatchTargetGC);
5486 if (!pPatchTargetGC)
5487 {
5488 rc = VERR_PATCHING_REFUSED;
5489 goto failure;
5490 }
5491
5492 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5493 pPatch->uCurPatchOffset = 0;
5494
5495 /* insert jump to new patch in old patch block */
5496 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5497 if (RT_FAILURE(rc))
5498 goto failure;
5499
5500 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5501 Assert(pNewPatchRec); /* can't fail */
5502
5503 /* Remove old patch (only do that when everything is finished) */
5504 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5505 AssertRC(rc2);
5506
5507 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5508 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5509
5510 LogRel(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5511 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5512
5513 /* Used by another patch, so don't remove it! */
5514 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5515 }
5516
5517failure:
5518 if (RT_FAILURE(rc))
5519 {
5520 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5521
5522 /* Remove the new inactive patch */
5523 rc = PATMR3RemovePatch(pVM, pInstrGC);
5524 AssertRC(rc);
5525
5526 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5527 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5528
5529 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5530 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5531 AssertRC(rc2);
5532
5533 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5534 }
5535 return rc;
5536}
5537
5538/**
5539 * Find patch for privileged instruction at specified location
5540 *
5541 * @returns Patch structure pointer if found; else NULL
5542 * @param pVM The VM to operate on.
5543 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5544 * @param fIncludeHints Include hinted patches or not
5545 *
5546 */
5547PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5548{
5549 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5550 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5551 if (pPatchRec)
5552 {
5553 if ( pPatchRec->patch.uState == PATCH_ENABLED
5554 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5555 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5556 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5557 {
5558 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5559 return &pPatchRec->patch;
5560 }
5561 else
5562 if ( fIncludeHints
5563 && pPatchRec->patch.uState == PATCH_DISABLED
5564 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5565 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5566 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5567 {
5568 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5569 return &pPatchRec->patch;
5570 }
5571 }
5572 return NULL;
5573}
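/*
 * Usage sketch (hypothetical): before patching or emulating at an address, check that it
 * does not land inside the 5 byte jump of an existing patch; pInstrGC is an assumed name.
 *
 *     PPATCHINFO pHitPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC, false);
 *     if (pHitPatch)
 *         Log(("%RRv overlaps the patch jump of the patch at %RRv\n", pInstrGC, pHitPatch->pPrivInstrGC));
 */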
5574
5575/**
5576 * Checks whether the GC address is inside a generated patch jump
5577 *
5578 * @returns true -> yes, false -> no
5579 * @param pVM The VM to operate on.
5580 * @param pAddr Guest context address
5581 * @param pPatchAddr Guest context patch address (if true)
5582 */
5583VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5584{
5585 RTRCPTR addr;
5586 PPATCHINFO pPatch;
5587
5588 if (PATMIsEnabled(pVM) == false)
5589 return false;
5590
5591 if (pPatchAddr == NULL)
5592 pPatchAddr = &addr;
5593
5594 *pPatchAddr = 0;
5595
5596 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5597 if (pPatch)
5598 *pPatchAddr = pPatch->pPrivInstrGC;
5599
5600 return *pPatchAddr == 0 ? false : true;
5601}
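/*
 * Usage sketch (hypothetical): a trap handler that suspects the faulting address lies in
 * the middle of a patch jump can retrieve the start of the patched instruction; GCPtrFault
 * is an assumed name.
 *
 *     RTGCPTR32 PatchStartGC;
 *     if (PATMR3IsInsidePatchJump(pVM, GCPtrFault, &PatchStartGC))
 *         Log(("Fault at %RRv is inside the patch jump starting at %RRv\n", GCPtrFault, PatchStartGC));
 */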
5602
5603/**
5604 * Remove patch for privileged instruction at specified location
5605 *
5606 * @returns VBox status code.
5607 * @param pVM The VM to operate on.
5608 * @param pInstrGC Guest context pointer to the privileged instruction
5609 *
5610 * @note Returns failure if patching is not allowed or not possible.
5611 *
5612 */
5613VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5614{
5615 PPATMPATCHREC pPatchRec;
5616
5617 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5618 if (pPatchRec)
5619 {
5620 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5621 if (rc == VWRN_PATCH_REMOVED)
5622 return VINF_SUCCESS;
5623
5624 return PATMRemovePatch(pVM, pPatchRec, false);
5625 }
5626 AssertFailed();
5627 return VERR_PATCH_NOT_FOUND;
5628}
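/*
 * Usage sketch (hypothetical): removing a patch for good, e.g. when the guest has
 * repurposed the code; note that the patch memory itself is never reused.
 *
 *     int rc = PATMR3RemovePatch(pVM, pInstrGC);
 *     if (RT_FAILURE(rc))
 *         Log(("Removing the patch at %RRv failed with %Rrc\n", pInstrGC, rc));
 */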
5629
5630/**
5631 * Mark patch as dirty
5632 *
5633 * @returns VBox status code.
5634 * @param pVM The VM to operate on.
5635 * @param pPatch Patch record
5636 *
5637 * @note Returns failure if patching is not allowed or not possible.
5638 *
5639 */
5640VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5641{
5642 if (pPatch->pPatchBlockOffset)
5643 {
5644 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5645 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5646 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5647 }
5648
5649 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5650 /* Put back the replaced instruction. */
5651 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5652 if (rc == VWRN_PATCH_REMOVED)
5653 return VINF_SUCCESS;
5654
5655 /** @note we don't restore patch pages for patches that are not enabled! */
5656 /** @note be careful when changing this behaviour!! */
5657
5658 /* The patch pages are no longer marked for self-modifying code detection */
5659 if (pPatch->flags & PATMFL_CODE_MONITORED)
5660 {
5661 rc = patmRemovePatchPages(pVM, pPatch);
5662 AssertRCReturn(rc, rc);
5663 }
5664 pPatch->uState = PATCH_DIRTY;
5665
5666 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5667 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5668
5669 return VINF_SUCCESS;
5670}
5671
5672/**
5673 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5674 *
5675 * @returns Corresponding guest context instruction pointer, or 0 if not found.
5676 * @param pVM The VM to operate on.
5677 * @param pPatch Patch block structure pointer
5678 * @param pPatchGC GC address in patch block
5679 */
5680RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5681{
5682 Assert(pPatch->Patch2GuestAddrTree);
5683 /* Get the closest record from below. */
5684 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5685 if (pPatchToGuestRec)
5686 return pPatchToGuestRec->pOrgInstrGC;
5687
5688 return 0;
5689}
5690
5691/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5692 *
5693 * @returns corresponding GC pointer in patch block
5694 * @param pVM The VM to operate on.
5695 * @param pPatch Current patch block pointer
5696 * @param pInstrGC Guest context pointer to privileged instruction
5697 *
5698 */
5699RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5700{
5701 if (pPatch->Guest2PatchAddrTree)
5702 {
5703 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5704 if (pGuestToPatchRec)
5705 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5706 }
5707
5708 return 0;
5709}
5710
5711/** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5712 *
5713 * @returns corresponding GC pointer in patch block
5714 * @param pVM The VM to operate on.
5715 * @param pPatch Current patch block pointer
5716 * @param pInstrGC Guest context pointer to privileged instruction
5717 *
5718 */
5719RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5720{
5721 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5722 if (pGuestToPatchRec)
5723 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5724
5725 return 0;
5726}
5727
5728/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5729 *
5730 * @returns corresponding GC pointer in patch block
5731 * @param pVM The VM to operate on.
5732 * @param pInstrGC Guest context pointer to privileged instruction
5733 *
5734 */
5735VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5736{
5737 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5738 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5739 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5740 else
5741 return 0;
5742}
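/*
 * Usage sketch (hypothetical): round-tripping an address between guest code and patch
 * code with the two public translation helpers; pInstrGC is assumed to be the guest
 * address of an instruction that was recompiled into an enabled patch.
 *
 *     RTRCPTR pPatchInstrGC = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC);
 *     if (pPatchInstrGC)
 *     {
 *         PATMTRANSSTATE enmState;
 *         RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pPatchInstrGC, &enmState);
 *         Log(("%RRv -> %RRv -> %RRv (state %d)\n", pInstrGC, pPatchInstrGC, pOrgInstrGC, enmState));
 *     }
 */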
5743
5744/**
5745 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5746 *
5747 * @returns original GC instruction pointer or 0 if not found
5748 * @param pVM The VM to operate on.
5749 * @param pPatchGC GC address in patch block
5750 * @param pEnmState State of the translated address (out)
5751 *
5752 */
5753VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5754{
5755 PPATMPATCHREC pPatchRec;
5756 void *pvPatchCoreOffset;
5757 RTRCPTR pPrivInstrGC;
5758
5759 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5760 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5761 if (pvPatchCoreOffset == 0)
5762 {
5763 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5764 return 0;
5765 }
5766 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5767 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5768 if (pEnmState)
5769 {
5770 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5771 || pPatchRec->patch.uState == PATCH_DIRTY
5772 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5773 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5774 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5775
5776 if ( !pPrivInstrGC
5777 || pPatchRec->patch.uState == PATCH_UNUSABLE
5778 || pPatchRec->patch.uState == PATCH_REFUSED)
5779 {
5780 pPrivInstrGC = 0;
5781 *pEnmState = PATMTRANS_FAILED;
5782 }
5783 else
5784 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5785 {
5786 *pEnmState = PATMTRANS_INHIBITIRQ;
5787 }
5788 else
5789 if ( pPatchRec->patch.uState == PATCH_ENABLED
5790 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5791 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5792 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5793 {
5794 *pEnmState = PATMTRANS_OVERWRITTEN;
5795 }
5796 else
5797 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5798 {
5799 *pEnmState = PATMTRANS_OVERWRITTEN;
5800 }
5801 else
5802 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5803 {
5804 *pEnmState = PATMTRANS_PATCHSTART;
5805 }
5806 else
5807 *pEnmState = PATMTRANS_SAFE;
5808 }
5809 return pPrivInstrGC;
5810}
5811
5812/**
5813 * Returns the GC pointer of the patch for the specified GC address
5814 *
5815 * @returns GC pointer of the patch code, or 0 if no enabled or dirty patch was found for the address.
5816 * @param pVM The VM to operate on.
5817 * @param pAddrGC Guest context address
5818 */
5819VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5820{
5821 PPATMPATCHREC pPatchRec;
5822
5823 /* Find the patch record. */
5824 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5825 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5826 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5827 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5828 else
5829 return 0;
5830}
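/*
 * Usage sketch (hypothetical): finding the patch code entry point for a patched guest
 * address, e.g. to redirect execution into the patch block; pAddrGC is an assumed name.
 *
 *     RTRCPTR pPatchEntryGC = PATMR3QueryPatchGCPtr(pVM, pAddrGC);
 *     if (pPatchEntryGC)
 *         Log(("Guest address %RRv is patched; patch code starts at %RRv\n", pAddrGC, pPatchEntryGC));
 */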
5831
5832/**
5833 * Attempt to recover dirty instructions
5834 *
5835 * @returns VBox status code.
5836 * @param pVM The VM to operate on.
5837 * @param pCtx CPU context
5838 * @param pPatch Patch record
5839 * @param pPatchToGuestRec Patch to guest address record
5840 * @param pEip GC pointer of trapping instruction
5841 */
5842static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5843{
5844 DISCPUSTATE CpuOld, CpuNew;
5845 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5846 int rc;
5847 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5848 uint32_t cbDirty;
5849 PRECPATCHTOGUEST pRec;
5850 PVMCPU pVCpu = VMMGetCpu0(pVM);
5851
5852 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5853
5854 pRec = pPatchToGuestRec;
5855 pCurInstrGC = pPatchToGuestRec->pOrgInstrGC;
5856 pCurPatchInstrGC = pEip;
5857 cbDirty = 0;
5858 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5859
5860 /* Find all adjacent dirty instructions */
5861 while (true)
5862 {
5863 if (pRec->fJumpTarget)
5864 {
5865 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5866 pRec->fDirty = false;
5867 return VERR_PATCHING_REFUSED;
5868 }
5869
5870 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
5871 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5872 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
5873
5874 /* Only harmless instructions are acceptable. */
5875 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
5876 if ( RT_FAILURE(rc)
5877 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
5878 {
5879 if (RT_SUCCESS(rc))
5880 cbDirty += CpuOld.opsize;
5881 else
5882 if (!cbDirty)
5883 cbDirty = 1;
5884 break;
5885 }
5886
5887#ifdef DEBUG
5888 char szBuf[256];
5889 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
5890 szBuf, sizeof(szBuf), NULL);
5891 Log(("DIRTY: %s\n", szBuf));
5892#endif
5893 /* Mark as clean; if we fail we'll let it always fault. */
5894 pRec->fDirty = false;
5895
5896 /** Remove old lookup record. */
5897 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
5898
5899 pCurPatchInstrGC += CpuOld.opsize;
5900 cbDirty += CpuOld.opsize;
5901
5902 /* Let's see if there's another dirty instruction right after. */
5903 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5904 if (!pRec || !pRec->fDirty)
5905 break; /* no more dirty instructions */
5906
5907 /* In case of complex instructions the next guest instruction could be quite far off. */
5908 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
5909 }
5910
5911 if ( RT_SUCCESS(rc)
5912 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
5913 )
5914 {
5915 uint32_t cbLeft;
5916
5917 pCurPatchInstrHC = pPatchInstrHC;
5918 pCurPatchInstrGC = pEip;
5919 cbLeft = cbDirty;
5920
5921 while (cbLeft && RT_SUCCESS(rc))
5922 {
5923 bool fValidInstr;
5924
5925 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
5926
5927 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
5928 if ( !fValidInstr
5929 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
5930 )
5931 {
5932 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
5933
5934 if ( pTargetGC >= pPatchToGuestRec->pOrgInstrGC
5935 && pTargetGC <= pPatchToGuestRec->pOrgInstrGC + cbDirty
5936 )
5937 {
5938 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
5939 fValidInstr = true;
5940 }
5941 }
5942
5943 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
5944 if ( rc == VINF_SUCCESS
5945 && CpuNew.opsize <= cbLeft /* must still fit */
5946 && fValidInstr
5947 )
5948 {
5949#ifdef DEBUG
5950 char szBuf[256];
5951 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
5952 szBuf, sizeof(szBuf), NULL);
5953 Log(("NEW: %s\n", szBuf));
5954#endif
5955
5956 /* Copy the new instruction. */
5957 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
5958 AssertRC(rc);
5959
5960 /* Add a new lookup record for the duplicated instruction. */
5961 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5962 }
5963 else
5964 {
5965#ifdef DEBUG
5966 char szBuf[256];
5967 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
5968 szBuf, sizeof(szBuf), NULL);
5969 Log(("NEW: %s (FAILED)\n", szBuf));
5970#endif
5971 /* Restore the old lookup record for the duplicated instruction. */
5972 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5973
5974 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
5975 rc = VERR_PATCHING_REFUSED;
5976 break;
5977 }
5978 pCurInstrGC += CpuNew.opsize;
5979 pCurPatchInstrHC += CpuNew.opsize;
5980 pCurPatchInstrGC += CpuNew.opsize;
5981 cbLeft -= CpuNew.opsize;
5982
5983 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
5984 if (!cbLeft)
5985 {
5986 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
5987 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
5988 {
5989 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5990 if (pRec)
5991 {
5992 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
5993 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5994
5995 Assert(!pRec->fDirty);
5996
5997 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
5998 if (cbFiller >= SIZEOF_NEARJUMP32)
5999 {
6000 pPatchFillHC[0] = 0xE9;
6001 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6002#ifdef DEBUG
6003 char szBuf[256];
6004 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6005 szBuf, sizeof(szBuf), NULL);
6006 Log(("FILL: %s\n", szBuf));
6007#endif
6008 }
6009 else
6010 {
6011 for (unsigned i = 0; i < cbFiller; i++)
6012 {
6013 pPatchFillHC[i] = 0x90; /* NOP */
6014#ifdef DEBUG
6015 char szBuf[256];
6016 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
6017 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6018 Log(("FILL: %s\n", szBuf));
6019#endif
6020 }
6021 }
6022 }
6023 }
6024 }
6025 }
6026 }
6027 else
6028 rc = VERR_PATCHING_REFUSED;
6029
6030 if (RT_SUCCESS(rc))
6031 {
6032 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6033 }
6034 else
6035 {
6036 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6037 Assert(cbDirty);
6038
6039 /* Mark the whole instruction stream with breakpoints. */
6040 if (cbDirty)
6041 memset(pPatchInstrHC, 0xCC, cbDirty);
6042
6043 if ( pVM->patm.s.fOutOfMemory == false
6044 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6045 {
6046 rc = patmR3RefreshPatch(pVM, pPatch);
6047 if (RT_FAILURE(rc))
6048 {
6049 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6050 }
6051 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6052 rc = VERR_PATCHING_REFUSED;
6053 }
6054 }
6055 return rc;
6056}
6057
6058/**
6059 * Handle trap inside patch code
6060 *
6061 * @returns VBox status code.
6062 * @param pVM The VM to operate on.
6063 * @param pCtx CPU context
6064 * @param pEip GC pointer of trapping instruction
6065 * @param ppNewEip GC pointer to new instruction
6066 */
6067VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6068{
6069 PPATMPATCHREC pPatch = 0;
6070 void *pvPatchCoreOffset;
6071 RTRCUINTPTR offset;
6072 RTRCPTR pNewEip;
6073 int rc;
6074 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6075 PVMCPU pVCpu = VMMGetCpu0(pVM);
6076
6077 Assert(pVM->cCpus == 1);
6078
6079 pNewEip = 0;
6080 *ppNewEip = 0;
6081
6082 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6083
6084 /* Find the patch record. */
6085 /** @note there might not be a patch to guest translation record (global function) */
6086 offset = pEip - pVM->patm.s.pPatchMemGC;
6087 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6088 if (pvPatchCoreOffset)
6089 {
6090 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6091
6092 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6093
6094 if (pPatch->patch.uState == PATCH_DIRTY)
6095 {
6096 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6097 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6098 {
6099 /* Function duplication patches set fPIF to 1 on entry */
6100 pVM->patm.s.pGCStateHC->fPIF = 1;
6101 }
6102 }
6103 else
6104 if (pPatch->patch.uState == PATCH_DISABLED)
6105 {
6106 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6107 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6108 {
6109 /* Function duplication patches set fPIF to 1 on entry */
6110 pVM->patm.s.pGCStateHC->fPIF = 1;
6111 }
6112 }
6113 else
6114 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6115 {
6116 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6117
6118 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6119 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6120 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6121 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6122 }
6123
6124 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6125 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6126
6127 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6128 pPatch->patch.cTraps++;
6129 PATM_STAT_FAULT_INC(&pPatch->patch);
6130 }
6131 else
6132 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6133
6134 /* Check if we were interrupted in PATM generated instruction code. */
6135 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6136 {
6137 DISCPUSTATE Cpu;
6138 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6139 AssertRC(rc);
6140
6141 if ( rc == VINF_SUCCESS
6142 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6143 || Cpu.pCurInstr->opcode == OP_PUSH
6144 || Cpu.pCurInstr->opcode == OP_CALL)
6145 )
6146 {
6147 uint64_t fFlags;
6148
6149 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6150
6151 if (Cpu.pCurInstr->opcode == OP_PUSH)
6152 {
6153 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6154 if ( rc == VINF_SUCCESS
6155 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6156 {
6157 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6158
6159 /* Reset the PATM stack. */
6160 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6161
6162 pVM->patm.s.pGCStateHC->fPIF = 1;
6163
6164 Log(("Faulting push -> go back to the original instruction\n"));
6165
6166 /* continue at the original instruction */
6167 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6168 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6169 return VINF_SUCCESS;
6170 }
6171 }
6172
6173 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6174 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6175 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6176 if (rc == VINF_SUCCESS)
6177 {
6178 /* The guest page *must* be present. */
6179 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6180 if ( rc == VINF_SUCCESS
6181 && (fFlags & X86_PTE_P))
6182 {
6183 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6184 return VINF_PATCH_CONTINUE;
6185 }
6186 }
6187 }
6188 else
6189 if (pPatch->patch.pPrivInstrGC == pNewEip)
6190 {
6191 /* Invalidated patch or first instruction overwritten.
6192 * We can ignore the fPIF state in this case.
6193 */
6194 /* Reset the PATM stack. */
6195 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6196
6197 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6198
6199 pVM->patm.s.pGCStateHC->fPIF = 1;
6200
6201 /* continue at the original instruction */
6202 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6203 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6204 return VINF_SUCCESS;
6205 }
6206
6207 char szBuf[256];
6208 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6209
6210 /* Very bad. We crashed in emitted code. Probably stack? */
6211 if (pPatch)
6212 {
6213 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6214 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6215 }
6216 else
6217 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6218 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6219 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6220 }
6221
6222 /* From here on, we must have a valid patch to guest translation. */
6223 if (pvPatchCoreOffset == 0)
6224 {
6225 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6226 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6227 return VERR_PATCH_NOT_FOUND;
6228 }
6229
6230 /* Take care of dirty/changed instructions. */
6231 if (pPatchToGuestRec->fDirty)
6232 {
6233 Assert(pPatchToGuestRec->Core.Key == offset);
6234 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6235
6236 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6237 if (RT_SUCCESS(rc))
6238 {
6239 /* Retry the current instruction. */
6240 pNewEip = pEip;
6241 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6242 }
6243 else
6244 {
6245 /* Reset the PATM stack. */
6246 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6247
6248 rc = VINF_SUCCESS; /* Continue at original instruction. */
6249 }
6250
6251 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6252 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6253 return rc;
6254 }
6255
6256#ifdef VBOX_STRICT
6257 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6258 {
6259 DISCPUSTATE cpu;
6260 bool disret;
6261 uint32_t opsize;
6262 PATMP2GLOOKUPREC cacheRec;
6263 RT_ZERO(cacheRec);
6264 cacheRec.pPatch = &pPatch->patch;
6265
6266 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6267 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6268 if (cacheRec.Lock.pvMap)
6269 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6270
6271 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6272 {
6273 RTRCPTR retaddr;
6274 PCPUMCTX pCtx2;
6275
6276 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6277
6278 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6279 AssertRC(rc);
6280
6281 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6282 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6283 }
6284 }
6285#endif
6286
6287 /* Return the original address, corrected by subtracting the CS base address. */
6288 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6289
6290 /* Reset the PATM stack. */
6291 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6292
6293 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6294 {
6295 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6296 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6297#ifdef VBOX_STRICT
6298 DISCPUSTATE cpu;
6299 bool disret;
6300 uint32_t opsize;
6301 PATMP2GLOOKUPREC cacheRec;
6302 RT_ZERO(cacheRec);
6303 cacheRec.pPatch = &pPatch->patch;
6304
6305 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6306 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6307 if (cacheRec.Lock.pvMap)
6308 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6309
6310 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6311 {
6312 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6313 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6314 if (cacheRec.Lock.pvMap)
6315 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6316
6317 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6318 }
6319#endif
6320 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6321 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6322 }
6323
6324 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6325#ifdef LOG_ENABLED
6326 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6327#endif
6328 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6329 {
6330 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6331 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6332 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6333 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6334 return VERR_PATCH_DISABLED;
6335 }
6336
6337#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6338 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6339 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6340 {
6341 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6342 // We are only wasting time; back out the patch.
6343 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6344 pTrapRec->pNextPatchInstr = 0;
6345 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6346 return VERR_PATCH_DISABLED;
6347 }
6348#endif
6349
6350 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6351 return VINF_SUCCESS;
6352}
6353
6354
6355/**
6356 * Handles a page fault in a monitored page.
6357 *
6358 * @returns VBox status code.
6359 * @param pVM The VM to operate on.
6360 */
6361VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6362{
6363 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6364
6365 addr &= PAGE_BASE_GC_MASK;
6366
6367 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6368 AssertRC(rc); NOREF(rc);
6369
6370 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6371 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6372 {
6373 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6374 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6375 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6376 if (rc == VWRN_PATCH_REMOVED)
6377 return VINF_SUCCESS;
6378
6379 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6380
6381 if (addr == pPatchRec->patch.pPrivInstrGC)
6382 addr++; /* Make sure the enumeration below does not pick up this patch again. */
6383 }
6384
6385 for(;;)
6386 {
6387 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6388
6389 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6390 break;
6391
6392 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6393 {
6394 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6395 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6396 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6397 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6398 }
6399 addr = pPatchRec->patch.pPrivInstrGC + 1; /* Continue the search just past this patch. */
6400 }
6401
6402 pVM->patm.s.pvFaultMonitor = 0;
6403 return VINF_SUCCESS;
6404}
6405
6406
6407#ifdef VBOX_WITH_STATISTICS
6408
6409static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6410{
6411 if (pPatch->flags & PATMFL_SYSENTER)
6412 {
6413 return "SYSENT";
6414 }
6415 else
6416 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6417 {
6418 static char szTrap[16];
6419 uint32_t iGate;
6420
6421 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6422 if (iGate < 256)
6423 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6424 else
6425 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6426 return szTrap;
6427 }
6428 else
6429 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6430 return "DUPFUNC";
6431 else
6432 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6433 return "FUNCCALL";
6434 else
6435 if (pPatch->flags & PATMFL_TRAMPOLINE)
6436 return "TRAMP";
6437 else
6438 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6439}
6440
6441static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6442{
6443 switch(pPatch->uState)
6444 {
6445 case PATCH_ENABLED:
6446 return "ENA";
6447 case PATCH_DISABLED:
6448 return "DIS";
6449 case PATCH_DIRTY:
6450 return "DIR";
6451 case PATCH_UNUSABLE:
6452 return "UNU";
6453 case PATCH_REFUSED:
6454 return "REF";
6455 case PATCH_DISABLE_PENDING:
6456 return "DIP";
6457 default:
6458 AssertFailed();
6459 return " ";
6460 }
6461}
6462
6463/**
6464 * Resets the sample.
6465 * @param pVM The VM handle.
6466 * @param pvSample The sample registered using STAMR3RegisterCallback.
6467 */
6468static void patmResetStat(PVM pVM, void *pvSample)
6469{
6470 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6471 Assert(pPatch);
6472
6473 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6474 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6475}
6476
6477/**
6478 * Prints the sample into the buffer.
6479 *
6480 * @param pVM The VM handle.
6481 * @param pvSample The sample registered using STAMR3RegisterCallback.
6482 * @param pszBuf The buffer to print into.
6483 * @param cchBuf The size of the buffer.
6484 */
6485static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6486{
6487 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6488 Assert(pPatch);
6489
6490 Assert(pPatch->uState != PATCH_REFUSED);
6491 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6492
6493 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6494 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6495 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6496}
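/* Illustrative example (not from the original source): for an enabled duplicated-function
 * patch of 0x1a0 bytes whose ratio counters hold 1234 and 0, the format string above
 * would produce roughly:
 *      size 01a0 ->ENA  DUPFUNC - 00001234 - 00000000
 */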
6497
6498/**
6499 * Returns the GC address of the corresponding patch statistics counter.
6500 *
6501 * @returns The GC address of the statistics counter.
6502 * @param pVM The VM to operate on.
6503 * @param pPatch Patch structure
6504 */
6505RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6506{
6507 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6508 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6509}
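/* Illustrative sketch (assumptions noted, not part of the original source): if STAMRATIOU32
 * consists of just the two 32-bit fields u32A and u32B used in patmResetStat above, i.e.
 * sizeof(STAMRATIOU32) == 8 and RT_OFFSETOF(STAMRATIOU32, u32A) == 0, then for
 * uPatchIdx == 3 the expression above yields pStatsGC + 3 * 8 = pStatsGC + 0x18,
 * presumably so the emitted patch code can update its counter directly in GC context.
 */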
6510
6511#endif /* VBOX_WITH_STATISTICS */
6512
6513#ifdef VBOX_WITH_DEBUGGER
6514/**
6515 * The '.patmoff' command.
6516 *
6517 * @returns VBox status.
6518 * @param pCmd Pointer to the command descriptor (as registered).
6519 * @param pCmdHlp Pointer to command helper functions.
6520 * @param pVM Pointer to the current VM (if any).
6521 * @param paArgs Pointer to (readonly) array of arguments.
6522 * @param cArgs Number of arguments in the array.
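 * @param pResult Where to store the result (not used by this command).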
6523 */
6524static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6525{
6526 /*
6527 * Validate input.
6528 */
6529 if (!pVM)
6530 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6531
6532 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6533 PATMR3AllowPatching(pVM, false);
6534 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6535}
6536
6537/**
6538 * The '.patmon' command.
6539 *
6540 * @returns VBox status.
6541 * @param pCmd Pointer to the command descriptor (as registered).
6542 * @param pCmdHlp Pointer to command helper functions.
6543 * @param pVM Pointer to the current VM (if any).
6544 * @param paArgs Pointer to (readonly) array of arguments.
6545 * @param cArgs Number of arguments in the array.
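 * @param pResult Where to store the result (not used by this command).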
6546 */
6547static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6548{
6549 /*
6550 * Validate input.
6551 */
6552 if (!pVM)
6553 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6554
6555 PATMR3AllowPatching(pVM, true);
6556 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6557 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6558}
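
/* Illustrative usage sketch (the console prompt shown is an assumption, not part of the
 * original source): invoking the two commands registered above from the DBGC debugger
 * console would look roughly like this, given the pfnPrintf calls in the handlers:
 *
 *      VBoxDbg> .patmoff
 *      Patching disabled
 *      VBoxDbg> .patmon
 *      Patching enabled
 */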
6559#endif