VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@58311

Last change on this file since 58311 was 58126, checked in by vboxsync, 9 years ago

VMM: Fixed almost all the Doxygen warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 263.0 KB
1/* $Id: PATM.cpp 58126 2015-10-08 20:59:48Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2015 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20
21/*********************************************************************************************************************************
22* Header Files *
23*********************************************************************************************************************************/
24#define LOG_GROUP LOG_GROUP_PATM
25#include <VBox/vmm/patm.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/cpum.h>
30#include <VBox/vmm/cpumdis.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/mm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/ssm.h>
36#include <VBox/vmm/trpm.h>
37#include <VBox/vmm/cfgm.h>
38#include <VBox/param.h>
39#include <VBox/vmm/selm.h>
40#include <VBox/vmm/csam.h>
41#include <iprt/avl.h>
42#include "PATMInternal.h"
43#include "PATMPatch.h"
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/uvm.h>
46#include <VBox/dbg.h>
47#include <VBox/err.h>
48#include <VBox/log.h>
49#include <iprt/assert.h>
50#include <iprt/asm.h>
51#include <VBox/dis.h>
52#include <VBox/disopcode.h>
53#include "internal/pgm.h"
54
55#include <iprt/string.h>
56#include "PATMA.h"
57
58//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
59//#define PATM_DISABLE_ALL
60
61/**
62 * Refresh trampoline patch state.
63 */
64typedef struct PATMREFRESHPATCH
65{
66 /** Pointer to the VM structure. */
67 PVM pVM;
68 /** The trampoline patch record. */
69 PPATCHINFO pPatchTrampoline;
70 /** The new patch we want to jump to. */
71 PPATCHINFO pPatchRec;
72} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
73
74
75#define PATMREAD_RAWCODE 1 /* read code as-is */
76#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
77#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
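/* These are bit flags; they are OR'd into PATMDISASM::fReadFlags and consumed
 * by patmReadBytes() below: PATMREAD_ORGCODE selects the saved original opcode
 * bytes via PATMR3ReadOrgInstr, while PATMREAD_NOCHECK suppresses the strict
 * patch-conflict assertions. */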
78
79/*
80 * Private structure used during disassembly
81 */
82typedef struct
83{
84 PVM pVM;
85 PPATCHINFO pPatchInfo;
86 R3PTRTYPE(uint8_t *) pbInstrHC;
87 RTRCPTR pInstrGC;
88 uint32_t fReadFlags;
89} PATMDISASM, *PPATMDISASM;
90
91
92/*********************************************************************************************************************************
93* Internal Functions *
94*********************************************************************************************************************************/
95static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
96static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
98
99#ifdef LOG_ENABLED // keep gcc quiet
100static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
101#endif
102#ifdef VBOX_WITH_STATISTICS
103static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
104static void patmResetStat(PVM pVM, void *pvSample);
105static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
106#endif
107
108#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
109#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
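/* Example: for a host pointer pbCode that lies inside the patch memory block,
 *   RTRCPTR pCodeGC = patmPatchHCPtr2PatchGCPtr(pVM, pbCode);
 * yields the corresponding raw-mode (GC) address, and
 *   patmPatchGCPtr2PatchHCPtr(pVM, pCodeGC) == pbCode
 * maps it back; both macros are pure offset arithmetic and are only valid for
 * addresses within the patch memory block. */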
110
111static int patmReinit(PVM pVM);
112static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
113static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
114static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
115
116#ifdef VBOX_WITH_DEBUGGER
117static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
118static FNDBGCCMD patmr3CmdOn;
119static FNDBGCCMD patmr3CmdOff;
120
121/** Command descriptors. */
122static const DBGCCMD g_aCmds[] =
123{
124 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
125 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
126 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
127};
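/* These are hooked up via DBGCRegisterCommands in PATMR3Init below, so
 * 'patmon' and 'patmoff' can be typed at the debugger console prompt to
 * enable or disable patching at runtime. */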
128#endif
129
130/* Don't want to break saved states, so put it here as a global variable. */
131static unsigned int cIDTHandlersDisabled = 0;
132
133/**
134 * Initializes the PATM.
135 *
136 * @returns VBox status code.
137 * @param pVM The cross context VM structure.
138 */
139VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
140{
141 int rc;
142
143 /*
144 * We only need a saved state dummy loader if HM is enabled.
145 */
146 if (HMIsEnabled(pVM))
147 {
148 pVM->fPATMEnabled = false;
149 return SSMR3RegisterStub(pVM, "PATM", 0);
150 }
151
152 /*
153 * Raw-mode.
154 */
155 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
156
157 /* These values can't change as they are hardcoded in patch code (old saved states!) */
158 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
159 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
160 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
161 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
162
163 AssertReleaseMsg(g_fPatmInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
164 ("Interrupt flags out of sync!! g_fPatmInterruptFlag=%#x expected %#x. broken assembler?\n", g_fPatmInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
165
166 /* Allocate patch memory and GC patch state memory. */
167 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
168 /* Add another page in case the generated code is much larger than expected. */
169 /** @todo bad safety precaution */
170 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
171 if (RT_FAILURE(rc))
172 {
173 Log(("MMHyperAlloc failed with %Rrc\n", rc));
174 return rc;
175 }
176 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
177
178 /* PATM stack page for call instruction execution. (Two parts: one for our private stack and one to store the original return address.) */
179 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
180 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
181
182 patmR3DbgInit(pVM);
183
184 /*
185 * Hypervisor memory for GC status data (read/write)
186 *
187 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
188 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
189 *
190 */
191 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
192 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
193 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
194
195 /* Hypervisor memory for patch statistics */
196 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
197 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
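/* At this point the single hyper allocation is laid out as follows:
 *   pPatchMemHC   patch code                (PATCH_MEMORY_SIZE)
 *                 spare page                (PAGE_SIZE, see the @todo above)
 *   pGCStackHC    PATM stack                (PATM_STACK_TOTAL_SIZE)
 *   pGCStateHC    GC state                  (one page, PATMGCSTATE)
 *   pStatsHC      patch statistics          (PATM_STAT_MEMSIZE)
 * with each host pointer paired with its raw-mode address via MMHyperR3ToRC. */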
198
199 /* Memory for patch lookup trees. */
200 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
201 AssertRCReturn(rc, rc);
202 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
203
204#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
205 /* Check CFGM option. */
206 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
207 if (RT_FAILURE(rc))
208# ifdef PATM_DISABLE_ALL
209 pVM->fPATMEnabled = false;
210# else
211 pVM->fPATMEnabled = true;
212# endif
213#endif
214
215 rc = patmReinit(pVM);
216 AssertRC(rc);
217 if (RT_FAILURE(rc))
218 return rc;
219
220 /*
221 * Register the virtual page access handler type.
222 */
223 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_ALL, false /*fRelocUserRC*/,
224 NULL /*pfnInvalidateR3*/,
225 patmVirtPageHandler,
226 "patmVirtPageHandler", "patmRCVirtPagePfHandler",
227 "PATMMonitorPatchJump", &pVM->patm.s.hMonitorPageType);
228 AssertRCReturn(rc, rc);
229
230 /*
231 * Register save and load state notifiers.
232 */
233 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SAVED_STATE_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
234 NULL, NULL, NULL,
235 NULL, patmR3Save, NULL,
236 NULL, patmR3Load, NULL);
237 AssertRCReturn(rc, rc);
238
239#ifdef VBOX_WITH_DEBUGGER
240 /*
241 * Debugger commands.
242 */
243 static bool s_fRegisteredCmds = false;
244 if (!s_fRegisteredCmds)
245 {
246 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
247 if (RT_SUCCESS(rc2))
248 s_fRegisteredCmds = true;
249 }
250#endif
251
252#ifdef VBOX_WITH_STATISTICS
253 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
254 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
255 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
256 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
257 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
258 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
259 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
260 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
261
262 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
263 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
264
265 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
266 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
267 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
268
269 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
270 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
271 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
272 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
273 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
274
275 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
276 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
277
278 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
279 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
280
281 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
282 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
283 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
284
285 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
286 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
287 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
288
289 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
290 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
291
292 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
293 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
294 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
295 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
296
297 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
298 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
299
300 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
301 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
302
303 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
304 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
305 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
306
307 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
308 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
309 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
310 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
311
312 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
313 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
314 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
315 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
316 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
317
318 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
319#endif /* VBOX_WITH_STATISTICS */
320
321 Log(("g_patmCallRecord.cbFunction %u\n", g_patmCallRecord.cbFunction));
322 Log(("g_patmCallIndirectRecord.cbFunction %u\n", g_patmCallIndirectRecord.cbFunction));
323 Log(("g_patmRetRecord.cbFunction %u\n", g_patmRetRecord.cbFunction));
324 Log(("g_patmJumpIndirectRecord.cbFunction %u\n", g_patmJumpIndirectRecord.cbFunction));
325 Log(("g_patmPopf32Record.cbFunction %u\n", g_patmPopf32Record.cbFunction));
326 Log(("g_patmIretRecord.cbFunction %u\n", g_patmIretRecord.cbFunction));
327 Log(("g_patmStiRecord.cbFunction %u\n", g_patmStiRecord.cbFunction));
328 Log(("g_patmCheckIFRecord.cbFunction %u\n", g_patmCheckIFRecord.cbFunction));
329
330 return rc;
331}
332
333/**
334 * Finalizes HMA page attributes.
335 *
336 * @returns VBox status code.
337 * @param pVM The cross context VM structure.
338 */
339VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
340{
341 if (HMIsEnabled(pVM))
342 return VINF_SUCCESS;
343
344 /*
345 * The GC state, stack and statistics must be read/write for the guest
346 * (supervisor only of course).
347 *
348 * Remember, we run guest code at ring-1 and ring-2 levels, which are
349 * considered supervisor levels by the paging structures. We run the VMM
350 * in ring-0 with CR0.WP=0 and mapping all VMM structures as read-only
351 * pages. The following structures are exceptions and must be mapped with
352 * write access so the ring-1 and ring-2 code can modify them.
353 */
354 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
355 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCState accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
356
357 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
358 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCStack accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
359
360 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
361 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the stats struct accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
362
363 /*
364 * Find the patch helper segment so we can identify code running there as patch code.
365 */
366 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpBegin", &pVM->patm.s.pbPatchHelpersRC);
367 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpBegin: %Rrc\n", rc), rc);
368 pVM->patm.s.pbPatchHelpersR3 = (uint8_t *)MMHyperRCToR3(pVM, pVM->patm.s.pbPatchHelpersRC);
369 AssertLogRelReturn(pVM->patm.s.pbPatchHelpersR3 != NULL, VERR_INTERNAL_ERROR_3);
370
371 RTRCPTR RCPtrEnd;
372 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpEnd", &RCPtrEnd);
373 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpEnd: %Rrc\n", rc), rc);
374
375 pVM->patm.s.cbPatchHelpers = RCPtrEnd - pVM->patm.s.pbPatchHelpersRC;
376 AssertLogRelMsgReturn(pVM->patm.s.cbPatchHelpers < _128K,
377 ("%RRv-%RRv => %#x\n", pVM->patm.s.pbPatchHelpersRC, RCPtrEnd, pVM->patm.s.cbPatchHelpers),
378 VERR_INTERNAL_ERROR_4);
379
380
381 return VINF_SUCCESS;
382}
383
384/**
385 * (Re)initializes PATM
386 * @returns VBox status code.
387 * @param pVM The cross context VM structure.
388 */
389static int patmReinit(PVM pVM)
390{
391 int rc;
392
393 /*
394 * Assert alignment and sizes.
395 */
396 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
397 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
398
399 /*
400 * Setup any fixed pointers and offsets.
401 */
402 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
403
404#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
405#ifndef PATM_DISABLE_ALL
406 pVM->fPATMEnabled = true;
407#endif
408#endif
409
410 Assert(pVM->patm.s.pGCStateHC);
411 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
412 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
413
414 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
415 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
416
417 Assert(pVM->patm.s.pGCStackHC);
418 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
419 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
420 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
421 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
422
423 Assert(pVM->patm.s.pStatsHC);
424 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
425 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
426
427 Assert(pVM->patm.s.pPatchMemHC);
428 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
429 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
430 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
431
432 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
433 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
434
435 Assert(pVM->patm.s.PatchLookupTreeHC);
436 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
437
438 /*
439 * (Re)Initialize PATM structure
440 */
441 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
442 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
443 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
444 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
445 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
446 pVM->patm.s.pvFaultMonitor = 0;
447 pVM->patm.s.deltaReloc = 0;
448
449 /* Lowest and highest patched instruction */
450 pVM->patm.s.pPatchedInstrGCLowest = ~0;
451 pVM->patm.s.pPatchedInstrGCHighest = 0;
452
453 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
454 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
455 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
456
457 pVM->patm.s.pfnSysEnterPatchGC = 0;
458 pVM->patm.s.pfnSysEnterGC = 0;
459
460 pVM->patm.s.fOutOfMemory = false;
461
462 pVM->patm.s.pfnHelperCallGC = 0;
463 patmR3DbgReset(pVM);
464
465 /* Generate all global functions to be used by future patches. */
466 /* We generate a fake patch in order to use the existing code for relocation. */
467 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
468 if (RT_FAILURE(rc))
469 {
470 Log(("Out of memory!!!!\n"));
471 return VERR_NO_MEMORY;
472 }
473 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
474 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
475 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
476
477 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
478 AssertRC(rc);
479
480 /* Update free pointer in patch memory. */
481 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
482 /* Round to next 8 byte boundary. */
483 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
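/* RT_ALIGN_32 rounds up to the given power-of-two boundary, so e.g. an offset
 * of 0x1ffa becomes 0x2000 here. */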
484
485
486 return rc;
487}
488
489
490/**
491 * Applies relocations to data and code managed by this
492 * component. This function will be called at init and
493 * whenever the VMM needs to relocate itself inside the GC.
494 *
495 * The PATM will update the addresses used by the switcher.
496 *
497 * @param pVM The cross context VM structure.
498 * @param offDelta The relocation delta.
499 */
500VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta)
501{
502 if (HMIsEnabled(pVM))
503 return;
504
505 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
506 Assert((RTRCINTPTR)(GCPtrNew - pVM->patm.s.pGCStateGC) == offDelta);
507
508 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, offDelta));
509 if (offDelta)
510 {
511 PCPUMCTX pCtx;
512
513 /* Update CPUMCTX guest context pointer. */
514 pVM->patm.s.pCPUMCtxGC += offDelta;
515
516 pVM->patm.s.deltaReloc = offDelta;
517 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmR3RelocatePatches, (void *)pVM);
518
519 pVM->patm.s.pGCStateGC = GCPtrNew;
520 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
521 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
522 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
523 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
524
525 if (pVM->patm.s.pfnSysEnterPatchGC)
526 pVM->patm.s.pfnSysEnterPatchGC += offDelta;
527
528 /* If we are running patch code right now, then also adjust EIP. */
529 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
530 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
531 pCtx->eip += offDelta;
532
533 /* Deal with the global patch functions. */
534 pVM->patm.s.pfnHelperCallGC += offDelta;
535 pVM->patm.s.pfnHelperRetGC += offDelta;
536 pVM->patm.s.pfnHelperIretGC += offDelta;
537 pVM->patm.s.pfnHelperJumpGC += offDelta;
538
539 pVM->patm.s.pbPatchHelpersRC += offDelta;
540
541 patmR3RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
542 }
543}
544
545
546/**
547 * Terminates the PATM.
548 *
549 * Termination means cleaning up and freeing all resources;
550 * the VM itself is at this point powered off or suspended.
551 *
552 * @returns VBox status code.
553 * @param pVM The cross context VM structure.
554 */
555VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
556{
557 if (HMIsEnabled(pVM))
558 return VINF_SUCCESS;
559
560 patmR3DbgTerm(pVM);
561
562 /* Memory was all allocated from the two MM heaps and requires no freeing. */
563 return VINF_SUCCESS;
564}
565
566
567/**
568 * PATM reset callback.
569 *
570 * @returns VBox status code.
571 * @param pVM The cross context VM structure.
572 */
573VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
574{
575 Log(("PATMR3Reset\n"));
576 if (HMIsEnabled(pVM))
577 return VINF_SUCCESS;
578
579 /* Free all patches. */
580 for (;;)
581 {
582 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
583 if (pPatchRec)
584 patmR3RemovePatch(pVM, pPatchRec, true);
585 else
586 break;
587 }
588 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
589 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
590 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
591 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
592
593 int rc = patmReinit(pVM);
594 if (RT_SUCCESS(rc))
595 rc = PATMR3InitFinalize(pVM); /* paranoia */
596
597 return rc;
598}
599
600/**
601 * @callback_method_impl{FNDISREADBYTES}
602 */
603static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
604{
605 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
606
607/** @todo change this to read more! */
608 /*
609 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
610 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
611 */
612 /** @todo could change in the future! */
613 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
614 {
615 size_t cbRead = cbMaxRead;
616 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
617 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
618 if (RT_SUCCESS(rc))
619 {
620 if (cbRead >= cbMinRead)
621 {
622 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
623 return VINF_SUCCESS;
624 }
625
626 cbMinRead -= (uint8_t)cbRead;
627 cbMaxRead -= (uint8_t)cbRead;
628 offInstr += (uint8_t)cbRead;
629 uSrcAddr += cbRead;
630 }
631
632#ifdef VBOX_STRICT
633 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
634 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
635 {
636 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
637 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
638 }
639#endif
640 }
641
642 int rc = VINF_SUCCESS;
643 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
644 if ( !pDisInfo->pbInstrHC
645 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
646 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
647 {
648 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
649 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
650 offInstr += cbMinRead;
651 }
652 else
653 {
654 /*
655 * pbInstrHC is the base address; adjust according to the GC pointer.
656 *
657 * Try to read the max number of bytes here. Since the disassembler only
658 * ever uses these bytes for the current instruction, it doesn't matter
659 * much if we accidentally read the start of the next instruction even
660 * if it happens to be a patch jump or int3.
661 */
662 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
663 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
664
665 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
666 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
667 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
668 if (cbToRead > cbMaxRead)
669 cbToRead = cbMaxRead;
670
671 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
672 offInstr += (uint8_t)cbToRead;
673 }
674
675 pDis->cbCachedInstr = offInstr;
676 return rc;
677}
678
679
680DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
681 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
682{
683 PATMDISASM disinfo;
684 disinfo.pVM = pVM;
685 disinfo.pPatchInfo = pPatch;
686 disinfo.pbInstrHC = pbInstrHC;
687 disinfo.pInstrGC = InstrGCPtr32;
688 disinfo.fReadFlags = fReadFlags;
689 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
690 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
691 patmReadBytes, &disinfo,
692 pCpu, pcbInstr, pszOutput, cbOutput));
693}
694
695
696DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
697 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
698{
699 PATMDISASM disinfo;
700 disinfo.pVM = pVM;
701 disinfo.pPatchInfo = pPatch;
702 disinfo.pbInstrHC = pbInstrHC;
703 disinfo.pInstrGC = InstrGCPtr32;
704 disinfo.fReadFlags = fReadFlags;
705 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
706 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
707 patmReadBytes, &disinfo,
708 pCpu, pcbInstr));
709}
710
711
712DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
713 uint32_t fReadFlags,
714 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
715{
716 PATMDISASM disinfo;
717 disinfo.pVM = pVM;
718 disinfo.pPatchInfo = pPatch;
719 disinfo.pbInstrHC = pbInstrHC;
720 disinfo.pInstrGC = InstrGCPtr32;
721 disinfo.fReadFlags = fReadFlags;
722 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
723 pCpu, pcbInstr));
724}
725
726#ifdef LOG_ENABLED
727# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
728 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
729# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
730 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
731
732# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
733 do { \
734 if (LogIsEnabled()) \
735 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
736 } while (0)
737
738static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
739 const char *pszComment1, const char *pszComment2)
740{
741 DISCPUSTATE DisState;
742 char szOutput[128];
743 szOutput[0] = '\0';
744 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
745 &DisState, NULL, szOutput, sizeof(szOutput));
746 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
747}
748
749#else
750# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
751# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
752# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
753#endif
754
755
756/**
757 * Callback function for RTAvloU32DoWithAll
758 *
759 * Updates all fixups in the patches
760 *
761 * @returns VBox status code.
762 * @param pNode Current node
763 * @param pParam Pointer to the VM.
764 */
765static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
766{
767 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
768 PVM pVM = (PVM)pParam;
769 RTRCINTPTR delta;
770 int rc;
771
772 /* Nothing to do if the patch is not active. */
773 if (pPatch->patch.uState == PATCH_REFUSED)
774 return 0;
775
776 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
777 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
778
779 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
780 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
781
782 /*
783 * Apply fixups.
784 */
785 AVLPVKEY key = NULL;
786 for (;;)
787 {
788 /* Get the record that's closest from above (after or equal to key). */
789 PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
790 if (!pRec)
791 break;
792
793 key = (uint8_t *)pRec->Core.Key + 1; /* search for the next record during the next round. */
794
795 switch (pRec->uType)
796 {
797 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
798 Assert(pRec->pDest == pRec->pSource); Assert(PATM_IS_ASMFIX(pRec->pSource));
799 Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
800 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
801 break;
802
803 case FIXUP_ABSOLUTE:
804 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
805 if ( !pRec->pSource
806 || PATMIsPatchGCAddr(pVM, pRec->pSource))
807 {
808 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
809 }
810 else
811 {
812 uint8_t curInstr[15];
813 uint8_t oldInstr[15];
814 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
815
816 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
817
818 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
819 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
820
821 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
822 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
823
824 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
825
826 if ( rc == VERR_PAGE_NOT_PRESENT
827 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
828 {
829 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
830
831 Log(("PATM: Patch page not present -> check later!\n"));
832 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
833 pPage,
834 pPage + (PAGE_SIZE - 1) /* inclusive! */,
835 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
836 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
837 }
838 else
839 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
840 {
841 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
842 /*
843 * Disable patch; this is not a good solution
844 */
845 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
846 pPatch->patch.uState = PATCH_DISABLED;
847 }
848 else
849 if (RT_SUCCESS(rc))
850 {
851 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
852 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
853 AssertRC(rc);
854 }
855 }
856 break;
857
858 case FIXUP_REL_JMPTOPATCH:
859 {
860 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
861
862 if ( pPatch->patch.uState == PATCH_ENABLED
863 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
864 {
865 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
866 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
867 RTRCPTR pJumpOffGC;
868 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
869 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
870
871#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
872 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
873#else
874 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
875#endif
876
877 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
878#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
879 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
880 {
881 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
882
883 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
884 oldJump[0] = pPatch->patch.aPrivInstr[0];
885 oldJump[1] = pPatch->patch.aPrivInstr[1];
886 *(RTRCUINTPTR *)&oldJump[2] = displOld;
887 }
888 else
889#endif
890 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
891 {
892 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
893 oldJump[0] = 0xE9;
894 *(RTRCUINTPTR *)&oldJump[1] = displOld;
895 }
896 else
897 {
898 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
899 continue; //this should never happen!!
900 }
901 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
902
903 /*
904 * Read old patch jump and compare it to the one we previously installed
905 */
906 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
907 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
908
909 if ( rc == VERR_PAGE_NOT_PRESENT
910 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
911 {
912 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
913 Log(("PATM: Patch page not present -> check later!\n"));
914 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
915 pPage,
916 pPage + (PAGE_SIZE - 1) /* inclusive! */,
917 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
918 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
919 }
920 else
921 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
922 {
923 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
924 /*
925 * Disable patch; this is not a good solution
926 */
927 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
928 pPatch->patch.uState = PATCH_DISABLED;
929 }
930 else
931 if (RT_SUCCESS(rc))
932 {
933 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
934 AssertRC(rc);
935 }
936 else
937 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
938 }
939 else
940 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
941
942 pRec->pDest = pTarget;
943 break;
944 }
945
946 case FIXUP_REL_JMPTOGUEST:
947 {
948 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
949 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
950
951 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
952 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
953 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
954 pRec->pSource = pSource;
955 break;
956 }
957
958 case FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL:
959 case FIXUP_CONSTANT_IN_PATCH_ASM_TMPL:
960 /* Only applicable when loading state. */
961 Assert(pRec->pDest == pRec->pSource);
962 Assert(PATM_IS_ASMFIX(pRec->pSource));
963 break;
964
965 default:
966 AssertMsg(0, ("Invalid fixup type!!\n"));
967 return VERR_INVALID_PARAMETER;
968 }
969 }
970
971 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
972 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
973 return 0;
974}
975
976#ifdef VBOX_WITH_DEBUGGER
977
978/**
979 * Callback function for RTAvloU32DoWithAll
980 *
981 * Enables the patch that's being enumerated
982 *
983 * @returns 0 (continue enumeration).
984 * @param pNode Current node
985 * @param pVM The cross context VM structure.
986 */
987static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
988{
989 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
990
991 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
992 return 0;
993}
994
995
996/**
997 * Callback function for RTAvloU32DoWithAll
998 *
999 * Disables the patch that's being enumerated
1000 *
1001 * @returns 0 (continue enumeration).
1002 * @param pNode Current node
1003 * @param pVM The cross context VM structure.
1004 */
1005static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
1006{
1007 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1008
1009 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1010 return 0;
1011}
1012
1013#endif /* VBOX_WITH_DEBUGGER */
1014
1015/**
1016 * Returns the host context pointer of the GC context structure
1017 *
1018 * @returns Host context pointer to the GC state structure, or NULL when HM is enabled.
1019 * @param pVM The cross context VM structure.
1020 */
1021VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1022{
1023 AssertReturn(!HMIsEnabled(pVM), NULL);
1024 return pVM->patm.s.pGCStateHC;
1025}
1026
1027
1028/**
1029 * Allows or disallows patching of privileged instructions executed by the guest OS.
1030 *
1031 * @returns VBox status code.
1032 * @param pUVM The user mode VM handle.
1033 * @param fAllowPatching Allow/disallow patching
1034 */
1035VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1036{
1037 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1038 PVM pVM = pUVM->pVM;
1039 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1040
1041 if (!HMIsEnabled(pVM))
1042 pVM->fPATMEnabled = fAllowPatching;
1043 else
1044 Assert(!pVM->fPATMEnabled);
1045 return VINF_SUCCESS;
1046}
1047
1048
1049/**
1050 * Checks if the patch manager is enabled or not.
1051 *
1052 * @returns true if enabled, false if not (or if invalid handle).
1053 * @param pUVM The user mode VM handle.
1054 */
1055VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1056{
1057 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1058 PVM pVM = pUVM->pVM;
1059 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1060 return PATMIsEnabled(pVM);
1061}
1062
1063
1064/**
1065 * Convert a GC patch block pointer to a HC patch pointer
1066 *
1067 * @returns HC pointer or NULL if it's not a GC patch pointer
1068 * @param pVM The cross context VM structure.
1069 * @param pAddrGC GC pointer
1070 */
1071VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1072{
1073 AssertReturn(!HMIsEnabled(pVM), NULL);
1074 RTRCUINTPTR offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
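/* Note: the subtraction is done in unsigned arithmetic, so an address below
 * pPatchMemGC wraps around and is rejected by the range check below as well. */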
1075 if (offPatch >= pVM->patm.s.cbPatchMem)
1076 {
1077 offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC;
1078 if (offPatch >= pVM->patm.s.cbPatchHelpers)
1079 return NULL;
1080 return pVM->patm.s.pbPatchHelpersR3 + offPatch;
1081 }
1082 return pVM->patm.s.pPatchMemHC + offPatch;
1083}
1084
1085
1086/**
1087 * Convert guest context address to host context pointer
1088 *
1089 * @returns Host context pointer or NULL in case of an error.
1090 * @param pVM The cross context VM structure.
1091 * @param pCacheRec Address conversion cache record
1092 * @param pGCPtr Guest context pointer
1093 *
1096 */
1097R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1098{
1099 int rc;
1100 R3PTRTYPE(uint8_t *) pHCPtr;
1101 uint32_t offset;
1102
1103 offset = (RTRCUINTPTR)pGCPtr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1104 if (offset < pVM->patm.s.cbPatchMem)
1105 {
1106#ifdef VBOX_STRICT
1107 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1108 Assert(pPatch); Assert(offset - pPatch->pPatchBlockOffset < pPatch->cbPatchBlockSize);
1109#endif
1110 return pVM->patm.s.pPatchMemHC + offset;
1111 }
1112 /* Note! We're _not_ including the patch helpers here. */
1113
1114 offset = pGCPtr & PAGE_OFFSET_MASK;
1115 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1116 return pCacheRec->pPageLocStartHC + offset;
1117
1118 /* Release previous lock if any. */
1119 if (pCacheRec->Lock.pvMap)
1120 {
1121 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1122 pCacheRec->Lock.pvMap = NULL;
1123 }
1124
1125 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1126 if (rc != VINF_SUCCESS)
1127 {
1128 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2CCPtrReadOnly failed for %08X\n", pGCPtr));
1129 return NULL;
1130 }
1131 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1132 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1133 return pHCPtr;
1134}
1135
1136
1137/**
1138 * Calculates and fills in all branch targets
1139 *
1140 * @returns VBox status code.
1141 * @param pVM The cross context VM structure.
1142 * @param pPatch Current patch block pointer
1143 *
1144 */
1145static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1146{
1147 int32_t displ;
1148
1149 PJUMPREC pRec = 0;
1150 unsigned nrJumpRecs = 0;
1151
1152 /*
1153 * Set all branch targets inside the patch block.
1154 * We remove all jump records as they are no longer needed afterwards.
1155 */
1156 while (true)
1157 {
1158 RCPTRTYPE(uint8_t *) pInstrGC;
1159 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1160
1161 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1162 if (pRec == 0)
1163 break;
1164
1165 nrJumpRecs++;
1166
1167 /* HC in patch block to GC in patch block. */
1168 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1169
1170 if (pRec->opcode == OP_CALL)
1171 {
1172 /* Special case: call function replacement patch from this patch block.
1173 */
1174 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1175 if (!pFunctionRec)
1176 {
1177 int rc;
1178
1179 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1180 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1181 else
1182 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1183
1184 if (RT_FAILURE(rc))
1185 {
1186 uint8_t *pPatchHC;
1187 RTRCPTR pPatchGC;
1188 RTRCPTR pOrgInstrGC;
1189
1190 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1191 Assert(pOrgInstrGC);
1192
1193 /* Failure for some reason -> mark exit point with int 3. */
1194 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1195
1196 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1197 Assert(pPatchGC);
1198
1199 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1200
1201 /* Set a breakpoint at the very beginning of the recompiled instruction */
1202 *pPatchHC = 0xCC;
1203
1204 continue;
1205 }
1206 }
1207 else
1208 {
1209 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1210 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1211 }
1212
1213 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1214 }
1215 else
1216 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1217
1218 if (pBranchTargetGC == 0)
1219 {
1220 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1221 return VERR_PATCHING_REFUSED;
1222 }
1223 /* Our jumps *always* have a dword displacement (to make things easier). */
1224 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
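/* Worked example: a jump at patch address 0x100 whose dword displacement field
 * starts at offset 1 and which targets 0x180 gets
 * displ = 0x180 - (0x100 + 1 + 4) = 0x7b. */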
1225 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1226 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1227 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1228 }
1229 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1230 Assert(pPatch->JumpTree == 0);
1231 return VINF_SUCCESS;
1232}
1233
1234/**
1235 * Add an illegal instruction record
1236 *
1237 * @param pVM The cross context VM structure.
1238 * @param pPatch Patch structure ptr
1239 * @param pInstrGC Guest context pointer to privileged instruction
1240 *
1241 */
1242static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1243{
1244 PAVLPVNODECORE pRec;
1245
1246 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1247 Assert(pRec);
1248 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1249
1250 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1251 Assert(ret); NOREF(ret);
1252 pPatch->pTempInfo->nrIllegalInstr++;
1253}
1254
1255static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1256{
1257 PAVLPVNODECORE pRec;
1258
1259 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1260 if (pRec)
1261 return true;
1262 else
1263 return false;
1264}
1265
1266/**
1267 * Add a patch to guest lookup record
1268 *
1269 * @param pVM The cross context VM structure.
1270 * @param pPatch Patch structure ptr
1271 * @param pPatchInstrHC Host context pointer into the patch block
1272 * @param pInstrGC Guest context pointer to privileged instruction
1273 * @param enmType Lookup type
1274 * @param fDirty Dirty flag
1275 *
1276 * @note Be extremely careful with this function. Make absolutely sure the guest
1277 * address is correct! (to avoid executing instructions twice!)
1278 */
1279void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1280{
1281 bool ret;
1282 PRECPATCHTOGUEST pPatchToGuestRec;
1283 PRECGUESTTOPATCH pGuestToPatchRec;
1284 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1285
1286 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1287 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1288
1289 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1290 {
1291 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1292 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1293 return; /* already there */
1294
1295 Assert(!pPatchToGuestRec);
1296 }
1297#ifdef VBOX_STRICT
1298 else
1299 {
1300 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1301 Assert(!pPatchToGuestRec);
1302 }
1303#endif
1304
1305 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
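/* Note: a single heap block holds the patch-to-guest record immediately
 * followed by the guest-to-patch record; the (pPatchToGuestRec + 1) arithmetic
 * below and in patmr3RemoveP2GLookupRecord relies on this layout. */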
1306 Assert(pPatchToGuestRec);
1307 pPatchToGuestRec->Core.Key = PatchOffset;
1308 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1309 pPatchToGuestRec->enmType = enmType;
1310 pPatchToGuestRec->fDirty = fDirty;
1311
1312 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1313 Assert(ret);
1314
1315 /* GC to patch address */
1316 if (enmType == PATM_LOOKUP_BOTHDIR)
1317 {
1318 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1319 if (!pGuestToPatchRec)
1320 {
1321 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1322 pGuestToPatchRec->Core.Key = pInstrGC;
1323 pGuestToPatchRec->PatchOffset = PatchOffset;
1324
1325 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1326 Assert(ret);
1327 }
1328 }
1329
1330 pPatch->nrPatch2GuestRecs++;
1331}
1332
1333
1334/**
1335 * Removes a patch to guest lookup record
1336 *
1337 * @param pVM The cross context VM structure.
1338 * @param pPatch Patch structure ptr
1339 * @param pPatchInstrGC Guest context pointer to patch block
1340 */
1341void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1342{
1343 PAVLU32NODECORE pNode;
1344 PAVLU32NODECORE pNode2;
1345 PRECPATCHTOGUEST pPatchToGuestRec;
1346 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1347
1348 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1349 Assert(pPatchToGuestRec);
1350 if (pPatchToGuestRec)
1351 {
1352 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1353 {
1354 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1355
1356 Assert(pGuestToPatchRec->Core.Key);
1357 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1358 Assert(pNode2);
1359 }
1360 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1361 Assert(pNode);
1362
1363 MMR3HeapFree(pPatchToGuestRec);
1364 pPatch->nrPatch2GuestRecs--;
1365 }
1366}
1367
1368
1369/**
1370 * RTAvlPVDestroy callback.
1371 */
1372static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1373{
1374 MMR3HeapFree(pNode);
1375 return 0;
1376}
1377
1378/**
1379 * Empty the specified tree (PV tree, MMR3 heap)
1380 *
1381 * @param pVM The cross context VM structure.
1382 * @param ppTree Tree to empty
1383 */
1384static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1385{
1386 NOREF(pVM);
1387 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1388}
1389
1390
1391/**
1392 * RTAvlU32Destroy callback.
1393 */
1394static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1395{
1396 MMR3HeapFree(pNode);
1397 return 0;
1398}
1399
1400/**
1401 * Empty the specified tree (U32 tree, MMR3 heap)
1402 *
1403 * @param pVM The cross context VM structure.
1404 * @param ppTree Tree to empty
1405 */
1406static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1407{
1408 NOREF(pVM);
1409 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1410}
1411
1412
1413/**
1414 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1415 *
1416 * @returns VBox status code.
1417 * @param pVM The cross context VM structure.
1418 * @param pCpu CPU disassembly state
1419 * @param pInstrGC Guest context pointer to privileged instruction
1420 * @param pCurInstrGC Guest context pointer to the current instruction
1421 * @param pCacheRec Cache record ptr
1422 *
1423 */
1424static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1425{
1426 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1427 bool fIllegalInstr = false;
1428
1429 /*
1430 * Preliminary heuristics:
1431 * - no call instructions without a fixed displacement between cli and sti/popf
1432 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1433 * - no nested pushf/cli
1434 * - sti/popf should be the (eventual) target of all branches
1435 * - no near or far returns; no int xx, no into
1436 *
1437 * Note: Later on we can impose less strict guidelines if the need arises.
1438 */
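    /*
     * Illustrative example (not from the original source) of a sequence these
     * heuristics accept:
     *
     *      cli
     *      mov  eax, [ebx]         ; plain instructions are simply duplicated
     *      add  [ecx], eax
     *      sti                     ; exit point; must lie outside the 5 byte patch jump
     *
     * and of one they reject:
     *
     *      cli
     *      call [eax]              ; indirect call; refused without PATMFL_SUPPORT_INDIRECT_CALLS
     *      retf                    ; far return; always refused
     */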
1439
1440 /* Bail out if the patch gets too big. */
1441 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1442 {
1443 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1444 fIllegalInstr = true;
1445 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1446 }
1447 else
1448 {
1449 /* No unconditional jumps or calls without fixed displacements. */
1450 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1451 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1452 )
1453 {
1454 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1455 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1456 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1457 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1458 )
1459 {
1460 fIllegalInstr = true;
1461 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1462 }
1463 }
1464
1465 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1466 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1467 {
1468 if ( pCurInstrGC > pPatch->pPrivInstrGC
1469 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1470 {
1471 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1472 /* We turn this one into a int 3 callable patch. */
1473 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1474 }
1475 }
1476 else
1477 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1478 if (pPatch->opcode == OP_PUSHF)
1479 {
1480 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1481 {
1482 fIllegalInstr = true;
1483 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1484 }
1485 }
1486
1487 /* no far returns */
1488 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1489 {
1490 pPatch->pTempInfo->nrRetInstr++;
1491 fIllegalInstr = true;
1492 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1493 }
1494 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1495 || pCpu->pCurInstr->uOpcode == OP_INT
1496 || pCpu->pCurInstr->uOpcode == OP_INTO)
1497 {
1498 /* No int xx or into either. */
1499 fIllegalInstr = true;
1500 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1501 }
1502 }
1503
1504 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1505
1506 /* Illegal instruction -> end of analysis phase for this code block */
1507 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1508 return VINF_SUCCESS;
1509
1510 /* Check for exit points. */
1511 switch (pCpu->pCurInstr->uOpcode)
1512 {
1513 case OP_SYSEXIT:
1514 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1515
1516 case OP_SYSENTER:
1517 case OP_ILLUD2:
1518 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1519 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1520 return VINF_SUCCESS;
1521
1522 case OP_STI:
1523 case OP_POPF:
1524 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1525 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1526 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1527 {
1528 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1529 return VERR_PATCHING_REFUSED;
1530 }
1531 if (pPatch->opcode == OP_PUSHF)
1532 {
1533 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1534 {
1535 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1536 return VINF_SUCCESS;
1537
1538 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1539 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1540 pPatch->flags |= PATMFL_CHECK_SIZE;
1541 }
1542 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1543 }
1544 /* else: fall through. */
1545 case OP_RETN: /* exit point for function replacement */
1546 return VINF_SUCCESS;
1547
1548 case OP_IRET:
1549 return VINF_SUCCESS; /* exitpoint */
1550
1551 case OP_CPUID:
1552 case OP_CALL:
1553 case OP_JMP:
1554 break;
1555
1556#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1557 case OP_STR:
1558 break;
1559#endif
1560
1561 default:
1562 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1563 {
1564 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1565 return VINF_SUCCESS; /* exit point */
1566 }
1567 break;
1568 }
1569
1570 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1571 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1572 {
1573 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1574 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1575 return VINF_SUCCESS;
1576 }
1577
1578 return VWRN_CONTINUE_ANALYSIS;
1579}
1580
1581/**
1582 * Analyses the instructions inside a function for compliance
1583 *
1584 * @returns VBox status code.
1585 * @param pVM The cross context VM structure.
1586 * @param pCpu CPU disassembly state
1587 * @param pInstrGC Guest context pointer to privileged instruction
1588 * @param pCurInstrGC Guest context pointer to the current instruction
1589 * @param pCacheRec Cache record ptr
1590 *
1591 */
1592static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1593{
1594 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1595 bool fIllegalInstr = false;
1596 NOREF(pInstrGC);
1597
1598 // Preliminary heuristics:
1599 // - no call instructions
1600 // - ret ends a block
1601
1602 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1603
1604 // bail out if the patch gets too big
1605 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1606 {
1607 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1608 fIllegalInstr = true;
1609 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1610 }
1611 else
1612 {
1613 // no unconditional jumps or calls without fixed displacements
1614 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1615 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1616 )
1617 {
1618 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1619 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1620 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1621 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1622 )
1623 {
1624 fIllegalInstr = true;
1625 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1626 }
1627 }
1628 else /* no far returns */
1629 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1630 {
1631 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1632 fIllegalInstr = true;
1633 }
1634 else /* no int xx or into either */
1635 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1636 {
1637 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1638 fIllegalInstr = true;
1639 }
1640
1641 #if 0
1642 ///@todo we can handle certain in/out and privileged instructions in the guest context
1643 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1644 {
1645 Log(("Illegal instructions for function patch!!\n"));
1646 return VERR_PATCHING_REFUSED;
1647 }
1648 #endif
1649 }
1650
1651 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1652
1653 /* Illegal instruction -> end of analysis phase for this code block */
1654 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1655 {
1656 return VINF_SUCCESS;
1657 }
1658
1659 // Check for exit points
1660 switch (pCpu->pCurInstr->uOpcode)
1661 {
1662 case OP_ILLUD2:
1663 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1664 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1665 return VINF_SUCCESS;
1666
1667 case OP_IRET:
1668 case OP_SYSEXIT: /* will fault or emulated in GC */
1669 case OP_RETN:
1670 return VINF_SUCCESS;
1671
1672#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1673 case OP_STR:
1674 break;
1675#endif
1676
1677 case OP_POPF:
1678 case OP_STI:
1679 return VWRN_CONTINUE_ANALYSIS;
1680 default:
1681 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1682 {
1683 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1684 return VINF_SUCCESS; /* exit point */
1685 }
1686 return VWRN_CONTINUE_ANALYSIS;
1687 }
1688
1689 return VWRN_CONTINUE_ANALYSIS;
1690}
1691
1692/**
1693 * Recompiles the instructions in a code block
1694 *
1695 * @returns VBox status code.
1696 * @param pVM The cross context VM structure.
1697 * @param pCpu CPU disassembly state
1698 * @param pInstrGC Guest context pointer to privileged instruction
1699 * @param pCurInstrGC Guest context pointer to the current instruction
1700 * @param pCacheRec Cache record ptr
1701 *
1702 */
1703static DECLCALLBACK(int) patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1704{
1705 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1706 int rc = VINF_SUCCESS;
1707 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1708
1709 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1710
1711 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1712 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1713 {
1714 /*
1715 * Been there, done that; so insert a jump (we don't want to duplicate code)
1716 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1717 */
1718 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1719 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1720 }
1721
1722 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1723 {
1724 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1725 }
1726 else
1727 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1728
1729 if (RT_FAILURE(rc))
1730 return rc;
1731
1732 /* Note: Never do a direct return unless a failure is encountered! */
1733
1734 /* Clear recompilation of next instruction flag; we are doing that right here. */
1735 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1736 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1737
1738 /* Add lookup record for patch to guest address translation */
1739 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1740
1741 /* Update lowest and highest instruction address for this patch */
1742 if (pCurInstrGC < pPatch->pInstrGCLowest)
1743 pPatch->pInstrGCLowest = pCurInstrGC;
1744 else
1745 if (pCurInstrGC > pPatch->pInstrGCHighest)
1746 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1747
1748 /* Illegal instruction -> end of recompile phase for this code block. */
1749 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1750 {
1751 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1752 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1753 goto end;
1754 }
1755
1756 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1757 * Indirect calls are handled below.
1758 */
1759 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1760 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1761 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1762 {
1763 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1764 if (pTargetGC == 0)
1765 {
1766 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1767 return VERR_PATCHING_REFUSED;
1768 }
1769
1770 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1771 {
1772 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1773 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1774 if (RT_FAILURE(rc))
1775 goto end;
1776 }
1777 else
1778 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1779
1780 if (RT_SUCCESS(rc))
1781 rc = VWRN_CONTINUE_RECOMPILE;
1782
1783 goto end;
1784 }
1785
1786 switch (pCpu->pCurInstr->uOpcode)
1787 {
1788 case OP_CLI:
1789 {
1790 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1791 * until we've found the proper exit point(s).
1792 */
1793 if ( pCurInstrGC != pInstrGC
1794 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1795 )
1796 {
1797 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1798 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1799 }
1800 /* Set by irq inhibition; no longer valid now. */
1801 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1802
1803 rc = patmPatchGenCli(pVM, pPatch);
1804 if (RT_SUCCESS(rc))
1805 rc = VWRN_CONTINUE_RECOMPILE;
1806 break;
1807 }
1808
1809 case OP_MOV:
1810 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1811 {
1812 /* mov ss, src? */
1813 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1814 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1815 {
1816 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1817 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1818 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1819 }
1820#if 0 /* necessary for Haiku */
1821 else
1822 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1823 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1824 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1825 {
1826 /* mov GPR, ss */
1827 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1828 if (RT_SUCCESS(rc))
1829 rc = VWRN_CONTINUE_RECOMPILE;
1830 break;
1831 }
1832#endif
1833 }
1834 goto duplicate_instr;
1835
1836 case OP_POP:
1837 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1838 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1839 {
1840 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1841
1842 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1843 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1844 }
1845 goto duplicate_instr;
1846
1847 case OP_STI:
1848 {
1849 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1850
1851 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1852 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1853 {
1854 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1855 fInhibitIRQInstr = true;
1856 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1857 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1858 }
1859 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1860
1861 if (RT_SUCCESS(rc))
1862 {
1863 DISCPUSTATE cpu = *pCpu;
1864 unsigned cbInstr;
1865 int disret;
1866 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1867
1868 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1869
1870 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1871 { /* Force pNextInstrHC out of scope after using it */
1872 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1873 if (pNextInstrHC == NULL)
1874 {
1875 AssertFailed();
1876 return VERR_PATCHING_REFUSED;
1877 }
1878
1879 // Disassemble the next instruction
1880 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1881 }
1882 if (disret == false)
1883 {
1884 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1885 return VERR_PATCHING_REFUSED;
1886 }
1887 pReturnInstrGC = pNextInstrGC + cbInstr;
1888
1889 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1890 || pReturnInstrGC <= pInstrGC
1891 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1892 )
1893 {
1894 /* Not an exit point for function duplication patches */
1895 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1896 && RT_SUCCESS(rc))
1897 {
1898 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1899 rc = VWRN_CONTINUE_RECOMPILE;
1900 }
1901 else
1902 rc = VINF_SUCCESS; //exit point
1903 }
1904 else {
1905 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1906 rc = VERR_PATCHING_REFUSED; //not allowed!!
1907 }
1908 }
1909 break;
1910 }
1911
1912 case OP_POPF:
1913 {
1914 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1915
1916 /* Not an exit point for IDT handler or function replacement patches */
1917 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1918 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1919 fGenerateJmpBack = false;
1920
1921 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1922 if (RT_SUCCESS(rc))
1923 {
1924 if (fGenerateJmpBack == false)
1925 {
1926 /* Not an exit point for IDT handler or function replacement patches */
1927 rc = VWRN_CONTINUE_RECOMPILE;
1928 }
1929 else
1930 {
1931 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1932 rc = VINF_SUCCESS; /* exit point! */
1933 }
1934 }
1935 break;
1936 }
1937
1938 case OP_PUSHF:
1939 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1940 if (RT_SUCCESS(rc))
1941 rc = VWRN_CONTINUE_RECOMPILE;
1942 break;
1943
1944 case OP_PUSH:
1945 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
1946 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1947 {
1948 rc = patmPatchGenPushCS(pVM, pPatch);
1949 if (RT_SUCCESS(rc))
1950 rc = VWRN_CONTINUE_RECOMPILE;
1951 break;
1952 }
1953 goto duplicate_instr;
1954
1955 case OP_IRET:
1956 Log(("IRET at %RRv\n", pCurInstrGC));
1957 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1958 if (RT_SUCCESS(rc))
1959 {
1960 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1961 rc = VINF_SUCCESS; /* exit point by definition */
1962 }
1963 break;
1964
1965 case OP_ILLUD2:
1966 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1967 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1968 if (RT_SUCCESS(rc))
1969 rc = VINF_SUCCESS; /* exit point by definition */
1970 Log(("Illegal opcode (0xf 0xb)\n"));
1971 break;
1972
1973 case OP_CPUID:
1974 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1975 if (RT_SUCCESS(rc))
1976 rc = VWRN_CONTINUE_RECOMPILE;
1977 break;
1978
1979 case OP_STR:
1980#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
1981 /* Now safe because our shadow TR entry is identical to the guest's. */
1982 goto duplicate_instr;
1983#endif
1984 case OP_SLDT:
1985 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1986 if (RT_SUCCESS(rc))
1987 rc = VWRN_CONTINUE_RECOMPILE;
1988 break;
1989
1990 case OP_SGDT:
1991 case OP_SIDT:
1992 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1993 if (RT_SUCCESS(rc))
1994 rc = VWRN_CONTINUE_RECOMPILE;
1995 break;
1996
1997 case OP_RETN:
1998 /* retn is an exit point for function patches */
1999 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2000 if (RT_SUCCESS(rc))
2001 rc = VINF_SUCCESS; /* exit point by definition */
2002 break;
2003
2004 case OP_SYSEXIT:
2005 /* Duplicate it, so it can be emulated in GC (or fault). */
2006 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2007 if (RT_SUCCESS(rc))
2008 rc = VINF_SUCCESS; /* exit point by definition */
2009 break;
2010
2011 case OP_CALL:
2012 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2013 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2014 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2015 */
2016 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2017 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2018 {
2019 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2020 if (RT_SUCCESS(rc))
2021 {
2022 rc = VWRN_CONTINUE_RECOMPILE;
2023 }
2024 break;
2025 }
2026 goto gen_illegal_instr;
2027
2028 case OP_JMP:
2029 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2030 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2031 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2032 */
2033 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2034 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2035 {
2036 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2037 if (RT_SUCCESS(rc))
2038 rc = VINF_SUCCESS; /* end of branch */
2039 break;
2040 }
2041 goto gen_illegal_instr;
2042
2043 case OP_INT3:
2044 case OP_INT:
2045 case OP_INTO:
2046 goto gen_illegal_instr;
2047
2048 case OP_MOV_DR:
2049 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2050 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2051 {
2052 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2053 if (RT_SUCCESS(rc))
2054 rc = VWRN_CONTINUE_RECOMPILE;
2055 break;
2056 }
2057 goto duplicate_instr;
2058
2059 case OP_MOV_CR:
2060 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2061 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2062 {
2063 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2064 if (RT_SUCCESS(rc))
2065 rc = VWRN_CONTINUE_RECOMPILE;
2066 break;
2067 }
2068 goto duplicate_instr;
2069
2070 default:
2071 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2072 {
2073gen_illegal_instr:
2074 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2075 if (RT_SUCCESS(rc))
2076 rc = VINF_SUCCESS; /* exit point by definition */
2077 }
2078 else
2079 {
2080duplicate_instr:
2081 Log(("patmPatchGenDuplicate\n"));
2082 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2083 if (RT_SUCCESS(rc))
2084 rc = VWRN_CONTINUE_RECOMPILE;
2085 }
2086 break;
2087 }
2088
2089end:
2090
2091 if ( !fInhibitIRQInstr
2092 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2093 {
2094 int rc2;
2095 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2096
2097 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2098 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2099 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2100 {
2101 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2102
2103 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2104 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2105 rc = VINF_SUCCESS; /* end of the line */
2106 }
2107 else
2108 {
2109 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2110 }
2111 if (RT_FAILURE(rc2))
2112 rc = rc2;
2113 }
2114
2115 if (RT_SUCCESS(rc))
2116 {
2117 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2118 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2119 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2120 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2121 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2122 )
2123 {
2124 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2125
2126 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2127 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2128
2129 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2130 AssertRC(rc);
2131 }
2132 }
2133 return rc;
2134}
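/*
 * Status code protocol used by the analysis/recompile callbacks above (a
 * summary of the code, not new behaviour):
 *      VWRN_CONTINUE_ANALYSIS / VWRN_CONTINUE_RECOMPILE - continue with the
 *          next instruction in the linear stream.
 *      VINF_SUCCESS          - an exit point was reached (sti, popf, iret,
 *          retn, an illegal instruction, ...); this stream is complete.
 *      VERR_PATCHING_REFUSED - the patch cannot be installed at all.
 */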
2135
2136
2137#ifdef LOG_ENABLED
2138
2139/**
2140 * Adds a disasm jump record (temporary, to prevent duplicate analysis)
2141 *
2142 * @param pVM The cross context VM structure.
2143 * @param pPatch Patch structure ptr
2144 * @param pInstrGC Guest context pointer to privileged instruction
2145 *
2146 */
2147static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2148{
2149 PAVLPVNODECORE pRec;
2150
2151 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2152 Assert(pRec);
2153 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2154
2155 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2156 Assert(ret);
2157}
2158
2159/**
2160 * Checks if jump target has been analysed before.
2161 *
2162 * @returns true if the jump target has been analysed before, false otherwise.
2163 * @param pPatch Patch struct
2164 * @param pInstrGC Jump target
2165 *
2166 */
2167static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2168{
2169 PAVLPVNODECORE pRec;
2170
2171 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2172 if (pRec)
2173 return true;
2174 return false;
2175}
2176
2177/**
2178 * For proper disassembly of the final patch block
2179 *
2180 * @returns VBox status code.
2181 * @param pVM The cross context VM structure.
2182 * @param pCpu CPU disassembly state
2183 * @param pInstrGC Guest context pointer to privileged instruction
2184 * @param pCurInstrGC Guest context pointer to the current instruction
2185 * @param pCacheRec Cache record ptr
2186 *
2187 */
2188DECLCALLBACK(int) patmR3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC,
2189 RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2190{
2191 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2192 NOREF(pInstrGC);
2193
2194 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2195 {
2196 /* Could be an int3 inserted in a call patch. Check to be sure */
2197 DISCPUSTATE cpu;
2198 RTRCPTR pOrgJumpGC;
2199
2200 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2201
2202 { /* Force pOrgJumpHC out of scope after using it */
2203 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2204
2205 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2206 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2207 return VINF_SUCCESS;
2208 }
2209 return VWRN_CONTINUE_ANALYSIS;
2210 }
2211
2212 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2213 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2214 {
2215 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2216 return VWRN_CONTINUE_ANALYSIS;
2217 }
2218
2219 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2220 || pCpu->pCurInstr->uOpcode == OP_INT
2221 || pCpu->pCurInstr->uOpcode == OP_IRET
2222 || pCpu->pCurInstr->uOpcode == OP_RETN
2223 || pCpu->pCurInstr->uOpcode == OP_RETF
2224 )
2225 {
2226 return VINF_SUCCESS;
2227 }
2228
2229 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2230 return VINF_SUCCESS;
2231
2232 return VWRN_CONTINUE_ANALYSIS;
2233}
2234
2235
2236/**
2237 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2238 *
2239 * @returns VBox status code.
2240 * @param pVM The cross context VM structure.
2241 * @param pInstrGC Guest context pointer to the initial privileged instruction
2242 * @param pCurInstrGC Guest context pointer to the current instruction
2243 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2244 * @param pCacheRec Cache record ptr
2245 *
2246 */
2247int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2248{
2249 DISCPUSTATE cpu;
2250 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2251 int rc = VWRN_CONTINUE_ANALYSIS;
2252 uint32_t cbInstr, delta;
2253 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2254 bool disret;
2255 char szOutput[256];
2256
2257 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2258
2259 /* We need this to determine branch targets (and for disassembling). */
2260 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2261
2262 while (rc == VWRN_CONTINUE_ANALYSIS)
2263 {
2264 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2265 if (pCurInstrHC == NULL)
2266 {
2267 rc = VERR_PATCHING_REFUSED;
2268 goto end;
2269 }
2270
2271 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2272 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2273 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2274 {
2275 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2276
2277 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2278 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2279 else
2280 Log(("DIS %s", szOutput));
2281
2282 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2283 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2284 {
2285 rc = VINF_SUCCESS;
2286 goto end;
2287 }
2288 }
2289 else
2290 Log(("DIS: %s", szOutput));
2291
2292 if (disret == false)
2293 {
2294 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2295 rc = VINF_SUCCESS;
2296 goto end;
2297 }
2298
2299 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2300 if (rc != VWRN_CONTINUE_ANALYSIS) {
2301 break; //done!
2302 }
2303
2304 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2305 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2306 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2307 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2308 )
2309 {
2310 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2311 RTRCPTR pOrgTargetGC;
2312
2313 if (pTargetGC == 0)
2314 {
2315 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2316 rc = VERR_PATCHING_REFUSED;
2317 break;
2318 }
2319
2320 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2321 {
2322 //jump back to guest code
2323 rc = VINF_SUCCESS;
2324 goto end;
2325 }
2326 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2327
2328 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2329 {
2330 rc = VINF_SUCCESS;
2331 goto end;
2332 }
2333
2334 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2335 {
2336 /* New jump, let's check it. */
2337 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2338
2339 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2340 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2341 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2342
2343 if (rc != VINF_SUCCESS) {
2344 break; //done!
2345 }
2346 }
2347 if (cpu.pCurInstr->uOpcode == OP_JMP)
2348 {
2349 /* Unconditional jump; return to caller. */
2350 rc = VINF_SUCCESS;
2351 goto end;
2352 }
2353
2354 rc = VWRN_CONTINUE_ANALYSIS;
2355 }
2356 pCurInstrGC += cbInstr;
2357 }
2358end:
2359 return rc;
2360}
2361
2362/**
2363 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2364 *
2365 * @returns VBox status code.
2366 * @param pVM The cross context VM structure.
2367 * @param pInstrGC Guest context pointer to the initial privileged instruction
2368 * @param pCurInstrGC Guest context pointer to the current instruction
2369 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2370 * @param pCacheRec Cache record ptr
2371 *
2372 */
2373int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2374{
2375 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2376
2377 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2378 /* Free all disasm jump records. */
2379 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2380 return rc;
2381}
2382
2383#endif /* LOG_ENABLED */
2384
2385/**
2386 * Detects if the specified address falls within a 5 byte jump generated for an active patch.
2387 * If so, this patch is permanently disabled.
2388 *
2389 * @param pVM The cross context VM structure.
2390 * @param pInstrGC Guest context pointer to instruction
2391 * @param pConflictGC Guest context pointer to check
2392 *
2393 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2394 *
2395 */
2396VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2397{
2398 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2399 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2400 if (pTargetPatch)
2401 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2402 return VERR_PATCH_NO_CONFLICT;
2403}
2404
2405/**
2406 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2407 *
2408 * @returns VBox status code.
2409 * @param pVM The cross context VM structure.
2410 * @param pInstrGC Guest context pointer to privileged instruction
2411 * @param pCurInstrGC Guest context pointer to the current instruction
2412 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2413 * @param pCacheRec Cache record ptr
2414 *
2415 */
2416static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2417{
2418 DISCPUSTATE cpu;
2419 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2420 int rc = VWRN_CONTINUE_ANALYSIS;
2421 uint32_t cbInstr;
2422 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2423 bool disret;
2424#ifdef LOG_ENABLED
2425 char szOutput[256];
2426#endif
2427
2428 while (rc == VWRN_CONTINUE_RECOMPILE)
2429 {
2430 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2431 if (pCurInstrHC == NULL)
2432 {
2433 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2434 goto end;
2435 }
2436#ifdef LOG_ENABLED
2437 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2438 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2439 Log(("Recompile: %s", szOutput));
2440#else
2441 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2442#endif
2443 if (disret == false)
2444 {
2445 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2446
2447 /* Add lookup record for patch to guest address translation */
2448 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2449 patmPatchGenIllegalInstr(pVM, pPatch);
2450 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2451 goto end;
2452 }
2453
2454 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2455 if (rc != VWRN_CONTINUE_RECOMPILE)
2456 {
2457 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2458 if ( rc == VINF_SUCCESS
2459 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2460 {
2461 DISCPUSTATE cpunext;
2462 uint32_t opsizenext;
2463 uint8_t *pNextInstrHC;
2464 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2465
2466 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2467
2468 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2469 * Recompile the next instruction as well
2470 */
2471 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2472 if (pNextInstrHC == NULL)
2473 {
2474 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2475 goto end;
2476 }
2477 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2478 if (disret == false)
2479 {
2480 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2481 goto end;
2482 }
2483 switch(cpunext.pCurInstr->uOpcode)
2484 {
2485 case OP_IRET: /* inhibit cleared in generated code */
2486 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2487 case OP_HLT:
2488 break; /* recompile these */
2489
2490 default:
2491 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2492 {
2493 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2494
2495 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2496 AssertRC(rc);
2497 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2498 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2499 }
2500 break;
2501 }
2502
2503 /* Note: after a cli we must continue to a proper exit point */
2504 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2505 {
2506 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2507 if (RT_SUCCESS(rc))
2508 {
2509 rc = VINF_SUCCESS;
2510 goto end;
2511 }
2512 break;
2513 }
2514 else
2515 rc = VWRN_CONTINUE_RECOMPILE;
2516 }
2517 else
2518 break; /* done! */
2519 }
2520
2521 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2522
2523
2524 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2525 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2526 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2527 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2528 )
2529 {
2530 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2531 if (addr == 0)
2532 {
2533 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2534 rc = VERR_PATCHING_REFUSED;
2535 break;
2536 }
2537
2538 Log(("Jump encountered target %RRv\n", addr));
2539
2540 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2541 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2542 {
2543 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2544 /* First we need to finish this linear code stream until the next exit point. */
2545 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2546 if (RT_FAILURE(rc))
2547 {
2548 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2549 break; //fatal error
2550 }
2551 }
2552
2553 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2554 {
2555 /* New code; let's recompile it. */
2556 Log(("patmRecompileCodeStream continue with jump\n"));
2557
2558 /*
2559 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2560 * this patch so we can continue our analysis
2561 *
2562 * We rely on CSAM to detect and resolve conflicts
2563 */
2564 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2565 if (pTargetPatch)
2566 {
2567 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2568 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2569 }
2570
2571 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2572 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2573 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2574
2575 if (pTargetPatch)
2576 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2577
2578 if (RT_FAILURE(rc))
2579 {
2580 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2581 break; //done!
2582 }
2583 }
2584 /* Always return to caller here; we're done! */
2585 rc = VINF_SUCCESS;
2586 goto end;
2587 }
2588 else
2589 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2590 {
2591 rc = VINF_SUCCESS;
2592 goto end;
2593 }
2594 pCurInstrGC += cbInstr;
2595 }
2596end:
2597 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2598 return rc;
2599}
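/*
 * Rough sketch of how patmRecompileCodeStream handles relative branches (a
 * summary of the code above, not additional behaviour):
 *  - conditional jcc: first finish the fall-through path up to its exit point,
 *    then recompile the branch target (unless it was recompiled already),
 *    temporarily disabling any active patch that covers the target.
 *  - unconditional jmp rel: recompile the target (unless already done) and
 *    return; the bytes following the jump are unreachable here.
 *  - any other unconditional control flow ends the current stream.
 */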
2600
2601
2602/**
2603 * Generate the jump from guest to patch code
2604 *
2605 * @returns VBox status code.
2606 * @param pVM The cross context VM structure.
2607 * @param pPatch Patch record
2608 * @param pCacheRec Guest translation lookup cache record
2609 * @param fAddFixup Whether to add a fixup record.
2610 */
2611static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2612{
2613 uint8_t temp[8];
2614 uint8_t *pPB;
2615 int rc;
2616
2617 Assert(pPatch->cbPatchJump <= sizeof(temp));
2618 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2619
2620 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2621 Assert(pPB);
2622
2623#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2624 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2625 {
2626 Assert(pPatch->pPatchJumpDestGC);
2627
2628 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2629 {
2630 // jmp [PatchCode]
2631 if (fAddFixup)
2632 {
2633 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2634 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2635 {
2636 Log(("Relocation failed for the jump in the guest code!!\n"));
2637 return VERR_PATCHING_REFUSED;
2638 }
2639 }
2640
2641 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2642 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2643 }
2644 else
2645 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2646 {
2647 // jmp [PatchCode]
2648 if (fAddFixup)
2649 {
2650 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2651 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2652 {
2653 Log(("Relocation failed for the jump in the guest code!!\n"));
2654 return VERR_PATCHING_REFUSED;
2655 }
2656 }
2657
2658 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2659 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2660 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2661 }
2662 else
2663 {
2664 Assert(0);
2665 return VERR_PATCHING_REFUSED;
2666 }
2667 }
2668 else
2669#endif
2670 {
2671 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2672
2673 // jmp [PatchCode]
2674 if (fAddFixup)
2675 {
2676 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
2677 PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2678 {
2679 Log(("Relocation failed for the jump in the guest code!!\n"));
2680 return VERR_PATCHING_REFUSED;
2681 }
2682 }
2683 temp[0] = 0xE9; //jmp
2684 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2685 }
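    /*
     * For illustration, the standard case assembled just above is a plain x86
     * near jump:
     *
     *      E9 xx xx xx xx      rel32 = PATCHCODE_PTR_GC(pPatch)
     *                                  - (pPrivInstrGC + SIZEOF_NEARJUMP32)
     *
     * i.e. the displacement is relative to the first byte after the 5 byte jump.
     */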
2686 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2687 AssertRC(rc);
2688
2689 if (rc == VINF_SUCCESS)
2690 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2691
2692 return rc;
2693}
2694
2695/**
2696 * Remove the jump from guest to patch code
2697 *
2698 * @returns VBox status code.
2699 * @param pVM The cross context VM structure.
2700 * @param pPatch Patch record
2701 */
2702static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2703{
2704#ifdef DEBUG
2705 DISCPUSTATE cpu;
2706 char szOutput[256];
2707 uint32_t cbInstr, i = 0;
2708 bool disret;
2709
2710 while (i < pPatch->cbPrivInstr)
2711 {
2712 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2713 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2714 if (disret == false)
2715 break;
2716
2717 Log(("Org patch jump: %s", szOutput));
2718 Assert(cbInstr);
2719 i += cbInstr;
2720 }
2721#endif
2722
2723 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2724 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2725#ifdef DEBUG
2726 if (rc == VINF_SUCCESS)
2727 {
2728 i = 0;
2729 while (i < pPatch->cbPrivInstr)
2730 {
2731 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2732 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2733 if (disret == false)
2734 break;
2735
2736 Log(("Org instr: %s", szOutput));
2737 Assert(cbInstr);
2738 i += cbInstr;
2739 }
2740 }
2741#endif
2742 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2743 return rc;
2744}
2745
2746/**
2747 * Generate the call from guest to patch code
2748 *
2749 * @returns VBox status code.
2750 * @param pVM The cross context VM structure.
2751 * @param pPatch Patch record
2752 * @param pTargetGC The target of the fixup (i.e. the patch code we're
2753 * calling into).
2754 * @param pCacheRec Guest translation cache record
2755 * @param fAddFixup Whether to add a fixup record.
2756 */
2757static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2758{
2759 uint8_t temp[8];
2760 uint8_t *pPB;
2761 int rc;
2762
2763 Assert(pPatch->cbPatchJump <= sizeof(temp));
2764
2765 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2766 Assert(pPB);
2767
2768 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2769
2770 // jmp [PatchCode]
2771 if (fAddFixup)
2772 {
2773 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
2774 pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2775 {
2776 Log(("Relocation failed for the jump in the guest code!!\n"));
2777 return VERR_PATCHING_REFUSED;
2778 }
2779 }
2780
2781 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2782 temp[0] = pPatch->aPrivInstr[0];
2783 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2784
2785 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2786 AssertRC(rc);
2787
2788 return rc;
2789}
2790
2791
2792/**
2793 * Patch cli/sti pushf/popf instruction block at specified location
2794 *
2795 * @returns VBox status code.
2796 * @param pVM The cross context VM structure.
2797 * @param pInstrGC Guest context pointer to privileged instruction
2798 * @param pInstrHC Host context pointer to privileged instruction
2799 * @param uOpcode Instruction opcode
2800 * @param uOpSize Size of starting instruction
2801 * @param pPatchRec Patch record
2802 *
2803 * @note returns failure if patching is not allowed or possible
2804 *
2805 */
2806static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2807 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2808{
2809 PPATCHINFO pPatch = &pPatchRec->patch;
2810 int rc = VERR_PATCHING_REFUSED;
2811 uint32_t orgOffsetPatchMem = ~0;
2812 RTRCPTR pInstrStart;
2813 bool fInserted;
2814 NOREF(pInstrHC); NOREF(uOpSize);
2815
2816 /* Save original offset (in case of failures later on) */
2817 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2818 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2819
2820 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2821 switch (uOpcode)
2822 {
2823 case OP_MOV:
2824 break;
2825
2826 case OP_CLI:
2827 case OP_PUSHF:
2828 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2829 /* Note: special precautions are taken when disabling and enabling such patches. */
2830 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2831 break;
2832
2833 default:
2834 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2835 {
2836 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2837 return VERR_INVALID_PARAMETER;
2838 }
2839 }
2840
2841 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2842 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2843
2844 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2845 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2846 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2847 )
2848 {
2849 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2850 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2851 rc = VERR_PATCHING_REFUSED;
2852 goto failure;
2853 }
2854
2855 pPatch->nrPatch2GuestRecs = 0;
2856 pInstrStart = pInstrGC;
2857
2858#ifdef PATM_ENABLE_CALL
2859 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2860#endif
2861
2862 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2863 pPatch->uCurPatchOffset = 0;
2864
2865 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2866 {
2867 Assert(pPatch->flags & PATMFL_INTHANDLER);
2868
2869 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2870 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2871 if (RT_FAILURE(rc))
2872 goto failure;
2873 }
2874
2875 /***************************************************************************************************************************/
2876 /* Note: We can't insert *any* code before a sysenter handler; some Linux guests have an invalid stack at this point!!!!! */
2877 /***************************************************************************************************************************/
2878#ifdef VBOX_WITH_STATISTICS
2879 if (!(pPatch->flags & PATMFL_SYSENTER))
2880 {
2881 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2882 if (RT_FAILURE(rc))
2883 goto failure;
2884 }
2885#endif
2886
2887 PATMP2GLOOKUPREC cacheRec;
2888 RT_ZERO(cacheRec);
2889 cacheRec.pPatch = pPatch;
2890
2891 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2892 /* Free leftover lock if any. */
2893 if (cacheRec.Lock.pvMap)
2894 {
2895 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2896 cacheRec.Lock.pvMap = NULL;
2897 }
2898 if (rc != VINF_SUCCESS)
2899 {
2900 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2901 goto failure;
2902 }
2903
2904 /* Calculated during analysis. */
2905 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2906 {
2907 /* Most likely cause: we encountered an illegal instruction very early on. */
2908 /** @todo could turn it into an int3 callable patch. */
2909 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2910 rc = VERR_PATCHING_REFUSED;
2911 goto failure;
2912 }
2913
2914 /* size of patch block */
2915 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2916
2917
2918 /* Update free pointer in patch memory. */
2919 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2920 /* Round to next 8 byte boundary. */
2921 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2922
2923 /*
2924 * Insert into patch to guest lookup tree
2925 */
2926 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2927 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2928 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2929 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2930 if (!fInserted)
2931 {
2932 rc = VERR_PATCHING_REFUSED;
2933 goto failure;
2934 }
2935
2936 /* Note that patmr3SetBranchTargets can install additional patches!! */
2937 rc = patmr3SetBranchTargets(pVM, pPatch);
2938 if (rc != VINF_SUCCESS)
2939 {
2940 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2941 goto failure;
2942 }
2943
2944#ifdef LOG_ENABLED
2945 Log(("Patch code ----------------------------------------------------------\n"));
2946 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmR3DisasmCallback, &cacheRec);
2947 /* Free leftover lock if any. */
2948 if (cacheRec.Lock.pvMap)
2949 {
2950 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2951 cacheRec.Lock.pvMap = NULL;
2952 }
2953 Log(("Patch code ends -----------------------------------------------------\n"));
2954#endif
2955
2956 /* make a copy of the guest code bytes that will be overwritten */
2957 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2958
2959 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2960 AssertRC(rc);
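    /* These saved bytes (aPrivInstr) are what patmRemoveJumpToPatch() writes back
     * later to restore the privileged instruction and whatever followed it inside
     * the 5/6 byte patch jump. */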
2961
2962 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2963 {
2964 /*uint8_t bASMInt3 = 0xCC; - unused */
2965
2966 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2967 /* Replace first opcode byte with 'int 3'. */
2968 rc = patmActivateInt3Patch(pVM, pPatch);
2969 if (RT_FAILURE(rc))
2970 goto failure;
2971
2972 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2973 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2974
2975 pPatch->flags &= ~PATMFL_INSTR_HINT;
2976 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2977 }
2978 else
2979 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2980 {
2981 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2982 /* now insert a jump in the guest code */
2983 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2984 AssertRC(rc);
2985 if (RT_FAILURE(rc))
2986 goto failure;
2987
2988 }
2989
2990 patmR3DbgAddPatch(pVM, pPatchRec);
2991
2992 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2993
2994 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2995 pPatch->pTempInfo->nrIllegalInstr = 0;
2996
2997 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2998
2999 pPatch->uState = PATCH_ENABLED;
3000 return VINF_SUCCESS;
3001
3002failure:
3003 if (pPatchRec->CoreOffset.Key)
3004 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3005
3006 patmEmptyTree(pVM, &pPatch->FixupTree);
3007 pPatch->nrFixups = 0;
3008
3009 patmEmptyTree(pVM, &pPatch->JumpTree);
3010 pPatch->nrJumpRecs = 0;
3011
3012 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3013 pPatch->pTempInfo->nrIllegalInstr = 0;
3014
3015 /* Turn this cli patch into a dummy. */
3016 pPatch->uState = PATCH_REFUSED;
3017 pPatch->pPatchBlockOffset = 0;
3018
3019 // Give back the patch memory we no longer need
3020 Assert(orgOffsetPatchMem != (uint32_t)~0);
3021 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3022
3023 return rc;
3024}
3025
3026/**
3027 * Patch IDT handler
3028 *
3029 * @returns VBox status code.
3030 * @param pVM The cross context VM structure.
3031 * @param pInstrGC Guest context pointer to privileged instruction
3032 * @param uOpSize Size of starting instruction
3033 * @param pPatchRec Patch record
3034 * @param pCacheRec Cache record ptr
3035 *
3036 * @note returns failure if patching is not allowed or possible
3037 *
3038 */
3039static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3040{
3041 PPATCHINFO pPatch = &pPatchRec->patch;
3042 bool disret;
3043 DISCPUSTATE cpuPush, cpuJmp;
3044 uint32_t cbInstr;
3045 RTRCPTR pCurInstrGC = pInstrGC;
3046 uint8_t *pCurInstrHC, *pInstrHC;
3047 uint32_t orgOffsetPatchMem = ~0;
3048
3049 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3050 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3051
3052 /*
3053 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3054 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3055 * condition here and only patch the common entrypoint once.
3056 */
3057 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3058 Assert(disret);
3059 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3060 {
3061 RTRCPTR pJmpInstrGC;
3062 int rc;
3063 pCurInstrGC += cbInstr;
3064
3065 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3066 if ( disret
3067 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3068 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3069 )
3070 {
3071 bool fInserted;
3072 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3073 if (pJmpPatch == 0)
3074 {
3075 /* Patch it first! */
3076 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3077 if (rc != VINF_SUCCESS)
3078 goto failure;
3079 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3080 Assert(pJmpPatch);
3081 }
3082 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3083 goto failure;
3084
3085 /* save original offset (in case of failures later on) */
3086 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3087
3088 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3089 pPatch->uCurPatchOffset = 0;
3090 pPatch->nrPatch2GuestRecs = 0;
3091
3092#ifdef VBOX_WITH_STATISTICS
3093 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3094 if (RT_FAILURE(rc))
3095 goto failure;
3096#endif
3097
3098 /* Install fake cli patch (to clear the virtual IF) */
3099 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3100 if (RT_FAILURE(rc))
3101 goto failure;
3102
3103 /* Add lookup record for patch to guest address translation (for the push) */
3104 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3105
3106 /* Duplicate push. */
3107 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3108 if (RT_FAILURE(rc))
3109 goto failure;
3110
3111 /* Generate jump to common entrypoint. */
3112 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3113 if (RT_FAILURE(rc))
3114 goto failure;
3115
3116 /* size of patch block */
3117 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3118
3119 /* Update free pointer in patch memory. */
3120 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3121 /* Round to next 8 byte boundary */
3122 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3123
3124 /* There's no jump from guest to patch code. */
3125 pPatch->cbPatchJump = 0;
3126
3127
3128#ifdef LOG_ENABLED
3129 Log(("Patch code ----------------------------------------------------------\n"));
3130 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmR3DisasmCallback, pCacheRec);
3131 Log(("Patch code ends -----------------------------------------------------\n"));
3132#endif
3133 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3134
3135 /*
3136 * Insert into patch to guest lookup tree
3137 */
3138 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3139 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3140 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3141 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3142 patmR3DbgAddPatch(pVM, pPatchRec);
3143
3144 pPatch->uState = PATCH_ENABLED;
3145
3146 return VINF_SUCCESS;
3147 }
3148 }
3149failure:
3150 /* Give back the patch memory we no longer need */
3151 if (orgOffsetPatchMem != (uint32_t)~0)
3152 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3153
3154 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3155}
3156
3157/**
3158 * Install a trampoline to call a guest trap handler directly
3159 *
3160 * @returns VBox status code.
3161 * @param pVM The cross context VM structure.
3162 * @param pInstrGC Guest context pointer to privileged instruction
3163 * @param pPatchRec Patch record
3164 * @param pCacheRec Cache record ptr
3165 *
3166 */
3167static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3168{
3169 PPATCHINFO pPatch = &pPatchRec->patch;
3170 int rc = VERR_PATCHING_REFUSED;
3171 uint32_t orgOffsetPatchMem = ~0;
3172 bool fInserted;
3173
3174 // save original offset (in case of failures later on)
3175 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3176
3177 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3178 pPatch->uCurPatchOffset = 0;
3179 pPatch->nrPatch2GuestRecs = 0;
3180
3181#ifdef VBOX_WITH_STATISTICS
3182 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3183 if (RT_FAILURE(rc))
3184 goto failure;
3185#endif
3186
3187 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3188 if (RT_FAILURE(rc))
3189 goto failure;
3190
3191 /* size of patch block */
3192 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3193
3194 /* Update free pointer in patch memory. */
3195 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3196 /* Round to next 8 byte boundary */
3197 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3198
3199 /* There's no jump from guest to patch code. */
3200 pPatch->cbPatchJump = 0;
3201
3202#ifdef LOG_ENABLED
3203 Log(("Patch code ----------------------------------------------------------\n"));
3204 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmR3DisasmCallback, pCacheRec);
3205 Log(("Patch code ends -----------------------------------------------------\n"));
3206#endif
3207 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3208 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3209
3210 /*
3211 * Insert into patch to guest lookup tree
3212 */
3213 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3214 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3215 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3216 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3217 patmR3DbgAddPatch(pVM, pPatchRec);
3218
3219 pPatch->uState = PATCH_ENABLED;
3220 return VINF_SUCCESS;
3221
3222failure:
3223 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3224
3225 /* Turn this cli patch into a dummy. */
3226 pPatch->uState = PATCH_REFUSED;
3227 pPatch->pPatchBlockOffset = 0;
3228
3229 /* Give back the patch memory we no longer need */
3230 Assert(orgOffsetPatchMem != (uint32_t)~0);
3231 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3232
3233 return rc;
3234}
3235
3236
3237#ifdef LOG_ENABLED
3238/**
3239 * Check if the instruction is patched as a common idt handler
3240 *
3241 * @returns true or false
3242 * @param pVM The cross context VM structure.
3243 * @param pInstrGC Guest context pointer to the instruction
3244 *
3245 */
3246static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3247{
3248 PPATMPATCHREC pRec;
3249
3250 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3251 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3252 return true;
3253 return false;
3254}
3255#endif //LOG_ENABLED
3256
3257
3258/**
3259 * Duplicates a complete function
3260 *
3261 * @returns VBox status code.
3262 * @param pVM The cross context VM structure.
3263 * @param pInstrGC Guest context pointer to privileged instruction
3264 * @param pPatchRec Patch record
3265 * @param pCacheRec Cache record ptr
3266 *
3267 */
3268static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3269{
3270 PPATCHINFO pPatch = &pPatchRec->patch;
3271 int rc = VERR_PATCHING_REFUSED;
3272 uint32_t orgOffsetPatchMem = ~0;
3273 bool fInserted;
3274
3275 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3276 /* Save original offset (in case of failures later on). */
3277 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3278
3279 /* We will not go on indefinitely with call instruction handling. */
3280 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3281 {
3282 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3283 return VERR_PATCHING_REFUSED;
3284 }
3285
3286 pVM->patm.s.ulCallDepth++;
3287
3288#ifdef PATM_ENABLE_CALL
3289 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3290#endif
3291
3292 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3293
3294 pPatch->nrPatch2GuestRecs = 0;
3295 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3296 pPatch->uCurPatchOffset = 0;
3297
3298 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3299 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3300 if (RT_FAILURE(rc))
3301 goto failure;
3302
3303#ifdef VBOX_WITH_STATISTICS
3304 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3305 if (RT_FAILURE(rc))
3306 goto failure;
3307#endif
3308
3309 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3310 if (rc != VINF_SUCCESS)
3311 {
3312 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3313 goto failure;
3314 }
3315
3316 //size of patch block
3317 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3318
3319 //update free pointer in patch memory
3320 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3321 /* Round to next 8 byte boundary. */
3322 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3323
3324 pPatch->uState = PATCH_ENABLED;
3325
3326 /*
3327 * Insert into patch to guest lookup tree
3328 */
3329 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3330 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3331 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3332 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3333 if (!fInserted)
3334 {
3335 rc = VERR_PATCHING_REFUSED;
3336 goto failure;
3337 }
3338
3339 /* Note that patmr3SetBranchTargets can install additional patches!! */
3340 rc = patmr3SetBranchTargets(pVM, pPatch);
3341 if (rc != VINF_SUCCESS)
3342 {
3343 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3344 goto failure;
3345 }
3346
3347 patmR3DbgAddPatch(pVM, pPatchRec);
3348
3349#ifdef LOG_ENABLED
3350 Log(("Patch code ----------------------------------------------------------\n"));
3351 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmR3DisasmCallback, pCacheRec);
3352 Log(("Patch code ends -----------------------------------------------------\n"));
3353#endif
3354
3355 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3356
3357 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3358 pPatch->pTempInfo->nrIllegalInstr = 0;
3359
3360 pVM->patm.s.ulCallDepth--;
3361 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3362 return VINF_SUCCESS;
3363
3364failure:
3365 if (pPatchRec->CoreOffset.Key)
3366 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3367
3368 patmEmptyTree(pVM, &pPatch->FixupTree);
3369 pPatch->nrFixups = 0;
3370
3371 patmEmptyTree(pVM, &pPatch->JumpTree);
3372 pPatch->nrJumpRecs = 0;
3373
3374 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3375 pPatch->pTempInfo->nrIllegalInstr = 0;
3376
3377 /* Turn this cli patch into a dummy. */
3378 pPatch->uState = PATCH_REFUSED;
3379 pPatch->pPatchBlockOffset = 0;
3380
3381 // Give back the patch memory we no longer need
3382 Assert(orgOffsetPatchMem != (uint32_t)~0);
3383 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3384
3385 pVM->patm.s.ulCallDepth--;
3386 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3387 return rc;
3388}
3389
3390/**
3391 * Creates trampoline code to jump inside an existing patch
3392 *
3393 * @returns VBox status code.
3394 * @param pVM The cross context VM structure.
3395 * @param pInstrGC Guest context pointer to privileged instruction
3396 * @param pPatchRec Patch record
3397 *
3398 */
3399static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3400{
3401 PPATCHINFO pPatch = &pPatchRec->patch;
3402 RTRCPTR pPage, pPatchTargetGC = 0;
3403 uint32_t orgOffsetPatchMem = ~0;
3404 int rc = VERR_PATCHING_REFUSED;
3405 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3406 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3407 bool fInserted = false;
3408
3409 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3410 /* Save original offset (in case of failures later on). */
3411 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3412
3413 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3414 /** @todo we already checked this before */
3415 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3416
3417 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3418 if (pPatchPage)
3419 {
3420 uint32_t i;
3421
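 /* Walk all patches registered for this guest page and look for an enabled
    function duplicate that already covers pInstrGC; if one exists, the new
    trampoline only has to jump into that existing patch code. */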
3422 for (i=0;i<pPatchPage->cCount;i++)
3423 {
3424 if (pPatchPage->papPatch[i])
3425 {
3426 pPatchToJmp = pPatchPage->papPatch[i];
3427
3428 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3429 && pPatchToJmp->uState == PATCH_ENABLED)
3430 {
3431 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3432 if (pPatchTargetGC)
3433 {
3434 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3435 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3436 Assert(pPatchToGuestRec);
3437
3438 pPatchToGuestRec->fJumpTarget = true;
3439 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3440 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3441 break;
3442 }
3443 }
3444 }
3445 }
3446 }
3447 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3448
3449 /*
3450 * Only record the trampoline patch if this is the first patch to the target
3451 * or we recorded other patches already.
3452 * The goal is to refuse refreshing function duplicates if the guest
3453 * modifies code after a saved state was loaded because it is not possible
3454 * to save the relation between trampoline and target without changing the
3455 * saved state version.
3456 */
3457 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3458 || pPatchToJmp->pTrampolinePatchesHead)
3459 {
3460 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3461 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3462 if (!pTrampRec)
3463 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3464
3465 pTrampRec->pPatchTrampoline = pPatchRec;
3466 }
3467
3468 pPatch->nrPatch2GuestRecs = 0;
3469 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3470 pPatch->uCurPatchOffset = 0;
3471
3472 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3473 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3474 if (RT_FAILURE(rc))
3475 goto failure;
3476
3477#ifdef VBOX_WITH_STATISTICS
3478 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3479 if (RT_FAILURE(rc))
3480 goto failure;
3481#endif
3482
3483 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3484 if (RT_FAILURE(rc))
3485 goto failure;
3486
3487 /*
3488 * Insert into patch to guest lookup tree
3489 */
3490 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3491 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3492 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3493 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3494 if (!fInserted)
3495 {
3496 rc = VERR_PATCHING_REFUSED;
3497 goto failure;
3498 }
3499 patmR3DbgAddPatch(pVM, pPatchRec);
3500
3501 /* size of patch block */
3502 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3503
3504 /* Update free pointer in patch memory. */
3505 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3506 /* Round to next 8 byte boundary */
3507 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3508
3509 /* There's no jump from guest to patch code. */
3510 pPatch->cbPatchJump = 0;
3511
3512 /* Enable the patch. */
3513 pPatch->uState = PATCH_ENABLED;
3514 /* We allow this patch to be called as a function. */
3515 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3516
3517 if (pTrampRec)
3518 {
3519 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3520 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3521 }
3522 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3523 return VINF_SUCCESS;
3524
3525failure:
3526 if (pPatchRec->CoreOffset.Key)
3527 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3528
3529 patmEmptyTree(pVM, &pPatch->FixupTree);
3530 pPatch->nrFixups = 0;
3531
3532 patmEmptyTree(pVM, &pPatch->JumpTree);
3533 pPatch->nrJumpRecs = 0;
3534
3535 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3536 pPatch->pTempInfo->nrIllegalInstr = 0;
3537
3538 /* Turn this cli patch into a dummy. */
3539 pPatch->uState = PATCH_REFUSED;
3540 pPatch->pPatchBlockOffset = 0;
3541
3542 // Give back the patch memory we no longer need
3543 Assert(orgOffsetPatchMem != (uint32_t)~0);
3544 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3545
3546 if (pTrampRec)
3547 MMR3HeapFree(pTrampRec);
3548
3549 return rc;
3550}
3551
3552
3553/**
3554 * Patch branch target function for call/jump at specified location.
3555 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3556 *
3557 * @returns VBox status code.
3558 * @param pVM The cross context VM structure.
3559 * @param pCtx Pointer to the guest CPU context.
3560 *
3561 */
3562VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3563{
3564 RTRCPTR pBranchTarget, pPage;
3565 int rc;
3566 RTRCPTR pPatchTargetGC = 0;
3567 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3568
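 /* The patch code passes the branch target in EDX and the lookup cache slot
    (inside patch memory, see the PATMIsPatchGCAddr assertion below) in EDI;
    the result - a patch memory relative address or 0 - is returned in EAX. */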
3569 pBranchTarget = pCtx->edx;
3570 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3571
3572 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3573 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3574
3575 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3576 if (pPatchPage)
3577 {
3578 uint32_t i;
3579
3580 for (i=0;i<pPatchPage->cCount;i++)
3581 {
3582 if (pPatchPage->papPatch[i])
3583 {
3584 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3585
3586 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3587 && pPatch->uState == PATCH_ENABLED)
3588 {
3589 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3590 if (pPatchTargetGC)
3591 {
3592 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3593 break;
3594 }
3595 }
3596 }
3597 }
3598 }
3599
3600 if (pPatchTargetGC)
3601 {
3602 /* Create a trampoline that also sets PATM_ASMFIX_INTERRUPTFLAG. */
3603 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3604 }
3605 else
3606 {
3607 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3608 }
3609
3610 if (rc == VINF_SUCCESS)
3611 {
3612 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3613 Assert(pPatchTargetGC);
3614 }
3615
3616 if (pPatchTargetGC)
3617 {
3618 pCtx->eax = pPatchTargetGC;
3619 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3620 }
3621 else
3622 {
3623 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3624 pCtx->eax = 0;
3625 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3626 }
3627 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3628 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3629 AssertRC(rc);
3630
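 /* Advance EIP past the instruction that raised the VINF_PATM_DUPLICATE_FUNCTION request (PATM_ILLEGAL_INSTR_SIZE bytes). */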
3631 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3632 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3633 return VINF_SUCCESS;
3634}
3635
3636/**
3637 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3638 *
3639 * @returns VBox status code.
3640 * @param pVM The cross context VM structure.
3641 * @param pCpu Disassembly CPU structure ptr
3642 * @param pInstrGC Guest context pointer to privileged instruction
3643 * @param pCacheRec Cache record ptr
3644 *
3645 */
3646static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3647{
3648 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3649 int rc = VERR_PATCHING_REFUSED;
3650 DISCPUSTATE cpu;
3651 RTRCPTR pTargetGC;
3652 PPATMPATCHREC pPatchFunction;
3653 uint32_t cbInstr;
3654 bool disret;
3655
3656 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3657 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3658
3659 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3660 {
3661 rc = VERR_PATCHING_REFUSED;
3662 goto failure;
3663 }
3664
3665 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3666 if (pTargetGC == 0)
3667 {
3668 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3669 rc = VERR_PATCHING_REFUSED;
3670 goto failure;
3671 }
3672
3673 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3674 if (pPatchFunction == NULL)
3675 {
3676 for(;;)
3677 {
3678 /* It could be an indirect call (call -> jmp dest).
3679 * Note that it's dangerous to assume the jump will never change...
3680 */
3681 uint8_t *pTmpInstrHC;
3682
3683 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3684 Assert(pTmpInstrHC);
3685 if (pTmpInstrHC == 0)
3686 break;
3687
3688 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3689 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3690 break;
3691
3692 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3693 if (pTargetGC == 0)
3694 {
3695 break;
3696 }
3697
3698 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3699 break;
3700 }
3701 if (pPatchFunction == 0)
3702 {
3703 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3704 rc = VERR_PATCHING_REFUSED;
3705 goto failure;
3706 }
3707 }
3708
3709 // make a copy of the guest code bytes that will be overwritten
3710 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3711
3712 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3713 AssertRC(rc);
3714
3715 /* Now replace the original call in the guest code */
3716 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3717 AssertRC(rc);
3718 if (RT_FAILURE(rc))
3719 goto failure;
3720
3721 /* Lowest and highest address for write monitoring. */
3722 pPatch->pInstrGCLowest = pInstrGC;
3723 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3724 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3725
3726 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3727
3728 pPatch->uState = PATCH_ENABLED;
3729 return VINF_SUCCESS;
3730
3731failure:
3732 /* Turn this patch into a dummy. */
3733 pPatch->uState = PATCH_REFUSED;
3734
3735 return rc;
3736}
3737
3738/**
3739 * Replace the address in an MMIO instruction with the cached version.
3740 *
3741 * @returns VBox status code.
3742 * @param pVM The cross context VM structure.
3743 * @param pInstrGC Guest context pointer to privileged instruction
3744 * @param pCpu Disassembly CPU structure ptr
3745 * @param pCacheRec Cache record ptr
3746 *
3747 * @note returns failure if patching is not allowed or possible
3748 *
3749 */
3750static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3751{
3752 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3753 uint8_t *pPB;
3754 int rc = VERR_PATCHING_REFUSED;
3755
3756 Assert(pVM->patm.s.mmio.pCachedData);
3757 if (!pVM->patm.s.mmio.pCachedData)
3758 goto failure;
3759
3760 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3761 goto failure;
3762
3763 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3764 if (pPB == 0)
3765 goto failure;
3766
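 /* The 32-bit displacement occupies the last 4 bytes of the instruction; register a fixup
    for it here and overwrite it further down with the address of the cached MMIO data. */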
3767 /* Add relocation record for cached data access. */
3768 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3769 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3770 {
3771 Log(("Relocation failed for cached mmio address!!\n"));
3772 return VERR_PATCHING_REFUSED;
3773 }
3774 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3775
3776 /* Save original instruction. */
3777 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3778 AssertRC(rc);
3779
3780 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3781
3782 /* Replace address with that of the cached item. */
3783 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3784 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3785 AssertRC(rc);
3786 if (RT_FAILURE(rc))
3787 {
3788 goto failure;
3789 }
3790
3791 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3792 pVM->patm.s.mmio.pCachedData = 0;
3793 pVM->patm.s.mmio.GCPhys = 0;
3794 pPatch->uState = PATCH_ENABLED;
3795 return VINF_SUCCESS;
3796
3797failure:
3798 /* Turn this patch into a dummy. */
3799 pPatch->uState = PATCH_REFUSED;
3800
3801 return rc;
3802}
3803
3804
3805/**
3806 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3807 *
3808 * @returns VBox status code.
3809 * @param pVM The cross context VM structure.
3810 * @param pInstrGC Guest context pointer to privileged instruction
3811 * @param pPatch Patch record
3812 *
3813 * @note returns failure if patching is not allowed or possible
3814 *
3815 */
3816static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3817{
3818 DISCPUSTATE cpu;
3819 uint32_t cbInstr;
3820 bool disret;
3821 uint8_t *pInstrHC;
3822
3823 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3824
3825 /* Convert GC to HC address. */
3826 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3827 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3828
3829 /* Disassemble mmio instruction. */
3830 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3831 &cpu, &cbInstr);
3832 if (disret == false)
3833 {
3834 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3835 return VERR_PATCHING_REFUSED;
3836 }
3837
3838 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3839 if (cbInstr > MAX_INSTR_SIZE)
3840 return VERR_PATCHING_REFUSED;
3841 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3842 return VERR_PATCHING_REFUSED;
3843
3844 /* Add relocation record for cached data access. */
3845 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3846 {
3847 Log(("Relocation failed for cached mmio address!!\n"));
3848 return VERR_PATCHING_REFUSED;
3849 }
3850 /* Replace address with that of the cached item. */
3851 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3852
3853 /* Lowest and highest address for write monitoring. */
3854 pPatch->pInstrGCLowest = pInstrGC;
3855 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3856
3857 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3858 pVM->patm.s.mmio.pCachedData = 0;
3859 pVM->patm.s.mmio.GCPhys = 0;
3860 return VINF_SUCCESS;
3861}
3862
3863/**
3864 * Activates an int3 patch
3865 *
3866 * @returns VBox status code.
3867 * @param pVM The cross context VM structure.
3868 * @param pPatch Patch record
3869 */
3870static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3871{
3872 uint8_t bASMInt3 = 0xCC;
3873 int rc;
3874
3875 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3876 Assert(pPatch->uState != PATCH_ENABLED);
3877
3878 /* Replace first opcode byte with 'int 3'. */
3879 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3880 AssertRC(rc);
3881
3882 pPatch->cbPatchJump = sizeof(bASMInt3);
3883
3884 return rc;
3885}
3886
3887/**
3888 * Deactivates an int3 patch
3889 *
3890 * @returns VBox status code.
3891 * @param pVM The cross context VM structure.
3892 * @param pPatch Patch record
3893 */
3894static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3895{
3896 uint8_t ASMInt3 = 0xCC;
3897 int rc;
3898
3899 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3900 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3901
3902 /* Restore first opcode byte. */
3903 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3904 AssertRC(rc);
3905 return rc;
3906}
3907
3908/**
3909 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3910 * in the raw-mode context.
3911 *
3912 * @returns VBox status code.
3913 * @param pVM The cross context VM structure.
3914 * @param pInstrGC Guest context pointer to privileged instruction
3915 * @param pInstrHC Host context pointer to privileged instruction
3916 * @param pCpu Disassembly CPU structure ptr
3917 * @param pPatch Patch record
3918 *
3919 * @note returns failure if patching is not allowed or possible
3920 *
3921 */
3922int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3923{
3924 uint8_t bASMInt3 = 0xCC;
3925 int rc;
3926
3927 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3928 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3929
3930 /* Save the original instruction. */
3931 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3932 AssertRC(rc);
3933 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3934
3935 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3936
3937 /* Replace first opcode byte with 'int 3'. */
3938 rc = patmActivateInt3Patch(pVM, pPatch);
3939 if (RT_FAILURE(rc))
3940 goto failure;
3941
3942 /* Lowest and highest address for write monitoring. */
3943 pPatch->pInstrGCLowest = pInstrGC;
3944 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3945
3946 pPatch->uState = PATCH_ENABLED;
3947 return VINF_SUCCESS;
3948
3949failure:
3950 /* Turn this patch into a dummy. */
3951 return VERR_PATCHING_REFUSED;
3952}
3953
3954#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3955/**
3956 * Patch a jump instruction at specified location
3957 *
3958 * @returns VBox status code.
3959 * @param pVM The cross context VM structure.
3960 * @param pInstrGC Guest context pointer to privileged instruction
3961 * @param pInstrHC Host context pointer to privileged instruction
3962 * @param pCpu Disassembly CPU structure ptr
3963 * @param pPatchRec Patch record
3964 *
3965 * @note returns failure if patching is not allowed or possible
3966 *
3967 */
3968int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3969{
3970 PPATCHINFO pPatch = &pPatchRec->patch;
3971 int rc = VERR_PATCHING_REFUSED;
3972
3973 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3974 pPatch->uCurPatchOffset = 0;
3975 pPatch->cbPatchBlockSize = 0;
3976 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3977
3978 /*
3979 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3980 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3981 */
3982 switch (pCpu->pCurInstr->uOpcode)
3983 {
3984 case OP_JO:
3985 case OP_JNO:
3986 case OP_JC:
3987 case OP_JNC:
3988 case OP_JE:
3989 case OP_JNE:
3990 case OP_JBE:
3991 case OP_JNBE:
3992 case OP_JS:
3993 case OP_JNS:
3994 case OP_JP:
3995 case OP_JNP:
3996 case OP_JL:
3997 case OP_JNL:
3998 case OP_JLE:
3999 case OP_JNLE:
4000 case OP_JMP:
4001 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4002 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4003 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4004 goto failure;
4005
4006 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4007 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4008 goto failure;
4009
4010 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4011 {
4012 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4013 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4014 rc = VERR_PATCHING_REFUSED;
4015 goto failure;
4016 }
4017
4018 break;
4019
4020 default:
4021 goto failure;
4022 }
4023
4024 // make a copy of the guest code bytes that will be overwritten
4025 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4026 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4027 pPatch->cbPatchJump = pCpu->cbInstr;
4028
4029 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4030 AssertRC(rc);
4031
4032 /* Now insert a jump in the guest code. */
4033 /*
4034 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4035 * references the target instruction in the conflict patch.
4036 */
4037 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4038
4039 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4040 pPatch->pPatchJumpDestGC = pJmpDest;
4041
4042 PATMP2GLOOKUPREC cacheRec;
4043 RT_ZERO(cacheRec);
4044 cacheRec.pPatch = pPatch;
4045
4046 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4047 /* Free leftover lock if any. */
4048 if (cacheRec.Lock.pvMap)
4049 {
4050 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4051 cacheRec.Lock.pvMap = NULL;
4052 }
4053 AssertRC(rc);
4054 if (RT_FAILURE(rc))
4055 goto failure;
4056
4057 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4058
4059 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4060 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4061
4062 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4063
4064 /* Lowest and highest address for write monitoring. */
4065 pPatch->pInstrGCLowest = pInstrGC;
4066 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4067
4068 pPatch->uState = PATCH_ENABLED;
4069 return VINF_SUCCESS;
4070
4071failure:
4072 /* Turn this cli patch into a dummy. */
4073 pPatch->uState = PATCH_REFUSED;
4074
4075 return rc;
4076}
4077#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4078
4079
4080/**
4081 * Gives hint to PATM about supervisor guest instructions
4082 *
4083 * @returns VBox status code.
4084 * @param pVM The cross context VM structure.
4085 * @param pInstrGC Guest context pointer to privileged instruction
4086 * @param flags Patch flags
4087 */
4088VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4089{
4090 Assert(pInstrGC);
4091 Assert(flags == PATMFL_CODE32);
4092
4093 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4094 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4095}
4096
4097/**
4098 * Patch privileged instruction at specified location
4099 *
4100 * @returns VBox status code.
4101 * @param pVM The cross context VM structure.
4102 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat
4103 * address)
4104 * @param flags Patch flags
4105 *
4106 * @note returns failure if patching is not allowed or possible
4107 */
4108VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4109{
4110 DISCPUSTATE cpu;
4111 R3PTRTYPE(uint8_t *) pInstrHC;
4112 uint32_t cbInstr;
4113 PPATMPATCHREC pPatchRec;
4114 PCPUMCTX pCtx = 0;
4115 bool disret;
4116 int rc;
4117 PVMCPU pVCpu = VMMGetCpu0(pVM);
4118 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4119
4120 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4121
4122 if ( !pVM
4123 || pInstrGC == 0
4124 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4125 {
4126 AssertFailed();
4127 return VERR_INVALID_PARAMETER;
4128 }
4129
4130 if (PATMIsEnabled(pVM) == false)
4131 return VERR_PATCHING_REFUSED;
4132
4133 /* Test for patch conflict only with patches that actually change guest code. */
4134 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4135 {
4136 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4137 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4138 if (pConflictPatch != 0)
4139 return VERR_PATCHING_REFUSED;
4140 }
4141
4142 if (!(flags & PATMFL_CODE32))
4143 {
4144 /** @todo Only 32 bits code right now */
4145 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4146 return VERR_NOT_IMPLEMENTED;
4147 }
4148
4149 /* We ran out of patch memory; don't bother anymore. */
4150 if (pVM->patm.s.fOutOfMemory == true)
4151 return VERR_PATCHING_REFUSED;
4152
4153#if 1 /* DONT COMMIT ENABLED! */
4154 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4155 if ( 0
4156 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4157 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4158 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4159 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4160 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4161 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4162 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4163 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4164 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4165 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4166 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4167 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4168 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4169 || pInstrGC == 0x80014447 /* KfLowerIrql */
4170 || 0)
4171 {
4172 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4173 return VERR_PATCHING_REFUSED;
4174 }
4175#endif
4176
4177 /* Make sure the code selector is wide open; otherwise refuse. */
4178 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4179 if (CPUMGetGuestCPL(pVCpu) == 0)
4180 {
4181 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4182 if (pInstrGCFlat != pInstrGC)
4183 {
4184 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4185 return VERR_PATCHING_REFUSED;
4186 }
4187 }
4188
4189 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3) */
4190 if (!(flags & PATMFL_GUEST_SPECIFIC))
4191 {
4192 /* New code. Make sure CSAM has a go at it first. */
4193 CSAMR3CheckCode(pVM, pInstrGC);
4194 }
4195
4196 /* Note: obsolete */
4197 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4198 && (flags & PATMFL_MMIO_ACCESS))
4199 {
4200 RTRCUINTPTR offset;
4201 void *pvPatchCoreOffset;
4202
4203 /* Find the patch record. */
4204 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4205 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4206 if (pvPatchCoreOffset == NULL)
4207 {
4208 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4209 return VERR_PATCH_NOT_FOUND; //fatal error
4210 }
4211 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4212
4213 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4214 }
4215
4216 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4217
4218 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4219 if (pPatchRec)
4220 {
4221 Assert(!(flags & PATMFL_TRAMPOLINE));
4222
4223 /* Hints about existing patches are ignored. */
4224 if (flags & PATMFL_INSTR_HINT)
4225 return VERR_PATCHING_REFUSED;
4226
4227 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4228 {
4229 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4230 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4231 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4232 }
4233
4234 if (pPatchRec->patch.uState == PATCH_DISABLED)
4235 {
4236 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4237 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4238 {
4239 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4240 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4241 }
4242 else
4243 Log(("Enabling patch %RRv again\n", pInstrGC));
4244
4245 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4246 rc = PATMR3EnablePatch(pVM, pInstrGC);
4247 if (RT_SUCCESS(rc))
4248 return VWRN_PATCH_ENABLED;
4249
4250 return rc;
4251 }
4252 if ( pPatchRec->patch.uState == PATCH_ENABLED
4253 || pPatchRec->patch.uState == PATCH_DIRTY)
4254 {
4255 /*
4256 * The patch might have been overwritten.
4257 */
4258 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4259 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4260 {
4261 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4262 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4263 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4264 {
4265 if (flags & PATMFL_IDTHANDLER)
4266 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4267
4268 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4269 }
4270 }
4271 rc = PATMR3RemovePatch(pVM, pInstrGC);
4272 if (RT_FAILURE(rc))
4273 return VERR_PATCHING_REFUSED;
4274 }
4275 else
4276 {
4277 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4278 /* already tried it once! */
4279 return VERR_PATCHING_REFUSED;
4280 }
4281 }
4282
4283 RTGCPHYS GCPhys;
4284 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4285 if (rc != VINF_SUCCESS)
4286 {
4287 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4288 return rc;
4289 }
4290 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4291 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4292 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4293 {
4294 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4295 return VERR_PATCHING_REFUSED;
4296 }
4297
4298 /* Initialize cache record for guest address translations. */
4299 bool fInserted;
4300 PATMP2GLOOKUPREC cacheRec;
4301 RT_ZERO(cacheRec);
4302
4303 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4304 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4305
4306 /* Allocate patch record. */
4307 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4308 if (RT_FAILURE(rc))
4309 {
4310 Log(("Out of memory!!!!\n"));
4311 return VERR_NO_MEMORY;
4312 }
4313 pPatchRec->Core.Key = pInstrGC;
4314 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4315 /* Insert patch record into the lookup tree. */
4316 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4317 Assert(fInserted);
4318
4319 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4320 pPatchRec->patch.flags = flags;
4321 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4322 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4323
4324 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4325 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4326
4327 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4328 {
4329 /*
4330 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4331 */
4332 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4333 if (pPatchNear)
4334 {
4335 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4336 {
4337 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4338
4339 pPatchRec->patch.uState = PATCH_UNUSABLE;
4340 /*
4341 * Leave the new patch record in place, marked unusable, to prevent us from checking it over and over again.
4342 */
4343 return VERR_PATCHING_REFUSED;
4344 }
4345 }
4346 }
4347
4348 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4349 if (pPatchRec->patch.pTempInfo == 0)
4350 {
4351 Log(("Out of memory!!!!\n"));
4352 return VERR_NO_MEMORY;
4353 }
4354
4355 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4356 if (disret == false)
4357 {
4358 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4359 return VERR_PATCHING_REFUSED;
4360 }
4361
4362 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4363 if (cbInstr > MAX_INSTR_SIZE)
4364 return VERR_PATCHING_REFUSED;
4365
4366 pPatchRec->patch.cbPrivInstr = cbInstr;
4367 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4368
4369 /* Restricted hinting for now. */
4370 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4371
4372 /* Initialize cache record patch pointer. */
4373 cacheRec.pPatch = &pPatchRec->patch;
4374
4375 /* Allocate statistics slot */
4376 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4377 {
4378 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4379 }
4380 else
4381 {
4382 Log(("WARNING: Patch index wrap around!!\n"));
4383 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4384 }
4385
4386 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4387 {
4388 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4389 }
4390 else
4391 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4392 {
4393 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4394 }
4395 else
4396 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4397 {
4398 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4399 }
4400 else
4401 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4402 {
4403 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4404 }
4405 else
4406 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4407 {
4408 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4409 }
4410 else
4411 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4412 {
4413 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4414 }
4415 else
4416 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4417 {
4418 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4419 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4420
4421 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4422#ifdef VBOX_WITH_STATISTICS
4423 if ( rc == VINF_SUCCESS
4424 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4425 {
4426 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4427 }
4428#endif
4429 }
4430 else
4431 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4432 {
4433 switch (cpu.pCurInstr->uOpcode)
4434 {
4435 case OP_SYSENTER:
4436 case OP_PUSH:
4437 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4438 if (rc == VINF_SUCCESS)
4439 {
4440 if (rc == VINF_SUCCESS)
4441 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4442 return rc;
4443 }
4444 break;
4445
4446 default:
4447 rc = VERR_NOT_IMPLEMENTED;
4448 break;
4449 }
4450 }
4451 else
4452 {
4453 switch (cpu.pCurInstr->uOpcode)
4454 {
4455 case OP_SYSENTER:
4456 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4457 if (rc == VINF_SUCCESS)
4458 {
4459 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4460 return VINF_SUCCESS;
4461 }
4462 break;
4463
4464#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4465 case OP_JO:
4466 case OP_JNO:
4467 case OP_JC:
4468 case OP_JNC:
4469 case OP_JE:
4470 case OP_JNE:
4471 case OP_JBE:
4472 case OP_JNBE:
4473 case OP_JS:
4474 case OP_JNS:
4475 case OP_JP:
4476 case OP_JNP:
4477 case OP_JL:
4478 case OP_JNL:
4479 case OP_JLE:
4480 case OP_JNLE:
4481 case OP_JECXZ:
4482 case OP_LOOP:
4483 case OP_LOOPNE:
4484 case OP_LOOPE:
4485 case OP_JMP:
4486 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4487 {
4488 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4489 break;
4490 }
4491 return VERR_NOT_IMPLEMENTED;
4492#endif
4493
4494 case OP_PUSHF:
4495 case OP_CLI:
4496 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4497 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4498 break;
4499
4500#ifndef VBOX_WITH_SAFE_STR
4501 case OP_STR:
4502#endif
4503 case OP_SGDT:
4504 case OP_SLDT:
4505 case OP_SIDT:
4506 case OP_CPUID:
4507 case OP_LSL:
4508 case OP_LAR:
4509 case OP_SMSW:
4510 case OP_VERW:
4511 case OP_VERR:
4512 case OP_IRET:
4513#ifdef VBOX_WITH_RAW_RING1
4514 case OP_MOV:
4515#endif
4516 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4517 break;
4518
4519 default:
4520 return VERR_NOT_IMPLEMENTED;
4521 }
4522 }
4523
4524 if (rc != VINF_SUCCESS)
4525 {
4526 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4527 {
4528 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4529 pPatchRec->patch.nrPatch2GuestRecs = 0;
4530 }
4531 pVM->patm.s.uCurrentPatchIdx--;
4532 }
4533 else
4534 {
4535 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4536 AssertRCReturn(rc, rc);
4537
4538 /* Keep track of the upper and lower boundaries of patched instructions */
4539 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4540 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4541 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4542 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4543
4544 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4545 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4546
4547 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4548 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4549
4550 rc = VINF_SUCCESS;
4551
4552 /* Patch hints are not enabled by default; only when they are actually encountered. */
4553 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4554 {
4555 rc = PATMR3DisablePatch(pVM, pInstrGC);
4556 AssertRCReturn(rc, rc);
4557 }
4558
4559#ifdef VBOX_WITH_STATISTICS
4560 /* Register statistics counter */
4561 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4562 {
4563 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4564 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4565#ifndef DEBUG_sandervl
4566 /* Full breakdown for the GUI. */
4567 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4568 "/PATM/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4569 STAMR3RegisterF(pVM, &pPatchRec->patch.pPatchBlockOffset,STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/offPatchBlock", pPatchRec->patch.pPrivInstrGC);
4570 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4571 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4572 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4573 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4574 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4575 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4576 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4577 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4578 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4579 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4580 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4581 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4582 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4583 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4584#endif
4585 }
4586#endif
4587
4588 /* Add debug symbol. */
4589 patmR3DbgAddPatch(pVM, pPatchRec);
4590 }
4591 /* Free leftover lock if any. */
4592 if (cacheRec.Lock.pvMap)
4593 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4594 return rc;
4595}
4596
4597/**
4598 * Query instruction size
4599 *
4600  * @returns The instruction size in bytes, or 0 if it could not be determined.
4601 * @param pVM The cross context VM structure.
4602 * @param pPatch Patch record
4603 * @param pInstrGC Instruction address
4604 */
4605static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4606{
4607 uint8_t *pInstrHC;
4608 PGMPAGEMAPLOCK Lock;
4609
4610 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4611 if (rc == VINF_SUCCESS)
4612 {
4613 DISCPUSTATE cpu;
4614 bool disret;
4615 uint32_t cbInstr;
4616
4617 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4618 PGMPhysReleasePageMappingLock(pVM, &Lock);
4619 if (disret)
4620 return cbInstr;
4621 }
4622 return 0;
4623}
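
/*
 * Illustrative sketch (not compiled): one way the patmGetInstrSize() helper can be
 * used, mirroring the page-boundary check done in patmAddPatchToPage() below.
 * The patmExampleInstrCrossesPage name is hypothetical, not part of PATM itself.
 */
#if 0 /* example only */
static bool patmExampleInstrCrossesPage(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    uint32_t const cbInstr = patmGetInstrSize(pVM, pPatch, pInstrGC);
    if (!cbInstr)
        return false;   /* could not disassemble; treat as not crossing */
    /* Crosses if the offset within the page plus the instruction size runs past the page end. */
    return ((RTRCUINTPTR)pInstrGC & PAGE_OFFSET_MASK) + cbInstr > PAGE_SIZE;
}
#endif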
4624
4625/**
4626 * Add patch to page record
4627 *
4628 * @returns VBox status code.
4629 * @param pVM The cross context VM structure.
4630 * @param pPage Page address
4631 * @param pPatch Patch record
4632 */
4633int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4634{
4635 PPATMPATCHPAGE pPatchPage;
4636 int rc;
4637
4638 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4639
4640 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4641 if (pPatchPage)
4642 {
4643 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4644 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4645 {
4646 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4647 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4648
4649 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4650 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4651 (void **)&pPatchPage->papPatch);
4652 if (RT_FAILURE(rc))
4653 {
4654 Log(("Out of memory!!!!\n"));
4655 return VERR_NO_MEMORY;
4656 }
4657 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4658 MMHyperFree(pVM, papPatchOld);
4659 }
4660 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4661 pPatchPage->cCount++;
4662 }
4663 else
4664 {
4665 bool fInserted;
4666
4667 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4668 if (RT_FAILURE(rc))
4669 {
4670 Log(("Out of memory!!!!\n"));
4671 return VERR_NO_MEMORY;
4672 }
4673 pPatchPage->Core.Key = pPage;
4674 pPatchPage->cCount = 1;
4675 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4676
4677 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4678 (void **)&pPatchPage->papPatch);
4679 if (RT_FAILURE(rc))
4680 {
4681 Log(("Out of memory!!!!\n"));
4682 MMHyperFree(pVM, pPatchPage);
4683 return VERR_NO_MEMORY;
4684 }
4685 pPatchPage->papPatch[0] = pPatch;
4686
4687 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4688 Assert(fInserted);
4689 pVM->patm.s.cPageRecords++;
4690
4691 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4692 }
4693 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4694
4695 /* Get the closest guest instruction (from below) */
4696 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4697 Assert(pGuestToPatchRec);
4698 if (pGuestToPatchRec)
4699 {
4700 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4701 if ( pPatchPage->pLowestAddrGC == 0
4702 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4703 {
4704 RTRCUINTPTR offset;
4705
4706 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4707
4708 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4709 /* If we're too close to the page boundary, then make sure an
4710 instruction from the previous page doesn't cross the
4711 boundary itself. */
4712 if (offset && offset < MAX_INSTR_SIZE)
4713 {
4714 /* Get the closest guest instruction (from above) */
4715 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4716
4717 if (pGuestToPatchRec)
4718 {
4719 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4720 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4721 {
4722 pPatchPage->pLowestAddrGC = pPage;
4723 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4724 }
4725 }
4726 }
4727 }
4728 }
4729
4730 /* Get the closest guest instruction (from above) */
4731 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4732 Assert(pGuestToPatchRec);
4733 if (pGuestToPatchRec)
4734 {
4735 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4736 if ( pPatchPage->pHighestAddrGC == 0
4737 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4738 {
4739 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4740 /* Increase by instruction size. */
4741 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4742//// Assert(size);
4743 pPatchPage->pHighestAddrGC += size;
4744 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4745 }
4746 }
4747
4748 return VINF_SUCCESS;
4749}
4750
4751/**
4752 * Remove patch from page record
4753 *
4754 * @returns VBox status code.
4755 * @param pVM The cross context VM structure.
4756 * @param pPage Page address
4757 * @param pPatch Patch record
4758 */
4759int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4760{
4761 PPATMPATCHPAGE pPatchPage;
4762 int rc;
4763
4764 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4765 Assert(pPatchPage);
4766
4767 if (!pPatchPage)
4768 return VERR_INVALID_PARAMETER;
4769
4770 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4771
4772 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4773 if (pPatchPage->cCount > 1)
4774 {
4775 uint32_t i;
4776
4777 /* Used by multiple patches */
4778 for (i = 0; i < pPatchPage->cCount; i++)
4779 {
4780 if (pPatchPage->papPatch[i] == pPatch)
4781 {
4782 /* close the gap between the remaining pointers. */
4783 uint32_t cNew = --pPatchPage->cCount;
4784 if (i < cNew)
4785 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4786 pPatchPage->papPatch[cNew] = NULL;
4787 return VINF_SUCCESS;
4788 }
4789 }
4790 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4791 }
4792 else
4793 {
4794 PPATMPATCHPAGE pPatchNode;
4795
4796 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4797
4798 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4799 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4800 Assert(pPatchNode && pPatchNode == pPatchPage);
4801
4802 Assert(pPatchPage->papPatch);
4803 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4804 AssertRC(rc);
4805 rc = MMHyperFree(pVM, pPatchPage);
4806 AssertRC(rc);
4807 pVM->patm.s.cPageRecords--;
4808 }
4809 return VINF_SUCCESS;
4810}
4811
4812/**
4813 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4814 *
4815 * @returns VBox status code.
4816 * @param pVM The cross context VM structure.
4817 * @param pPatch Patch record
4818 */
4819int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4820{
4821 int rc;
4822 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4823
4824 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4825 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4826 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4827
4828 /** @todo optimize better (large gaps between current and next used page) */
4829 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4830 {
4831 /* Get the closest guest instruction (from above) */
4832 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4833 if ( pGuestToPatchRec
4834 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4835 )
4836 {
4837 /* Code in page really patched -> add record */
4838 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4839 AssertRC(rc);
4840 }
4841 }
4842 pPatch->flags |= PATMFL_CODE_MONITORED;
4843 return VINF_SUCCESS;
4844}
4845
4846/**
4847 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4848 *
4849 * @returns VBox status code.
4850 * @param pVM The cross context VM structure.
4851 * @param pPatch Patch record
4852 */
4853static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4854{
4855 int rc;
4856 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4857
4858 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4859 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4860 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4861
4862 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4863 {
4864 /* Get the closest guest instruction (from above) */
4865 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4866 if ( pGuestToPatchRec
4867 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4868 )
4869 {
4870 /* Code in page really patched -> remove record */
4871 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4872 AssertRC(rc);
4873 }
4874 }
4875 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4876 return VINF_SUCCESS;
4877}
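
/*
 * Illustrative sketch (not compiled): how patmInsertPatchPages/patmRemovePatchPages
 * pair up with the PATMFL_CODE_MONITORED flag.  The patmExampleSetMonitoring helper
 * is hypothetical; PATM calls the two functions directly.
 */
#if 0 /* example only */
static void patmExampleSetMonitoring(PVM pVM, PPATCHINFO pPatch, bool fMonitor)
{
    int rc = VINF_SUCCESS;
    if (fMonitor && !(pPatch->flags & PATMFL_CODE_MONITORED))
        rc = patmInsertPatchPages(pVM, pPatch);     /* adds page records and CSAM monitoring */
    else if (!fMonitor && (pPatch->flags & PATMFL_CODE_MONITORED))
        rc = patmRemovePatchPages(pVM, pPatch);     /* drops the page records again */
    AssertRC(rc);
}
#endif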
4878
4879/**
4880 * Notifies PATM about a (potential) write to code that has been patched.
4881 *
4882 * @returns VBox status code.
4883 * @param pVM The cross context VM structure.
4884 * @param GCPtr GC pointer to write address
4885 * @param cbWrite Nr of bytes to write
4886 *
4887 */
4888VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4889{
4890 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4891
4892 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4893
4894 Assert(VM_IS_EMT(pVM));
4895 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4896
4897 /* Quick boundary check */
4898 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4899 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4900 )
4901 return VINF_SUCCESS;
4902
4903 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4904
4905 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4906 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4907
4908 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4909 {
4910loop_start:
4911 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4912 if (pPatchPage)
4913 {
4914 uint32_t i;
4915 bool fValidPatchWrite = false;
4916
4917 /* Quick check to see if the write is in the patched part of the page */
4918 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4919 || pPatchPage->pHighestAddrGC < GCPtr)
4920 {
4921 break;
4922 }
4923
4924 for (i=0;i<pPatchPage->cCount;i++)
4925 {
4926 if (pPatchPage->papPatch[i])
4927 {
4928 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4929 RTRCPTR pPatchInstrGC;
4930 //unused: bool fForceBreak = false;
4931
4932 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4933 /** @todo inefficient and includes redundant checks for multiple pages. */
4934 for (uint32_t j=0; j<cbWrite; j++)
4935 {
4936 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4937
4938 if ( pPatch->cbPatchJump
4939 && pGuestPtrGC >= pPatch->pPrivInstrGC
4940 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4941 {
4942 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4943 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4944 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4945 if (rc == VINF_SUCCESS)
4946 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4947 goto loop_start;
4948
4949 continue;
4950 }
4951
4952 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4953 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4954 if (!pPatchInstrGC)
4955 {
4956 RTRCPTR pClosestInstrGC;
4957 uint32_t size;
4958
4959 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4960 if (pPatchInstrGC)
4961 {
4962 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4963 Assert(pClosestInstrGC <= pGuestPtrGC);
4964 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4965 /* Check if this is not a write into a gap between two patches */
4966 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4967 pPatchInstrGC = 0;
4968 }
4969 }
4970 if (pPatchInstrGC)
4971 {
4972 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4973
4974 fValidPatchWrite = true;
4975
4976 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4977 Assert(pPatchToGuestRec);
4978 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4979 {
4980 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4981
4982 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4983 {
4984 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4985
4986 patmR3MarkDirtyPatch(pVM, pPatch);
4987
4988 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4989 goto loop_start;
4990 }
4991 else
4992 {
4993 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4994 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4995
4996 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4997 pPatchToGuestRec->fDirty = true;
4998
4999 *pInstrHC = 0xCC;
5000
5001 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5002 }
5003 }
5004 /* else already marked dirty */
5005 }
5006 }
5007 }
5008 } /* for each patch */
5009
5010 if (fValidPatchWrite == false)
5011 {
5012                 /* Write to a part of the page that either:
5013                  * - doesn't contain any code (shared code/data); rather unlikely, or
5014                  * - lies in an old code page that's no longer in active use.
5015                  */
5016invalid_write_loop_start:
5017 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5018
5019 if (pPatchPage)
5020 {
5021 for (i=0;i<pPatchPage->cCount;i++)
5022 {
5023 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5024
5025 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5026 {
5027 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5028 if (pPatch->flags & PATMFL_IDTHANDLER)
5029 {
5030 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5031
5032 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5033 int rc = patmRemovePatchPages(pVM, pPatch);
5034 AssertRC(rc);
5035 }
5036 else
5037 {
5038 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5039 patmR3MarkDirtyPatch(pVM, pPatch);
5040 }
5041 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5042 goto invalid_write_loop_start;
5043 }
5044 } /* for */
5045 }
5046 }
5047 }
5048 }
5049 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5050 return VINF_SUCCESS;
5051
5052}
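
/*
 * Illustrative sketch (not compiled): a hypothetical caller forwarding a trapped
 * guest write to PATMR3PatchWrite().  The patmExampleForwardGuestWrite name is
 * made up; the real notifications come from the monitoring set up above.
 */
#if 0 /* example only */
static int patmExampleForwardGuestWrite(PVM pVM, RTRCPTR GCPtrWrite, uint32_t cbWrite)
{
    if (!PATMIsEnabled(pVM))
        return VINF_SUCCESS;                    /* nothing is monitored */
    /* PATM will mark affected patches dirty or remove overwritten patch jumps. */
    return PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
}
#endif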
5053
5054/**
5055 * Disable all patches in a flushed page
5056 *
5057 * @returns VBox status code
5058 * @param pVM The cross context VM structure.
5059 * @param addr GC address of the page to flush
5060 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5061 * having to double check if the physical address has changed
5062 */
5063VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5064{
5065 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5066
5067 addr &= PAGE_BASE_GC_MASK;
5068
5069 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5070 if (pPatchPage)
5071 {
5072 int i;
5073
5074 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
5075 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5076 {
5077 if (pPatchPage->papPatch[i])
5078 {
5079 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5080
5081 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5082 patmR3MarkDirtyPatch(pVM, pPatch);
5083 }
5084 }
5085 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5086 }
5087 return VINF_SUCCESS;
5088}
5089
5090/**
5091  * Checks if the instruction at the specified address has already been patched.
5092 *
5093 * @returns boolean, patched or not
5094 * @param pVM The cross context VM structure.
5095 * @param pInstrGC Guest context pointer to instruction
5096 */
5097VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5098{
5099 Assert(!HMIsEnabled(pVM));
5100 PPATMPATCHREC pPatchRec;
5101 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5102 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5103 return true;
5104 return false;
5105}
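
/*
 * Illustrative sketch (not compiled): skipping re-installation when an enabled patch
 * already covers the instruction.  The helper name and the PATMFL_CODE32 flag choice
 * are assumptions for illustration only.
 */
#if 0 /* example only */
static int patmExampleInstallOnce(PVM pVM, RTRCPTR pInstrGC)
{
    if (PATMR3HasBeenPatched(pVM, pInstrGC))
        return VINF_SUCCESS;                    /* already patched and enabled */
    return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32);
}
#endif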
5106
5107/**
5108  * Query the opcode of the original code that was overwritten by the 5 byte patch jump
5109 *
5110 * @returns VBox status code.
5111 * @param pVM The cross context VM structure.
5112 * @param pInstrGC GC address of instr
5113 * @param pByte opcode byte pointer (OUT)
5114 *
5115 */
5116VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5117{
5118 PPATMPATCHREC pPatchRec;
5119
5120 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5121
5122 /* Shortcut. */
5123 if (!PATMIsEnabled(pVM))
5124 return VERR_PATCH_NOT_FOUND;
5125 Assert(!HMIsEnabled(pVM));
5126 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5127 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5128 return VERR_PATCH_NOT_FOUND;
5129
5130 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5131 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5132 if ( pPatchRec
5133 && pPatchRec->patch.uState == PATCH_ENABLED
5134 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5135 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5136 {
5137 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5138 *pByte = pPatchRec->patch.aPrivInstr[offset];
5139
5140 if (pPatchRec->patch.cbPatchJump == 1)
5141 {
5142 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5143 }
5144 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5145 return VINF_SUCCESS;
5146 }
5147 return VERR_PATCH_NOT_FOUND;
5148}
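
/*
 * Illustrative sketch (not compiled): reading a single original opcode byte that may
 * be hidden by the 5 byte patch jump, falling back to a caller supplied byte
 * otherwise.  The patmExampleOrgByte helper is hypothetical.
 */
#if 0 /* example only */
static uint8_t patmExampleOrgByte(PVM pVM, RTRCPTR GCPtrInstr, uint8_t bGuestMem)
{
    uint8_t b;
    if (PATMR3QueryOpcode(pVM, GCPtrInstr, &b) == VINF_SUCCESS)
        return b;           /* original byte overwritten by the patch jump */
    return bGuestMem;       /* not patched; use what was read from guest memory */
}
#endif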
5149
5150/**
5151  * Read instruction bytes of the original code that was overwritten by the 5
5152  * byte patch jump.
5153 *
5154 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5155 * @param pVM The cross context VM structure.
5156 * @param GCPtrInstr GC address of instr
5157 * @param pbDst The output buffer.
5158  * @param cbToRead The maximum number of bytes to read.
5159  * @param pcbRead Where to return the actual number of bytes read.
5160 */
5161VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5162{
5163 /* Shortcut. */
5164 if (!PATMIsEnabled(pVM))
5165 return VERR_PATCH_NOT_FOUND;
5166 Assert(!HMIsEnabled(pVM));
5167 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5168 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5169 return VERR_PATCH_NOT_FOUND;
5170
5171 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5172
5173 /*
5174 * If the patch is enabled and the pointer lies within 5 bytes of this
5175 * priv instr ptr, then we've got a hit!
5176 */
5177 RTGCPTR32 off;
5178 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5179 GCPtrInstr, false /*fAbove*/);
5180 if ( pPatchRec
5181 && pPatchRec->patch.uState == PATCH_ENABLED
5182 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5183 {
5184 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5185 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5186 if (cbToRead > cbMax)
5187 cbToRead = cbMax;
5188 switch (cbToRead)
5189 {
5190 case 5: pbDst[4] = pbSrc[4];
5191 case 4: pbDst[3] = pbSrc[3];
5192 case 3: pbDst[2] = pbSrc[2];
5193 case 2: pbDst[1] = pbSrc[1];
5194 case 1: pbDst[0] = pbSrc[0];
5195 break;
5196 default:
5197 memcpy(pbDst, pbSrc, cbToRead);
5198 }
5199 *pcbRead = cbToRead;
5200
5201 if (pPatchRec->patch.cbPatchJump == 1)
5202 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5203 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5204 return VINF_SUCCESS;
5205 }
5206
5207 return VERR_PATCH_NOT_FOUND;
5208}
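
/*
 * Illustrative sketch (not compiled): a disassembler style byte reader that prefers
 * the original bytes over the patch jump and falls back to guest memory for the
 * remainder.  The patmExampleReadGuestBytes helper is hypothetical; the fallback
 * uses PGMPhysSimpleReadGCPtr as done elsewhere in this file.
 */
#if 0 /* example only */
static int patmExampleReadGuestBytes(PVM pVM, RTGCPTR32 GCPtrSrc, uint8_t *pbDst, size_t cbToRead)
{
    size_t cbRead = 0;
    int rc = PGMPhysGCPtr2CCPtrReadOnly ? VINF_SUCCESS : VINF_SUCCESS; /* placeholder removed below */
    rc = PATMR3ReadOrgInstr(pVM, GCPtrSrc, pbDst, cbToRead, &cbRead);
    if (rc == VINF_SUCCESS && cbRead == cbToRead)
        return VINF_SUCCESS;                    /* fully covered by a patch jump */
    if (rc != VINF_SUCCESS)
        cbRead = 0;                             /* nothing patched at this address */
    /* Read the remainder straight from guest memory. */
    return PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pbDst + cbRead, GCPtrSrc + cbRead, cbToRead - cbRead);
}
#endif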
5209
5210/**
5211 * Disable patch for privileged instruction at specified location
5212 *
5213 * @returns VBox status code.
5214 * @param pVM The cross context VM structure.
5215  * @param pInstrGC Guest context pointer to privileged instruction
5216  *
5217  * @note returns failure if patching is not allowed or not possible
5218 *
5219 */
5220VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5221{
5222 PPATMPATCHREC pPatchRec;
5223 PPATCHINFO pPatch;
5224
5225 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5226 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5227 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5228 if (pPatchRec)
5229 {
5230 int rc = VINF_SUCCESS;
5231
5232 pPatch = &pPatchRec->patch;
5233
5234 /* Already disabled? */
5235 if (pPatch->uState == PATCH_DISABLED)
5236 return VINF_SUCCESS;
5237
5238 /* Clear the IDT entries for the patch we're disabling. */
5239 /* Note: very important as we clear IF in the patch itself */
5240 /** @todo this needs to be changed */
5241 if (pPatch->flags & PATMFL_IDTHANDLER)
5242 {
5243 uint32_t iGate;
5244
5245 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5246 if (iGate != (uint32_t)~0)
5247 {
5248 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5249 if (++cIDTHandlersDisabled < 256)
5250 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5251 }
5252 }
5253
5254         /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, or duplicated function, trampoline or idt patches) */
5255 if ( pPatch->pPatchBlockOffset
5256 && pPatch->uState == PATCH_ENABLED)
5257 {
5258 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5259 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5260 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5261 }
5262
5263 /* IDT or function patches haven't changed any guest code. */
5264 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5265 {
5266 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5267 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5268
5269 if (pPatch->uState != PATCH_REFUSED)
5270 {
5271 uint8_t temp[16];
5272
5273 Assert(pPatch->cbPatchJump < sizeof(temp));
5274
5275 /* Let's first check if the guest code is still the same. */
5276 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5277 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5278 if (rc == VINF_SUCCESS)
5279 {
5280 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5281
5282 if ( temp[0] != 0xE9 /* jmp opcode */
5283 || *(RTRCINTPTR *)(&temp[1]) != displ
5284 )
5285 {
5286                         Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5287 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5288 /* Remove it completely */
5289 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5290 rc = PATMR3RemovePatch(pVM, pInstrGC);
5291 AssertRC(rc);
5292 return VWRN_PATCH_REMOVED;
5293 }
5294 patmRemoveJumpToPatch(pVM, pPatch);
5295 }
5296 else
5297 {
5298 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5299 pPatch->uState = PATCH_DISABLE_PENDING;
5300 }
5301 }
5302 else
5303 {
5304 AssertMsgFailed(("Patch was refused!\n"));
5305 return VERR_PATCH_ALREADY_DISABLED;
5306 }
5307 }
5308 else
5309 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5310 {
5311 uint8_t temp[16];
5312
5313 Assert(pPatch->cbPatchJump < sizeof(temp));
5314
5315 /* Let's first check if the guest code is still the same. */
5316 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5317 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5318 if (rc == VINF_SUCCESS)
5319 {
5320 if (temp[0] != 0xCC)
5321 {
5322                     Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5323 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5324 /* Remove it completely */
5325 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5326 rc = PATMR3RemovePatch(pVM, pInstrGC);
5327 AssertRC(rc);
5328 return VWRN_PATCH_REMOVED;
5329 }
5330 patmDeactivateInt3Patch(pVM, pPatch);
5331 }
5332 }
5333
5334 if (rc == VINF_SUCCESS)
5335 {
5336 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5337 if (pPatch->uState == PATCH_DISABLE_PENDING)
5338 {
5339 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5340 pPatch->uState = PATCH_UNUSABLE;
5341 }
5342 else
5343 if (pPatch->uState != PATCH_DIRTY)
5344 {
5345 pPatch->uOldState = pPatch->uState;
5346 pPatch->uState = PATCH_DISABLED;
5347 }
5348 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5349 }
5350
5351 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5352 return VINF_SUCCESS;
5353 }
5354 Log(("Patch not found!\n"));
5355 return VERR_PATCH_NOT_FOUND;
5356}
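
/*
 * Illustrative sketch (not compiled): temporarily taking a patch out of the way and
 * putting it back, handling the VWRN_PATCH_REMOVED case where disabling discovered
 * that the guest code changed.  The patmExampleTogglePatch helper is hypothetical.
 */
#if 0 /* example only */
static int patmExampleTogglePatch(PVM pVM, RTRCPTR pInstrGC)
{
    int rc = PATMR3DisablePatch(pVM, pInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;            /* guest code changed; the patch is gone for good */
    if (RT_FAILURE(rc))
        return rc;                      /* e.g. VERR_PATCH_NOT_FOUND */
    /* ... do whatever required the patch to be disabled ... */
    return PATMR3EnablePatch(pVM, pInstrGC);
}
#endif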
5357
5358/**
5359 * Permanently disable patch for privileged instruction at specified location
5360 *
5361 * @returns VBox status code.
5362 * @param pVM The cross context VM structure.
5363 * @param pInstrGC Guest context instruction pointer
5364 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5365 * @param pConflictPatch Conflicting patch
5366 *
5367 */
5368static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5369{
5370 NOREF(pConflictAddr);
5371#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5372 PATCHINFO patch;
5373 DISCPUSTATE cpu;
5374 R3PTRTYPE(uint8_t *) pInstrHC;
5375 uint32_t cbInstr;
5376 bool disret;
5377 int rc;
5378
5379 RT_ZERO(patch);
5380 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5381 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5382 /*
5383 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5384 * with one that jumps right into the conflict patch.
5385 * Otherwise we must disable the conflicting patch to avoid serious problems.
5386 */
5387 if ( disret == true
5388 && (pConflictPatch->flags & PATMFL_CODE32)
5389 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5390 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5391 {
5392 /* Hint patches must be enabled first. */
5393 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5394 {
5395 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5396 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5397 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5398 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5399 /* Enabling might fail if the patched code has changed in the meantime. */
5400 if (rc != VINF_SUCCESS)
5401 return rc;
5402 }
5403
5404 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5405 if (RT_SUCCESS(rc))
5406 {
5407 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5408 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5409 return VINF_SUCCESS;
5410 }
5411 }
5412#endif
5413
5414 if (pConflictPatch->opcode == OP_CLI)
5415 {
5416 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5417 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5418 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5419 if (rc == VWRN_PATCH_REMOVED)
5420 return VINF_SUCCESS;
5421 if (RT_SUCCESS(rc))
5422 {
5423 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5424 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5425 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5426 if (rc == VERR_PATCH_NOT_FOUND)
5427 return VINF_SUCCESS; /* removed already */
5428
5429 AssertRC(rc);
5430 if (RT_SUCCESS(rc))
5431 {
5432 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5433 return VINF_SUCCESS;
5434 }
5435 }
5436 /* else turned into unusable patch (see below) */
5437 }
5438 else
5439 {
5440 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5441 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5442 if (rc == VWRN_PATCH_REMOVED)
5443 return VINF_SUCCESS;
5444 }
5445
5446 /* No need to monitor the code anymore. */
5447 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5448 {
5449 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5450 AssertRC(rc);
5451 }
5452 pConflictPatch->uState = PATCH_UNUSABLE;
5453 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5454 return VERR_PATCH_DISABLED;
5455}
5456
5457/**
5458 * Enable patch for privileged instruction at specified location
5459 *
5460 * @returns VBox status code.
5461 * @param pVM The cross context VM structure.
5462  * @param pInstrGC Guest context pointer to privileged instruction
5463  *
5464  * @note returns failure if patching is not allowed or not possible
5465 *
5466 */
5467VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5468{
5469 PPATMPATCHREC pPatchRec;
5470 PPATCHINFO pPatch;
5471
5472 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5473 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5474 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5475 if (pPatchRec)
5476 {
5477 int rc = VINF_SUCCESS;
5478
5479 pPatch = &pPatchRec->patch;
5480
5481 if (pPatch->uState == PATCH_DISABLED)
5482 {
5483 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5484 {
5485 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5486 uint8_t temp[16];
5487
5488 Assert(pPatch->cbPatchJump < sizeof(temp));
5489
5490 /* Let's first check if the guest code is still the same. */
5491 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5492 AssertRC(rc2);
5493 if (rc2 == VINF_SUCCESS)
5494 {
5495 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5496 {
5497                         Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5498 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5499 /* Remove it completely */
5500 rc = PATMR3RemovePatch(pVM, pInstrGC);
5501 AssertRC(rc);
5502 return VERR_PATCH_NOT_FOUND;
5503 }
5504
5505 PATMP2GLOOKUPREC cacheRec;
5506 RT_ZERO(cacheRec);
5507 cacheRec.pPatch = pPatch;
5508
5509 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5510 /* Free leftover lock if any. */
5511 if (cacheRec.Lock.pvMap)
5512 {
5513 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5514 cacheRec.Lock.pvMap = NULL;
5515 }
5516 AssertRC(rc2);
5517 if (RT_FAILURE(rc2))
5518 return rc2;
5519
5520#ifdef DEBUG
5521 {
5522 DISCPUSTATE cpu;
5523 char szOutput[256];
5524 uint32_t cbInstr;
5525 uint32_t i = 0;
5526 bool disret;
5527 while(i < pPatch->cbPatchJump)
5528 {
5529 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5530 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5531 Log(("Renewed patch instr: %s", szOutput));
5532 i += cbInstr;
5533 }
5534 }
5535#endif
5536 }
5537 }
5538 else
5539 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5540 {
5541 uint8_t temp[16];
5542
5543 Assert(pPatch->cbPatchJump < sizeof(temp));
5544
5545 /* Let's first check if the guest code is still the same. */
5546 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5547 AssertRC(rc2);
5548
5549 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5550 {
5551                     Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5552 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5553 rc = PATMR3RemovePatch(pVM, pInstrGC);
5554 AssertRC(rc);
5555 return VERR_PATCH_NOT_FOUND;
5556 }
5557
5558 rc2 = patmActivateInt3Patch(pVM, pPatch);
5559 if (RT_FAILURE(rc2))
5560 return rc2;
5561 }
5562
5563 pPatch->uState = pPatch->uOldState; //restore state
5564
5565 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5566 if (pPatch->pPatchBlockOffset)
5567 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5568
5569 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5570 }
5571 else
5572 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5573
5574 return rc;
5575 }
5576 return VERR_PATCH_NOT_FOUND;
5577}
5578
5579/**
5580 * Remove patch for privileged instruction at specified location
5581 *
5582 * @returns VBox status code.
5583 * @param pVM The cross context VM structure.
5584 * @param pPatchRec Patch record
5585  * @param fForceRemove Force removal even if other code references the patch (used when removing all patches)
5586 */
5587int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5588{
5589 PPATCHINFO pPatch;
5590
5591 pPatch = &pPatchRec->patch;
5592
5593 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5594 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5595 {
5596 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5597 return VERR_ACCESS_DENIED;
5598 }
5599 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5600
5601 /* Note: NEVER EVER REUSE PATCH MEMORY */
5602 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5603
5604 if (pPatchRec->patch.pPatchBlockOffset)
5605 {
5606 PAVLOU32NODECORE pNode;
5607
5608 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5609 Assert(pNode);
5610 }
5611
5612 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5613 {
5614 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5615 AssertRC(rc);
5616 }
5617
5618#ifdef VBOX_WITH_STATISTICS
5619 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5620 {
5621 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5622 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5623 }
5624#endif
5625
5626 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5627 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5628 pPatch->nrPatch2GuestRecs = 0;
5629 Assert(pPatch->Patch2GuestAddrTree == 0);
5630
5631 patmEmptyTree(pVM, &pPatch->FixupTree);
5632 pPatch->nrFixups = 0;
5633 Assert(pPatch->FixupTree == 0);
5634
5635 if (pPatchRec->patch.pTempInfo)
5636 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5637
5638 /* Note: might fail, because it has already been removed (e.g. during reset). */
5639 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5640
5641 /* Free the patch record */
5642 MMHyperFree(pVM, pPatchRec);
5643 return VINF_SUCCESS;
5644}
5645
5646/**
5647 * RTAvlU32DoWithAll() worker.
5648 * Checks whether the current trampoline instruction is the jump to the target patch
5649 * and updates the displacement to jump to the new target.
5650 *
5651 * @returns VBox status code.
5652 * @retval VERR_ALREADY_EXISTS if the jump was found.
5653 * @param pNode The current patch to guest record to check.
5654 * @param pvUser The refresh state.
5655 */
5656static DECLCALLBACK(int) patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5657{
5658 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5659 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5660 PVM pVM = pRefreshPatchState->pVM;
5661
5662 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5663
5664 /*
5665 * Check if the patch instruction starts with a jump.
5666 * ASSUMES that there is no other patch to guest record that starts
5667 * with a jump.
5668 */
5669 if (*pPatchInstr == 0xE9)
5670 {
5671 /* Jump found, update the displacement. */
5672 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5673 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5674 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5675
5676         LogFlow(("Updating trampoline patch: new patch target %RRv, new displacement %d (old was %d)\n",
5677 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5678
5679 *(uint32_t *)&pPatchInstr[1] = displ;
5680 return VERR_ALREADY_EXISTS; /** @todo better return code */
5681 }
5682
5683 return VINF_SUCCESS;
5684}
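
/*
 * Illustrative sketch (not compiled) of the displacement math used above: the 32-bit
 * operand of a near jmp (0xE9) is relative to the address right after the 5 byte
 * instruction.  The patmExampleCalcJmpDispl helper is hypothetical.
 */
#if 0 /* example only */
static int32_t patmExampleCalcJmpDispl(RTRCPTR GCPtrJump, RTRCPTR GCPtrTarget)
{
    /* displacement = target - (address of the jump + SIZEOF_NEARJUMP32) */
    return (int32_t)(GCPtrTarget - (GCPtrJump + SIZEOF_NEARJUMP32));
}
#endif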
5685
5686/**
5687 * Attempt to refresh the patch by recompiling its entire code block
5688 *
5689 * @returns VBox status code.
5690 * @param pVM The cross context VM structure.
5691 * @param pPatchRec Patch record
5692 */
5693int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5694{
5695 PPATCHINFO pPatch;
5696 int rc;
5697 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5698 PTRAMPREC pTrampolinePatchesHead = NULL;
5699
5700 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5701
5702 pPatch = &pPatchRec->patch;
5703 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5704 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5705 {
5706 if (!pPatch->pTrampolinePatchesHead)
5707 {
5708 /*
5709 * It is sometimes possible that there are trampoline patches to this patch
5710 * but they are not recorded (after a saved state load for example).
5711 * Refuse to refresh those patches.
5712             * In theory this can hurt performance if the patched code is modified by the
5713             * guest and executed often. However, most of the time the state is saved after
5714             * the guest code was modified and the code is not changed again afterwards, so
5715             * this shouldn't be a big problem.
5716 */
5717 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5718 return VERR_PATCHING_REFUSED;
5719 }
5720 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5721 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5722 }
5723
5724 /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5725
5726 rc = PATMR3DisablePatch(pVM, pInstrGC);
5727 AssertRC(rc);
5728
5729 /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5730 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5731#ifdef VBOX_WITH_STATISTICS
5732 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5733 {
5734 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5735 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5736 }
5737#endif
5738
5739 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5740
5741 /* Attempt to install a new patch. */
5742 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5743 if (RT_SUCCESS(rc))
5744 {
5745 RTRCPTR pPatchTargetGC;
5746 PPATMPATCHREC pNewPatchRec;
5747
5748 /* Determine target address in new patch */
5749 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5750 Assert(pPatchTargetGC);
5751 if (!pPatchTargetGC)
5752 {
5753 rc = VERR_PATCHING_REFUSED;
5754 goto failure;
5755 }
5756
5757 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5758 pPatch->uCurPatchOffset = 0;
5759
5760 /* insert jump to new patch in old patch block */
5761 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5762 if (RT_FAILURE(rc))
5763 goto failure;
5764
5765 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5766 Assert(pNewPatchRec); /* can't fail */
5767
5768 /* Remove old patch (only do that when everything is finished) */
5769 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5770 AssertRC(rc2);
5771
5772 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5773 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5774 Assert(fInserted); NOREF(fInserted);
5775
5776         Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5777 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5778
5779 /* Used by another patch, so don't remove it! */
5780 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5781
5782 if (pTrampolinePatchesHead)
5783 {
5784 /* Update all trampoline patches to jump to the new patch. */
5785 PTRAMPREC pTrampRec = NULL;
5786 PATMREFRESHPATCH RefreshPatch;
5787
5788 RefreshPatch.pVM = pVM;
5789 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5790
5791 pTrampRec = pTrampolinePatchesHead;
5792
5793 while (pTrampRec)
5794 {
5795 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5796
5797 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5798 /*
5799 * We have to find the right patch2guest record because there might be others
5800 * for statistics.
5801 */
5802 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5803 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5804 Assert(rc == VERR_ALREADY_EXISTS);
5805 rc = VINF_SUCCESS;
5806 pTrampRec = pTrampRec->pNext;
5807 }
5808 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5809 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5810 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5811 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5812 }
5813 }
5814
5815failure:
5816 if (RT_FAILURE(rc))
5817 {
5818         LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating old one.\n", pInstrGC));
5819
5820 /* Remove the new inactive patch */
5821 rc = PATMR3RemovePatch(pVM, pInstrGC);
5822 AssertRC(rc);
5823
5824 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5825 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5826 Assert(fInserted); NOREF(fInserted);
5827
5828 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5829 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5830 AssertRC(rc2);
5831
5832 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5833 }
5834 return rc;
5835}
5836
5837/**
5838 * Find patch for privileged instruction at specified location
5839 *
5840 * @returns Patch structure pointer if found; else NULL
5841 * @param pVM The cross context VM structure.
5842  * @param pInstrGC Guest context pointer to an instruction that might lie
5843  * within 5 bytes of an existing patch jump
5844 * @param fIncludeHints Include hinted patches or not
5845 */
5846PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5847{
5848 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5849 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5850 if (pPatchRec)
5851 {
5852 if ( pPatchRec->patch.uState == PATCH_ENABLED
5853 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5854 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5855 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5856 {
5857 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5858 return &pPatchRec->patch;
5859 }
5860 else
5861 if ( fIncludeHints
5862 && pPatchRec->patch.uState == PATCH_DISABLED
5863 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5864 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5865 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5866 {
5867 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5868 return &pPatchRec->patch;
5869 }
5870 }
5871 return NULL;
5872}
5873
5874/**
5875 * Checks whether the GC address is inside a generated patch jump
5876 *
5877 * @returns true -> yes, false -> no
5878 * @param pVM The cross context VM structure.
5879 * @param pAddr Guest context address.
5880 * @param pPatchAddr Guest context patch address (if true).
5881 */
5882VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5883{
5884 RTRCPTR addr;
5885 PPATCHINFO pPatch;
5886
5887 Assert(!HMIsEnabled(pVM));
5888 if (PATMIsEnabled(pVM) == false)
5889 return false;
5890
5891 if (pPatchAddr == NULL)
5892 pPatchAddr = &addr;
5893
5894 *pPatchAddr = 0;
5895
5896 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5897 if (pPatch)
5898 *pPatchAddr = pPatch->pPrivInstrGC;
5899
5900 return *pPatchAddr == 0 ? false : true;
5901}
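
/*
 * Illustrative sketch (not compiled): backing EIP up to the start of the patch jump
 * when it points into the middle of the overwritten 5 bytes.  The
 * patmExampleCanonicalizeEip helper is hypothetical.
 */
#if 0 /* example only */
static RTRCPTR patmExampleCanonicalizeEip(PVM pVM, RTRCPTR GCPtrEip)
{
    RTGCPTR32 GCPtrPatchStart;
    if (PATMR3IsInsidePatchJump(pVM, GCPtrEip, &GCPtrPatchStart))
        return (RTRCPTR)GCPtrPatchStart;    /* start of the patched (privileged) instruction */
    return GCPtrEip;                        /* not inside a patch jump; leave unchanged */
}
#endif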
5902
5903/**
5904 * Remove patch for privileged instruction at specified location
5905 *
5906 * @returns VBox status code.
5907 * @param pVM The cross context VM structure.
5908  * @param pInstrGC Guest context pointer to privileged instruction
5909  *
5910  * @note returns failure if patching is not allowed or not possible
5911 *
5912 */
5913VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5914{
5915 PPATMPATCHREC pPatchRec;
5916
5917 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5918 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5919 if (pPatchRec)
5920 {
5921 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5922 if (rc == VWRN_PATCH_REMOVED)
5923 return VINF_SUCCESS;
5924
5925 return patmR3RemovePatch(pVM, pPatchRec, false);
5926 }
5927 AssertFailed();
5928 return VERR_PATCH_NOT_FOUND;
5929}
5930
5931/**
5932 * Mark patch as dirty
5933 *
5934 * @returns VBox status code.
5935 * @param pVM The cross context VM structure.
5936 * @param pPatch Patch record
5937 *
5938  * @note returns failure if patching is not allowed or not possible
5939 *
5940 */
5941static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5942{
5943 if (pPatch->pPatchBlockOffset)
5944 {
5945 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5946 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5947 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5948 }
5949
5950 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5951 /* Put back the replaced instruction. */
5952 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5953 if (rc == VWRN_PATCH_REMOVED)
5954 return VINF_SUCCESS;
5955
5956 /* Note: we don't restore patch pages for patches that are not enabled! */
5957 /* Note: be careful when changing this behaviour!! */
5958
5959 /* The patch pages are no longer marked for self-modifying code detection */
5960 if (pPatch->flags & PATMFL_CODE_MONITORED)
5961 {
5962 rc = patmRemovePatchPages(pVM, pPatch);
5963 AssertRCReturn(rc, rc);
5964 }
5965 pPatch->uState = PATCH_DIRTY;
5966
5967 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5968 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5969
5970 return VINF_SUCCESS;
5971}
5972
5973/**
5974 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5975 *
5976  * @returns Corresponding guest context instruction pointer, or 0 if not found.
5977 * @param pVM The cross context VM structure.
5978 * @param pPatch Patch block structure pointer
5979 * @param pPatchGC GC address in patch block
5980 */
5981RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5982{
5983 Assert(pPatch->Patch2GuestAddrTree);
5984 /* Get the closest record from below. */
5985 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5986 if (pPatchToGuestRec)
5987 return pPatchToGuestRec->pOrgInstrGC;
5988
5989 return 0;
5990}
5991
5992/**
5993 * Converts Guest code GC ptr to Patch code GC ptr (if found)
5994 *
5995 * @returns corresponding GC pointer in patch block
5996 * @param pVM The cross context VM structure.
5997 * @param pPatch Current patch block pointer
5998 * @param pInstrGC Guest context pointer to privileged instruction
5999 *
6000 */
6001RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6002{
6003 if (pPatch->Guest2PatchAddrTree)
6004 {
6005 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6006 if (pGuestToPatchRec)
6007 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6008 }
6009
6010 return 0;
6011}
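
/*
 * Illustrative sketch (not compiled): the guest-to-patch and patch-to-guest lookups
 * are inverses for instructions that were recompiled 1:1.  The patmExampleRoundTrip
 * helper is hypothetical.
 */
#if 0 /* example only */
static bool patmExampleRoundTrip(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    RTRCPTR pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pInstrGC);
    if (!pPatchGC)
        return false;   /* the instruction was not recompiled into this patch block */
    return patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchGC) == pInstrGC;
}
#endif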
6012
6013/**
6014 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6015 *
6016 * @returns corresponding GC pointer in patch block
6017 * @param pVM The cross context VM structure.
6018 * @param pInstrGC Guest context pointer to privileged instruction
6019 */
6020static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6021{
6022 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6023 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6024 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6025 return NIL_RTRCPTR;
6026}
6027
6028/**
6029 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6030 * identical match)
6031 *
6032 * @returns corresponding GC pointer in patch block
6033 * @param pVM The cross context VM structure.
6034 * @param pPatch Current patch block pointer
6035 * @param pInstrGC Guest context pointer to privileged instruction
6036 *
6037 */
6038RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6039{
6040 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6041 if (pGuestToPatchRec)
6042 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6043 return NIL_RTRCPTR;
6044}
6045
6046/**
6047 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6048 *
6049 * @returns original GC instruction pointer or 0 if not found
6050 * @param pVM The cross context VM structure.
6051 * @param pPatchGC GC address in patch block
6052 * @param pEnmState State of the translated address (out)
6053 *
6054 */
6055VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6056{
6057 PPATMPATCHREC pPatchRec;
6058 void *pvPatchCoreOffset;
6059 RTRCPTR pPrivInstrGC;
6060
6061 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6062 Assert(!HMIsEnabled(pVM));
6063 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6064 if (pvPatchCoreOffset == 0)
6065 {
6066 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6067 return 0;
6068 }
6069 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6070 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6071 if (pEnmState)
6072 {
6073 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6074 || pPatchRec->patch.uState == PATCH_DIRTY
6075 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6076 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6077 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6078
6079 if ( !pPrivInstrGC
6080 || pPatchRec->patch.uState == PATCH_UNUSABLE
6081 || pPatchRec->patch.uState == PATCH_REFUSED)
6082 {
6083 pPrivInstrGC = 0;
6084 *pEnmState = PATMTRANS_FAILED;
6085 }
6086 else
6087 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6088 {
6089 *pEnmState = PATMTRANS_INHIBITIRQ;
6090 }
6091 else
6092 if ( pPatchRec->patch.uState == PATCH_ENABLED
6093 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6094 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6095 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6096 {
6097 *pEnmState = PATMTRANS_OVERWRITTEN;
6098 }
6099 else
6100 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6101 {
6102 *pEnmState = PATMTRANS_OVERWRITTEN;
6103 }
6104 else
6105 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6106 {
6107 *pEnmState = PATMTRANS_PATCHSTART;
6108 }
6109 else
6110 *pEnmState = PATMTRANS_SAFE;
6111 }
6112 return pPrivInstrGC;
6113}
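
/*
 * Illustrative sketch (not compiled): translating a faulting EIP that lies inside
 * patch memory back to the corresponding guest instruction, as a trap handler might.
 * The patmExampleTranslateFaultEip helper is hypothetical.
 */
#if 0 /* example only */
static RTRCPTR patmExampleTranslateFaultEip(PVM pVM, RTRCPTR GCPtrEip)
{
    if (!PATMIsPatchGCAddr(pVM, GCPtrEip))
        return GCPtrEip;                            /* not inside the patch memory area */
    PATMTRANSSTATE enmState;
    RTRCPTR GCPtrOrg = PATMR3PatchToGCPtr(pVM, GCPtrEip, &enmState);
    if (GCPtrOrg && enmState != PATMTRANS_FAILED)
        return GCPtrOrg;                            /* corresponding original guest instruction */
    return GCPtrEip;                                /* could not translate; leave as is */
}
#endif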
6114
6115/**
6116 * Returns the GC pointer of the patch for the specified GC address
6117 *
6118  * @returns GC pointer to the patch code, or NIL_RTRCPTR if no enabled or dirty patch is found.
6119 * @param pVM The cross context VM structure.
6120 * @param pAddrGC Guest context address
6121 */
6122VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6123{
6124 PPATMPATCHREC pPatchRec;
6125
6126 Assert(!HMIsEnabled(pVM));
6127
6128 /* Find the patch record. */
6129 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6130 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6131 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6132 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6133 return NIL_RTRCPTR;
6134}
6135
6136/**
6137 * Attempt to recover dirty instructions
6138 *
6139 * @returns VBox status code.
6140 * @param pVM The cross context VM structure.
6141 * @param pCtx Pointer to the guest CPU context.
6142 * @param pPatch Patch record.
6143 * @param pPatchToGuestRec Patch to guest address record.
6144 * @param pEip GC pointer of trapping instruction.
6145 */
6146static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6147{
6148 DISCPUSTATE CpuOld, CpuNew;
6149 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6150 int rc;
6151 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6152 uint32_t cbDirty;
6153 PRECPATCHTOGUEST pRec;
6154 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6155 PVMCPU pVCpu = VMMGetCpu0(pVM);
6156 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6157
6158 pRec = pPatchToGuestRec;
6159 pCurInstrGC = pOrgInstrGC;
6160 pCurPatchInstrGC = pEip;
6161 cbDirty = 0;
6162 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6163
6164 /* Find all adjacent dirty instructions */
6165 while (true)
6166 {
6167 if (pRec->fJumpTarget)
6168 {
6169 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6170 pRec->fDirty = false;
6171 return VERR_PATCHING_REFUSED;
6172 }
6173
6174 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6175 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6176 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6177
6178 /* Only harmless instructions are acceptable. */
6179 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6180 if ( RT_FAILURE(rc)
6181 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6182 {
6183 if (RT_SUCCESS(rc))
6184 cbDirty += CpuOld.cbInstr;
6185 else
6186 if (!cbDirty)
6187 cbDirty = 1;
6188 break;
6189 }
6190
6191#ifdef DEBUG
6192 char szBuf[256];
6193 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6194 szBuf, sizeof(szBuf), NULL);
6195 Log(("DIRTY: %s\n", szBuf));
6196#endif
6197 /* Mark as clean; if we fail we'll let it always fault. */
6198 pRec->fDirty = false;
6199
6200 /* Remove old lookup record. */
6201 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6202 pPatchToGuestRec = NULL;
6203
6204 pCurPatchInstrGC += CpuOld.cbInstr;
6205 cbDirty += CpuOld.cbInstr;
6206
6207 /* Let's see if there's another dirty instruction right after. */
6208 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6209 if (!pRec || !pRec->fDirty)
6210 break; /* no more dirty instructions */
6211
6212 /* In case of complex instructions the next guest instruction could be quite far off. */
6213 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6214 }
6215
6216 if ( RT_SUCCESS(rc)
6217 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6218 )
6219 {
6220 uint32_t cbLeft;
6221
6222 pCurPatchInstrHC = pPatchInstrHC;
6223 pCurPatchInstrGC = pEip;
6224 cbLeft = cbDirty;
6225
6226 while (cbLeft && RT_SUCCESS(rc))
6227 {
6228 bool fValidInstr;
6229
6230 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6231
6232 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6233 if ( !fValidInstr
6234 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6235 )
6236 {
6237 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6238
6239 if ( pTargetGC >= pOrgInstrGC
6240 && pTargetGC <= pOrgInstrGC + cbDirty
6241 )
6242 {
6243 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6244 fValidInstr = true;
6245 }
6246 }
6247
6248 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6249 if ( rc == VINF_SUCCESS
6250 && CpuNew.cbInstr <= cbLeft /* must still fit */
6251 && fValidInstr
6252 )
6253 {
6254#ifdef DEBUG
6255 char szBuf[256];
6256 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6257 szBuf, sizeof(szBuf), NULL);
6258 Log(("NEW: %s\n", szBuf));
6259#endif
6260
6261 /* Copy the new instruction. */
6262 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6263 AssertRC(rc);
6264
6265 /* Add a new lookup record for the duplicated instruction. */
6266 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6267 }
6268 else
6269 {
6270#ifdef DEBUG
6271 char szBuf[256];
6272 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6273 szBuf, sizeof(szBuf), NULL);
6274 Log(("NEW: %s (FAILED)\n", szBuf));
6275#endif
6276 /* Restore the old lookup record for the duplicated instruction. */
6277 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6278
6279 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6280 rc = VERR_PATCHING_REFUSED;
6281 break;
6282 }
6283 pCurInstrGC += CpuNew.cbInstr;
6284 pCurPatchInstrHC += CpuNew.cbInstr;
6285 pCurPatchInstrGC += CpuNew.cbInstr;
6286 cbLeft -= CpuNew.cbInstr;
6287
6288 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6289 if (!cbLeft)
6290 {
6291 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6292 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6293 {
6294 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6295 if (pRec)
6296 {
6297 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6298 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6299
6300 Assert(!pRec->fDirty);
6301
6302 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6303 if (cbFiller >= SIZEOF_NEARJUMP32)
6304 {
6305 pPatchFillHC[0] = 0xE9;
6306 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6307#ifdef DEBUG
6308 char szBuf[256];
6309 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6310 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6311 Log(("FILL: %s\n", szBuf));
6312#endif
6313 }
6314 else
6315 {
6316 for (unsigned i = 0; i < cbFiller; i++)
6317 {
6318 pPatchFillHC[i] = 0x90; /* NOP */
6319#ifdef DEBUG
6320 char szBuf[256];
6321 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6322 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6323 Log(("FILL: %s\n", szBuf));
6324#endif
6325 }
6326 }
6327 }
6328 }
6329 }
6330 }
6331 }
6332 else
6333 rc = VERR_PATCHING_REFUSED;
6334
6335 if (RT_SUCCESS(rc))
6336 {
6337 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6338 }
6339 else
6340 {
6341 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6342 Assert(cbDirty);
6343
6344 /* Mark the whole instruction stream with breakpoints. */
6345 if (cbDirty)
6346 memset(pPatchInstrHC, 0xCC, cbDirty);
6347
6348 if ( pVM->patm.s.fOutOfMemory == false
6349 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6350 {
6351 rc = patmR3RefreshPatch(pVM, pPatch);
6352 if (RT_FAILURE(rc))
6353 {
6354 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6355 }
6356 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6357 rc = VERR_PATCHING_REFUSED;
6358 }
6359 }
6360 return rc;
6361}
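
/* Editor's note (not part of the original source): a worked example of the
 * filler logic in patmR3HandleDirtyInstr above.  If the rewritten instructions
 * leave, say, 7 spare bytes before the next mapped patch instruction, a 5-byte
 * near jump is emitted and the leftover bytes are simply skipped:
 *
 *     E9 02 00 00 00        ; jmp +2  (rel32 = cbFiller - SIZEOF_NEARJUMP32)
 *     xx xx                 ; 2 leftover bytes, never executed
 *
 * With fewer than SIZEOF_NEARJUMP32 spare bytes the gap is padded with 0x90
 * (NOP) instead.
 */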
6362
6363/**
6364 * Handle trap inside patch code
6365 *
6366 * @returns VBox status code.
6367 * @param pVM The cross context VM structure.
6368 * @param pCtx Pointer to the guest CPU context.
6369 * @param pEip GC pointer of trapping instruction.
6370 * @param ppNewEip GC pointer to new instruction.
6371 */
6372VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6373{
6374 PPATMPATCHREC pPatch = 0;
6375 void *pvPatchCoreOffset;
6376 RTRCUINTPTR offset;
6377 RTRCPTR pNewEip;
6378    int rc;
6379 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6380 PVMCPU pVCpu = VMMGetCpu0(pVM);
6381
6382 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6383 Assert(pVM->cCpus == 1);
6384
6385 pNewEip = 0;
6386 *ppNewEip = 0;
6387
6388 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6389
6390 /* Find the patch record. */
6391 /* Note: there might not be a patch to guest translation record (global function) */
6392 offset = pEip - pVM->patm.s.pPatchMemGC;
6393 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6394 if (pvPatchCoreOffset)
6395 {
6396 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6397
6398 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6399
6400 if (pPatch->patch.uState == PATCH_DIRTY)
6401 {
6402 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6403 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6404 {
6405 /* Function duplication patches set fPIF to 1 on entry */
6406 pVM->patm.s.pGCStateHC->fPIF = 1;
6407 }
6408 }
6409 else
6410 if (pPatch->patch.uState == PATCH_DISABLED)
6411 {
6412 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6413 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6414 {
6415 /* Function duplication patches set fPIF to 1 on entry */
6416 pVM->patm.s.pGCStateHC->fPIF = 1;
6417 }
6418 }
6419 else
6420 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6421 {
6422 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6423
6424 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6425 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6426 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6427 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6428 }
6429
6430 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6431 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6432
6433 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6434 pPatch->patch.cTraps++;
6435 PATM_STAT_FAULT_INC(&pPatch->patch);
6436 }
6437 else
6438 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6439
6440 /* Check if we were interrupted in PATM generated instruction code. */
6441 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6442 {
6443 DISCPUSTATE Cpu;
6444 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6445 AssertRC(rc);
6446
6447 if ( rc == VINF_SUCCESS
6448 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6449 || Cpu.pCurInstr->uOpcode == OP_PUSH
6450 || Cpu.pCurInstr->uOpcode == OP_CALL)
6451 )
6452 {
6453 uint64_t fFlags;
6454
6455 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6456
6457 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6458 {
6459 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6460 if ( rc == VINF_SUCCESS
6461 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6462 {
6463 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6464
6465 /* Reset the PATM stack. */
6466 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6467
6468 pVM->patm.s.pGCStateHC->fPIF = 1;
6469
6470 Log(("Faulting push -> go back to the original instruction\n"));
6471
6472 /* continue at the original instruction */
6473 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6474 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6475 return VINF_SUCCESS;
6476 }
6477 }
6478
6479 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6480 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6481 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6482 if (rc == VINF_SUCCESS)
6483 {
6484 /* The guest page *must* be present. */
6485 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6486 if ( rc == VINF_SUCCESS
6487 && (fFlags & X86_PTE_P))
6488 {
6489 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6490 return VINF_PATCH_CONTINUE;
6491 }
6492 }
6493 }
6494 else
6495        if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip)
6496 {
6497 /* Invalidated patch or first instruction overwritten.
6498 * We can ignore the fPIF state in this case.
6499 */
6500 /* Reset the PATM stack. */
6501 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6502
6503 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6504
6505 pVM->patm.s.pGCStateHC->fPIF = 1;
6506
6507 /* continue at the original instruction */
6508 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6509 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6510 return VINF_SUCCESS;
6511 }
6512
6513 char szBuf[256];
6514 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6515
6516 /* Very bad. We crashed in emitted code. Probably stack? */
6517 if (pPatch)
6518 {
6519 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6520 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n",
6521 pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags,
6522 pPatchToGuestRec->fDirty, szBuf));
6523 }
6524 else
6525 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6526 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6527 EMR3FatalError(pVCpu, VERR_PATM_IPE_TRAP_IN_PATCH_CODE);
6528 }
6529
6530 /* From here on, we must have a valid patch to guest translation. */
6531 if (pvPatchCoreOffset == 0)
6532 {
6533 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6534 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6535 return VERR_PATCH_NOT_FOUND;
6536 }
6537
6538 /* Take care of dirty/changed instructions. */
6539 if (pPatchToGuestRec->fDirty)
6540 {
6541 Assert(pPatchToGuestRec->Core.Key == offset);
6542 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6543
6544 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6545 if (RT_SUCCESS(rc))
6546 {
6547 /* Retry the current instruction. */
6548 pNewEip = pEip;
6549 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6550 }
6551 else
6552 {
6553 /* Reset the PATM stack. */
6554 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6555
6556 rc = VINF_SUCCESS; /* Continue at original instruction. */
6557 }
6558
6559 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6560 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6561 return rc;
6562 }
6563
6564#ifdef VBOX_STRICT
6565 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6566 {
6567 DISCPUSTATE cpu;
6568 bool disret;
6569 uint32_t cbInstr;
6570 PATMP2GLOOKUPREC cacheRec;
6571 RT_ZERO(cacheRec);
6572 cacheRec.pPatch = &pPatch->patch;
6573
6574 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6575 &cpu, &cbInstr);
6576 if (cacheRec.Lock.pvMap)
6577 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6578
6579 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6580 {
6581 RTRCPTR retaddr;
6582 PCPUMCTX pCtx2;
6583
6584 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6585
6586 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6587 AssertRC(rc);
6588
6589 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6590 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6591 }
6592 }
6593#endif
6594
6595 /* Return original address, correct by subtracting the CS base address. */
6596 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6597
6598 /* Reset the PATM stack. */
6599 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6600
6601 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6602 {
6603 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6604 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6605#ifdef VBOX_STRICT
6606 DISCPUSTATE cpu;
6607 bool disret;
6608 uint32_t cbInstr;
6609 PATMP2GLOOKUPREC cacheRec;
6610 RT_ZERO(cacheRec);
6611 cacheRec.pPatch = &pPatch->patch;
6612
6613 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6614 &cpu, &cbInstr);
6615 if (cacheRec.Lock.pvMap)
6616 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6617
6618 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6619 {
6620 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6621 &cpu, &cbInstr);
6622 if (cacheRec.Lock.pvMap)
6623 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6624
6625 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6626 }
6627#endif
6628 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6629 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6630 }
6631
6632 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6633 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6634 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6635 {
6636 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6637        Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6638 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6639 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6640 return VERR_PATCH_DISABLED;
6641 }
6642
6643#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6644 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6645 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6646 {
6647 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6648        // We are only wasting time; back out the patch.
6649 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6650 pTrapRec->pNextPatchInstr = 0;
6651 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6652 return VERR_PATCH_DISABLED;
6653 }
6654#endif
6655
6656 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6657 return VINF_SUCCESS;
6658}
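
/* Editor's note (not part of the original source): the recurring pattern
 *     *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
 * converts the flat guest address back into a CS-relative EIP, i.e. it undoes
 *     flat = csBase + eip   =>   eip = flat - csBase
 * since SELMToFlat() with a zero offset yields the flat address of the CS base
 * (see the "correct by subtracting the CS base address" comment above).
 */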
6659
6660
6661/**
6662 * Handle page-fault in monitored page
6663 *
6664 * @returns VBox status code.
6665 * @param pVM The cross context VM structure.
6666 */
6667VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6668{
6669 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6670 PVMCPU pVCpu = VMMGetCpu0(pVM);
6671
6672 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6673 addr &= PAGE_BASE_GC_MASK;
6674
6675 int rc = PGMHandlerVirtualDeregister(pVM, pVCpu, addr, false /*fHypervisor*/);
6676 AssertRC(rc); NOREF(rc);
6677
6678 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6679 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6680 {
6681 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6682 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6683 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6684 if (rc == VWRN_PATCH_REMOVED)
6685 return VINF_SUCCESS;
6686
6687 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6688
6689 if (addr == pPatchRec->patch.pPrivInstrGC)
6690 addr++;
6691 }
6692
6693    for (;;)
6694 {
6695 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6696
6697 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6698 break;
6699
6700 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6701 {
6702 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6703 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6704 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6705 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6706 }
6707 addr = pPatchRec->patch.pPrivInstrGC + 1;
6708 }
6709
6710 pVM->patm.s.pvFaultMonitor = 0;
6711 return VINF_SUCCESS;
6712}
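
/* Editor's note (not part of the original source): the loop above is the usual
 * "walk every record on a page" idiom for the offset-keyed AVL trees used by
 * PATM: ask for the best fit at or above the current key (fAbove = true), stop
 * once the hit leaves the page, and restart the search just past the hit.  A
 * generic sketch, where RecAddrFromNode() stands for a purely hypothetical
 * accessor returning the record's guest address:
 *
 *     for (RTRCPTR Key = PageStart;;)
 *     {
 *         PAVLOU32NODECORE pNode = RTAvloU32GetBestFit(&Tree, Key, true);
 *         if (!pNode || RecAddrFromNode(pNode) - PageStart >= PAGE_SIZE)
 *             break;
 *         // handle the record ...
 *         Key = RecAddrFromNode(pNode) + 1;
 *     }
 */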
6713
6714
6715#ifdef VBOX_WITH_STATISTICS
6716
6717static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6718{
6719 if (pPatch->flags & PATMFL_SYSENTER)
6720 {
6721 return "SYSENT";
6722 }
6723 else
6724 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6725 {
6726 static char szTrap[16];
6727 uint32_t iGate;
6728
6729 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6730 if (iGate < 256)
6731 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6732 else
6733 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6734 return szTrap;
6735 }
6736 else
6737 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6738 return "DUPFUNC";
6739 else
6740 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6741 return "FUNCCALL";
6742 else
6743 if (pPatch->flags & PATMFL_TRAMPOLINE)
6744 return "TRAMP";
6745 else
6746 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6747}
6748
6749static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6750{
6751 NOREF(pVM);
6752    switch (pPatch->uState)
6753 {
6754 case PATCH_ENABLED:
6755 return "ENA";
6756 case PATCH_DISABLED:
6757 return "DIS";
6758 case PATCH_DIRTY:
6759 return "DIR";
6760 case PATCH_UNUSABLE:
6761 return "UNU";
6762 case PATCH_REFUSED:
6763 return "REF";
6764 case PATCH_DISABLE_PENDING:
6765 return "DIP";
6766 default:
6767 AssertFailed();
6768 return " ";
6769 }
6770}
6771
6772/**
6773 * Resets the sample.
6774 * @param pVM The cross context VM structure.
6775 * @param pvSample The sample registered using STAMR3RegisterCallback.
6776 */
6777static void patmResetStat(PVM pVM, void *pvSample)
6778{
6779 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6780 Assert(pPatch);
6781
6782 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6783 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6784}
6785
6786/**
6787 * Prints the sample into the buffer.
6788 *
6789 * @param pVM The cross context VM structure.
6790 * @param pvSample The sample registered using STAMR3RegisterCallback.
6791 * @param pszBuf The buffer to print into.
6792 * @param cchBuf The size of the buffer.
6793 */
6794static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6795{
6796 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6797 Assert(pPatch);
6798
6799 Assert(pPatch->uState != PATCH_REFUSED);
6800 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6801
6802 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6803 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6804 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6805}
6806
6807/**
6808 * Returns the GC address of the corresponding patch statistics counter
6809 *
6810 * @returns Stat address
6811 * @param pVM The cross context VM structure.
6812 * @param pPatch Patch structure
6813 */
6814RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6815{
6816 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6817 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6818}
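
/* Editor's note (not part of the original source): the arithmetic above is
 * plain array indexing performed on a raw GC address; in spirit it is
 * '&pStats[uPatchIdx].u32A', spelled out as
 *
 *     addr = base + idx * sizeof(element) + offsetof(element, member)
 *
 * The host-side counterpart indexes pStatsHC[] directly, as seen in
 * patmResetStat() and patmPrintStat() above.
 */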
6819
6820#endif /* VBOX_WITH_STATISTICS */
6821#ifdef VBOX_WITH_DEBUGGER
6822
6823/**
6824 * @callback_method_impl{FNDBGCCMD, The '.patmoff' command.}
6825 */
6826static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6827{
6828 /*
6829 * Validate input.
6830 */
6831 NOREF(cArgs); NOREF(paArgs);
6832 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6833 PVM pVM = pUVM->pVM;
6834 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6835
6836 if (HMIsEnabled(pVM))
6837 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6838
6839 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6840 PATMR3AllowPatching(pVM->pUVM, false);
6841 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6842}
6843
6844/**
6845 * @callback_method_impl{FNDBGCCMD, The '.patmon' command.}
6846 */
6847static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6848{
6849 /*
6850 * Validate input.
6851 */
6852 NOREF(cArgs); NOREF(paArgs);
6853 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6854 PVM pVM = pUVM->pVM;
6855 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6856
6857 if (HMIsEnabled(pVM))
6858 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6859
6860 PATMR3AllowPatching(pVM->pUVM, true);
6861 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6862 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6863}
6864
6865#endif /* VBOX_WITH_DEBUGGER */
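
/* Editor's note (not part of the original source): roughly how the two
 * commands above look from the VM debugger console; the exact prompt may
 * differ, but the replies are the strings printed by the callbacks:
 *
 *     VBoxDbg> .patmoff
 *     Patching disabled
 *     VBoxDbg> .patmon
 *     Patching enabled
 */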
6866