VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@57358

Last change on this file since 57358 was 57358, checked in by vboxsync, 10 years ago

*: scm cleanup run.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 262.3 KB
1/* $Id: PATM.cpp 57358 2015-08-14 15:16:38Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2015 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20
21/*********************************************************************************************************************************
22* Header Files *
23*********************************************************************************************************************************/
24#define LOG_GROUP LOG_GROUP_PATM
25#include <VBox/vmm/patm.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/cpum.h>
30#include <VBox/vmm/cpumdis.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/mm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/ssm.h>
36#include <VBox/vmm/trpm.h>
37#include <VBox/vmm/cfgm.h>
38#include <VBox/param.h>
39#include <VBox/vmm/selm.h>
40#include <VBox/vmm/csam.h>
41#include <iprt/avl.h>
42#include "PATMInternal.h"
43#include "PATMPatch.h"
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/uvm.h>
46#include <VBox/dbg.h>
47#include <VBox/err.h>
48#include <VBox/log.h>
49#include <iprt/assert.h>
50#include <iprt/asm.h>
51#include <VBox/dis.h>
52#include <VBox/disopcode.h>
53#include "internal/pgm.h"
54
55#include <iprt/string.h>
56#include "PATMA.h"
57
58//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
59//#define PATM_DISABLE_ALL
60
61/**
62 * Refresh trampoline patch state.
63 */
64typedef struct PATMREFRESHPATCH
65{
66 /** Pointer to the VM structure. */
67 PVM pVM;
68 /** The trampoline patch record. */
69 PPATCHINFO pPatchTrampoline;
70 /** The new patch we want to jump to. */
71 PPATCHINFO pPatchRec;
72} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
73
74
75#define PATMREAD_RAWCODE 1 /* read code as-is */
76#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
77#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
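/* Illustrative usage sketch: the PATMREAD_* values are bit flags that are OR'ed
 * together into the fReadFlags member consumed by the patmReadBytes() callback
 * further down, e.g.
 *     uint32_t fReadFlags = PATMREAD_ORGCODE | PATMREAD_NOCHECK;
 * to read the original guest bytes while skipping the patch-conflict assertions. */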
78
79/*
80 * Private structure used during disassembly
81 */
82typedef struct
83{
84 PVM pVM;
85 PPATCHINFO pPatchInfo;
86 R3PTRTYPE(uint8_t *) pbInstrHC;
87 RTRCPTR pInstrGC;
88 uint32_t fReadFlags;
89} PATMDISASM, *PPATMDISASM;
90
91
92/*********************************************************************************************************************************
93* Internal Functions *
94*********************************************************************************************************************************/
95static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
96static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
98
99#ifdef LOG_ENABLED // keep gcc quiet
100static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
101#endif
102#ifdef VBOX_WITH_STATISTICS
103static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
104static void patmResetStat(PVM pVM, void *pvSample);
105static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
106#endif
107
108#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
109#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
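/* Illustrative note: both macros rely on the patch memory block being mapped at
 * pPatchMemHC in ring-3 and at pPatchMemGC in the raw-mode context, so the
 * conversion simply carries the offset over. E.g. a patch byte at
 * pVM->patm.s.pPatchMemHC + 0x123 translates to pVM->patm.s.pPatchMemGC + 0x123,
 * and patmPatchGCPtr2PatchHCPtr() performs the reverse mapping. */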
110
111static int patmReinit(PVM pVM);
112static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
113static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
114static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
115
116#ifdef VBOX_WITH_DEBUGGER
117static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
118static FNDBGCCMD patmr3CmdOn;
119static FNDBGCCMD patmr3CmdOff;
120
121/** Command descriptors. */
122static const DBGCCMD g_aCmds[] =
123{
124 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax, ....pszDescription */
125 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
126 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
127};
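/* Illustrative note: once registered via DBGCRegisterCommands() below, these
 * become external commands in the VBox debugger console; they are typically
 * invoked with a leading dot (e.g. ".patmoff" to disable patching), though the
 * exact syntax depends on the DBGC front-end in use. */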
128#endif
129
130/* Don't want to break saved states, so put it here as a global variable. */
131static unsigned int cIDTHandlersDisabled = 0;
132
133/**
134 * Initializes the PATM.
135 *
136 * @returns VBox status code.
137 * @param pVM Pointer to the VM.
138 */
139VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
140{
141 int rc;
142
143 /*
144 * We only need a saved state dummy loader if HM is enabled.
145 */
146 if (HMIsEnabled(pVM))
147 {
148 pVM->fPATMEnabled = false;
149 return SSMR3RegisterStub(pVM, "PATM", 0);
150 }
151
152 /*
153 * Raw-mode.
154 */
155 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
156
157 /* These values can't change as they are hardcoded in patch code (old saved states!) */
158 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
159 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
160 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
161 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
162
163 AssertReleaseMsg(g_fPatmInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
164 ("Interrupt flags out of sync!! g_fPatmInterruptFlag=%#x expected %#x. broken assembler?\n", g_fPatmInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
165
166 /* Allocate patch memory and GC patch state memory. */
167 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
168 /* Add another page in case the generated code is much larger than expected. */
169 /** @todo bad safety precaution */
170 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
171 if (RT_FAILURE(rc))
172 {
173 Log(("MMHyperAlloc failed with %Rrc\n", rc));
174 return rc;
175 }
176 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
177
178 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
179 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
180 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
181
182 patmR3DbgInit(pVM);
183
184 /*
185 * Hypervisor memory for GC status data (read/write)
186 *
187 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
188 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
189 *
190 */
191 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
192 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
193 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
194
195 /* Hypervisor memory for patch statistics */
196 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
197 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
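    /* Layout of the single hyper-heap block allocated above (derived from the
     * assignments; offsets relative to pPatchMemHC):
     *   patch code   at +0                          (PATCH_MEMORY_SIZE bytes)
     *   slack page   at +PATCH_MEMORY_SIZE          (PAGE_SIZE)
     *   PATM stack   after the slack page           (PATM_STACK_TOTAL_SIZE)
     *   GC state     after the stack                (one page reserved for PATMGCSTATE)
     *   statistics   after the GC state page        (PATM_STAT_MEMSIZE)
     */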
198
199 /* Memory for patch lookup trees. */
200 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
201 AssertRCReturn(rc, rc);
202 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
203
204#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
205 /* Check CFGM option. */
206 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
207 if (RT_FAILURE(rc))
208# ifdef PATM_DISABLE_ALL
209 pVM->fPATMEnabled = false;
210# else
211 pVM->fPATMEnabled = true;
212# endif
213#endif
214
215 rc = patmReinit(pVM);
216 AssertRC(rc);
217 if (RT_FAILURE(rc))
218 return rc;
219
220 /*
221 * Register the virtual page access handler type.
222 */
223 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_ALL, false /*fRelocUserRC*/,
224 NULL /*pfnInvalidateR3*/,
225 patmVirtPageHandler,
226 "patmVirtPageHandler", "patmRCVirtPagePfHandler",
227 "PATMMonitorPatchJump", &pVM->patm.s.hMonitorPageType);
228 AssertRCReturn(rc, rc);
229
230 /*
231 * Register save and load state notifiers.
232 */
233 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SAVED_STATE_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
234 NULL, NULL, NULL,
235 NULL, patmR3Save, NULL,
236 NULL, patmR3Load, NULL);
237 AssertRCReturn(rc, rc);
238
239#ifdef VBOX_WITH_DEBUGGER
240 /*
241 * Debugger commands.
242 */
243 static bool s_fRegisteredCmds = false;
244 if (!s_fRegisteredCmds)
245 {
246 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
247 if (RT_SUCCESS(rc2))
248 s_fRegisteredCmds = true;
249 }
250#endif
251
252#ifdef VBOX_WITH_STATISTICS
253 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
254 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
255 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
256 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
257 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
258 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
259 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
260 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
261
262 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
263 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
264
265 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
266 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
267 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
268
269 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
270 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
271 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
272 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
273 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
274
275 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
276 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
277
278 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
279 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
280
281 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
282 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
283 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
284
285 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
286 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
287 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
288
289 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
290 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
291
292 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
293 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
294 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
295 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
296
297 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
298 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
299
300 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
301 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
302
303 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
304 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
305 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
306
307 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
308 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
309 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
310 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
311
312 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
313 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
314 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
315 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
316 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
317
318 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
319#endif /* VBOX_WITH_STATISTICS */
320
321 Log(("g_patmCallRecord.cbFunction %u\n", g_patmCallRecord.cbFunction));
322 Log(("g_patmCallIndirectRecord.cbFunction %u\n", g_patmCallIndirectRecord.cbFunction));
323 Log(("g_patmRetRecord.cbFunction %u\n", g_patmRetRecord.cbFunction));
324 Log(("g_patmJumpIndirectRecord.cbFunction %u\n", g_patmJumpIndirectRecord.cbFunction));
325 Log(("g_patmPopf32Record.cbFunction %u\n", g_patmPopf32Record.cbFunction));
326 Log(("g_patmIretRecord.cbFunction %u\n", g_patmIretRecord.cbFunction));
327 Log(("g_patmStiRecord.cbFunction %u\n", g_patmStiRecord.cbFunction));
328 Log(("g_patmCheckIFRecord.cbFunction %u\n", g_patmCheckIFRecord.cbFunction));
329
330 return rc;
331}
332
333/**
334 * Finalizes HMA page attributes.
335 *
336 * @returns VBox status code.
337 * @param pVM Pointer to the VM.
338 */
339VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
340{
341 if (HMIsEnabled(pVM))
342 return VINF_SUCCESS;
343
344 /*
345 * The GC state, stack and statistics must be read/write for the guest
346 * (supervisor only of course).
347 *
348 * Remember, we run guest code at ring-1 and ring-2 levels, which are
349 * considered supervisor levels by the paging structures. We run the VMM
350 * in ring-0 with CR0.WP=0 and mapping all VMM structures as read-only
351 * pages. The following structures are exceptions and must be mapped with
352 * write access so the ring-1 and ring-2 code can modify them.
353 */
354 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
355 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCState accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
356
357 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
358 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCStack accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
359
360 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
361 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the stats struct accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
362
363 /*
364 * Find the patch helper segment so we can identify code running there as patch code.
365 */
366 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpBegin", &pVM->patm.s.pbPatchHelpersRC);
367 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpBegin: %Rrc\n", rc), rc);
368 pVM->patm.s.pbPatchHelpersR3 = (uint8_t *)MMHyperRCToR3(pVM, pVM->patm.s.pbPatchHelpersRC);
369 AssertLogRelReturn(pVM->patm.s.pbPatchHelpersR3 != NULL, VERR_INTERNAL_ERROR_3);
370
371 RTRCPTR RCPtrEnd;
372 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpEnd", &RCPtrEnd);
373 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpEnd: %Rrc\n", rc), rc);
374
375 pVM->patm.s.cbPatchHelpers = RCPtrEnd - pVM->patm.s.pbPatchHelpersRC;
376 AssertLogRelMsgReturn(pVM->patm.s.cbPatchHelpers < _128K,
377 ("%RRv-%RRv => %#x\n", pVM->patm.s.pbPatchHelpersRC, RCPtrEnd, pVM->patm.s.cbPatchHelpers),
378 VERR_INTERNAL_ERROR_4);
379
380
381 return VINF_SUCCESS;
382}
383
384/**
385 * (Re)initializes PATM
386 *
387 * @param pVM The VM.
388 */
389static int patmReinit(PVM pVM)
390{
391 int rc;
392
393 /*
394 * Assert alignment and sizes.
395 */
396 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
397 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
398
399 /*
400 * Setup any fixed pointers and offsets.
401 */
402 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
403
404#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
405#ifndef PATM_DISABLE_ALL
406 pVM->fPATMEnabled = true;
407#endif
408#endif
409
410 Assert(pVM->patm.s.pGCStateHC);
411 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
412 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
413
414 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
415 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
416
417 Assert(pVM->patm.s.pGCStackHC);
418 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
419 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
420 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
421 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
422
423 Assert(pVM->patm.s.pStatsHC);
424 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
425 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
426
427 Assert(pVM->patm.s.pPatchMemHC);
428 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
429 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
430 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
431
432 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
433 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
434
435 Assert(pVM->patm.s.PatchLookupTreeHC);
436 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
437
438 /*
439 * (Re)Initialize PATM structure
440 */
441 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
442 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
443 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
444 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
445 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
446 pVM->patm.s.pvFaultMonitor = 0;
447 pVM->patm.s.deltaReloc = 0;
448
449 /* Lowest and highest patched instruction */
450 pVM->patm.s.pPatchedInstrGCLowest = ~0;
451 pVM->patm.s.pPatchedInstrGCHighest = 0;
452
453 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
454 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
455 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
456
457 pVM->patm.s.pfnSysEnterPatchGC = 0;
458 pVM->patm.s.pfnSysEnterGC = 0;
459
460 pVM->patm.s.fOutOfMemory = false;
461
462 pVM->patm.s.pfnHelperCallGC = 0;
463 patmR3DbgReset(pVM);
464
465 /* Generate all global functions to be used by future patches. */
466 /* We generate a fake patch in order to use the existing code for relocation. */
467 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
468 if (RT_FAILURE(rc))
469 {
470 Log(("Out of memory!!!!\n"));
471 return VERR_NO_MEMORY;
472 }
473 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
474 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
475 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
476
477 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
478 AssertRC(rc);
479
480 /* Update free pointer in patch memory. */
481 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
482 /* Round to next 8 byte boundary. */
483 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
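    /* Illustrative example: RT_ALIGN_32 rounds up to the given power-of-two
     * boundary, so an offset of 0x123 would become 0x128 here (the next
     * multiple of 8). */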
484
485
486 return rc;
487}
488
489
490/**
491 * Applies relocations to data and code managed by this
492 * component. This function will be called at init and
493 * whenever the VMM needs to relocate itself inside the GC.
494 *
495 * The PATM will update the addresses used by the switcher.
496 *
497 * @param pVM The VM.
498 * @param offDelta The relocation delta.
499 */
500VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta)
501{
502 if (HMIsEnabled(pVM))
503 return;
504
505 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
506 Assert((RTRCINTPTR)(GCPtrNew - pVM->patm.s.pGCStateGC) == offDelta);
507
508 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, offDelta));
509 if (offDelta)
510 {
511 PCPUMCTX pCtx;
512
513 /* Update CPUMCTX guest context pointer. */
514 pVM->patm.s.pCPUMCtxGC += offDelta;
515
516 pVM->patm.s.deltaReloc = offDelta;
517 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmR3RelocatePatches, (void *)pVM);
518
519 pVM->patm.s.pGCStateGC = GCPtrNew;
520 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
521 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
522 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
523 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
524
525 if (pVM->patm.s.pfnSysEnterPatchGC)
526 pVM->patm.s.pfnSysEnterPatchGC += offDelta;
527
528 /* If we are running patch code right now, then also adjust EIP. */
529 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
530 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
531 pCtx->eip += offDelta;
532
533 /* Deal with the global patch functions. */
534 pVM->patm.s.pfnHelperCallGC += offDelta;
535 pVM->patm.s.pfnHelperRetGC += offDelta;
536 pVM->patm.s.pfnHelperIretGC += offDelta;
537 pVM->patm.s.pfnHelperJumpGC += offDelta;
538
539 pVM->patm.s.pbPatchHelpersRC += offDelta;
540
541 patmR3RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
542 }
543}
544
545
546/**
547 * Terminates the PATM.
548 *
549 * Termination means cleaning up and freeing all resources;
550 * the VM itself is at this point powered off or suspended.
551 *
552 * @returns VBox status code.
553 * @param pVM Pointer to the VM.
554 */
555VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
556{
557 if (HMIsEnabled(pVM))
558 return VINF_SUCCESS;
559
560 patmR3DbgTerm(pVM);
561
562 /* Memory was all allocated from the two MM heaps and requires no freeing. */
563 return VINF_SUCCESS;
564}
565
566
567/**
568 * PATM reset callback.
569 *
570 * @returns VBox status code.
571 * @param pVM The VM which is reset.
572 */
573VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
574{
575 Log(("PATMR3Reset\n"));
576 if (HMIsEnabled(pVM))
577 return VINF_SUCCESS;
578
579 /* Free all patches. */
580 for (;;)
581 {
582 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
583 if (pPatchRec)
584 patmR3RemovePatch(pVM, pPatchRec, true);
585 else
586 break;
587 }
588 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
589 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
590 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
591 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
592
593 int rc = patmReinit(pVM);
594 if (RT_SUCCESS(rc))
595 rc = PATMR3InitFinalize(pVM); /* paranoia */
596
597 return rc;
598}
599
600/**
601 * @callback_method_impl{FNDISREADBYTES}
602 */
603static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
604{
605 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
606
607/** @todo change this to read more! */
608 /*
609 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
610 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
611 */
612 /** @todo could change in the future! */
613 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
614 {
615 size_t cbRead = cbMaxRead;
616 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
617 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
618 if (RT_SUCCESS(rc))
619 {
620 if (cbRead >= cbMinRead)
621 {
622 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
623 return VINF_SUCCESS;
624 }
625
626 cbMinRead -= (uint8_t)cbRead;
627 cbMaxRead -= (uint8_t)cbRead;
628 offInstr += (uint8_t)cbRead;
629 uSrcAddr += cbRead;
630 }
631
632#ifdef VBOX_STRICT
633 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
634 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
635 {
636 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
637 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
638 }
639#endif
640 }
641
642 int rc = VINF_SUCCESS;
643 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
644 if ( !pDisInfo->pbInstrHC
645 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
646 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
647 {
648 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
649 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
650 offInstr += cbMinRead;
651 }
652 else
653 {
654 /*
655 * pbInstrHC is the base address; adjust according to the GC pointer.
656 *
657 * Try to read the max number of bytes here. Since the disassembler only
658 * ever uses these bytes for the current instruction, it doesn't matter
659 * much if we accidentally read the start of the next instruction even
660 * if it happens to be a patch jump or int3.
661 */
662 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
663 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
664
665 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
666 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
667 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
668 if (cbToRead > cbMaxRead)
669 cbToRead = cbMaxRead;
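        /* Illustrative example: with 0x30 bytes left on the guest page,
         * 0x10 bytes left on the host mapping page and cbMinRead = 4, the
         * formula above yields cbToRead = min(0x30, max(0x10, 4)) = 0x10,
         * which is then capped by cbMaxRead. */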
670
671 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
672 offInstr += (uint8_t)cbToRead;
673 }
674
675 pDis->cbCachedInstr = offInstr;
676 return rc;
677}
678
679
680DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
681 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
682{
683 PATMDISASM disinfo;
684 disinfo.pVM = pVM;
685 disinfo.pPatchInfo = pPatch;
686 disinfo.pbInstrHC = pbInstrHC;
687 disinfo.pInstrGC = InstrGCPtr32;
688 disinfo.fReadFlags = fReadFlags;
689 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
690 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
691 patmReadBytes, &disinfo,
692 pCpu, pcbInstr, pszOutput, cbOutput));
693}
694
695
696DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
697 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
698{
699 PATMDISASM disinfo;
700 disinfo.pVM = pVM;
701 disinfo.pPatchInfo = pPatch;
702 disinfo.pbInstrHC = pbInstrHC;
703 disinfo.pInstrGC = InstrGCPtr32;
704 disinfo.fReadFlags = fReadFlags;
705 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
706 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
707 patmReadBytes, &disinfo,
708 pCpu, pcbInstr));
709}
710
711
712DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
713 uint32_t fReadFlags,
714 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
715{
716 PATMDISASM disinfo;
717 disinfo.pVM = pVM;
718 disinfo.pPatchInfo = pPatch;
719 disinfo.pbInstrHC = pbInstrHC;
720 disinfo.pInstrGC = InstrGCPtr32;
721 disinfo.fReadFlags = fReadFlags;
722 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
723 pCpu, pcbInstr));
724}
725
726#ifdef LOG_ENABLED
727# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
728 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
729# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
730 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
731
732# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
733 do { \
734 if (LogIsEnabled()) \
735 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
736 } while (0)
737
738static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
739 const char *pszComment1, const char *pszComment2)
740{
741 DISCPUSTATE DisState;
742 char szOutput[128];
743 szOutput[0] = '\0';
744 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
745 &DisState, NULL, szOutput, sizeof(szOutput));
746 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
747}
748
749#else
750# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
751# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
752# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
753#endif
754
755
756/**
757 * Callback function for RTAvloU32DoWithAll
758 *
759 * Updates all fixups in the patches
760 *
761 * @returns VBox status code.
762 * @param pNode Current node
763 * @param pParam Pointer to the VM.
764 */
765static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
766{
767 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
768 PVM pVM = (PVM)pParam;
769 RTRCINTPTR delta;
770 int rc;
771
772 /* Nothing to do if the patch is not active. */
773 if (pPatch->patch.uState == PATCH_REFUSED)
774 return 0;
775
776 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
777 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
778
779 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
780 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
781
782 /*
783 * Apply fixups.
784 */
785 AVLPVKEY key = NULL;
786 for (;;)
787 {
788 /* Get the record that's closest from above (after or equal to key). */
789 PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
790 if (!pRec)
791 break;
792
793 key = (uint8_t *)pRec->Core.Key + 1; /* search for the next record during the next round. */
794
795 switch (pRec->uType)
796 {
797 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
798 Assert(pRec->pDest == pRec->pSource); Assert(PATM_IS_ASMFIX(pRec->pSource));
799 Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
800 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
801 break;
802
803 case FIXUP_ABSOLUTE:
804 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
805 if ( !pRec->pSource
806 || PATMIsPatchGCAddr(pVM, pRec->pSource))
807 {
808 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
809 }
810 else
811 {
812 uint8_t curInstr[15];
813 uint8_t oldInstr[15];
814 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
815
816 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
817
818 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
819 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
820
821 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
822 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
823
824 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
825
826 if ( rc == VERR_PAGE_NOT_PRESENT
827 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
828 {
829 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
830
831 Log(("PATM: Patch page not present -> check later!\n"));
832 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
833 pPage,
834 pPage + (PAGE_SIZE - 1) /* inclusive! */,
835 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
836 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
837 }
838 else
839 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
840 {
841 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
842 /*
843 * Disable patch; this is not a good solution
844 */
845 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
846 pPatch->patch.uState = PATCH_DISABLED;
847 }
848 else
849 if (RT_SUCCESS(rc))
850 {
851 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
852 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
853 AssertRC(rc);
854 }
855 }
856 break;
857
858 case FIXUP_REL_JMPTOPATCH:
859 {
860 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
861
862 if ( pPatch->patch.uState == PATCH_ENABLED
863 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
864 {
865 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
866 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
867 RTRCPTR pJumpOffGC;
868 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
869 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
870
871#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
872 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
873#else
874 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
875#endif
876
877 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
878#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
879 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
880 {
881 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
882
883 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
884 oldJump[0] = pPatch->patch.aPrivInstr[0];
885 oldJump[1] = pPatch->patch.aPrivInstr[1];
886 *(RTRCUINTPTR *)&oldJump[2] = displOld;
887 }
888 else
889#endif
890 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
891 {
892 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
893 oldJump[0] = 0xE9;
894 *(RTRCUINTPTR *)&oldJump[1] = displOld;
895 }
896 else
897 {
898 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
899 continue; //this should never happen!!
900 }
901 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
902
903 /*
904 * Read old patch jump and compare it to the one we previously installed
905 */
906 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
907 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
908
909 if ( rc == VERR_PAGE_NOT_PRESENT
910 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
911 {
912 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
913 Log(("PATM: Patch page not present -> check later!\n"));
914 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
915 pPage,
916 pPage + (PAGE_SIZE - 1) /* inclusive! */,
917 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
918 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
919 }
920 else
921 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
922 {
923 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
924 /*
925 * Disable patch; this is not a good solution
926 */
927 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
928 pPatch->patch.uState = PATCH_DISABLED;
929 }
930 else
931 if (RT_SUCCESS(rc))
932 {
933 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
934 AssertRC(rc);
935 }
936 else
937 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
938 }
939 else
940 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
941
942 pRec->pDest = pTarget;
943 break;
944 }
945
946 case FIXUP_REL_JMPTOGUEST:
947 {
948 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
949 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
950
951 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
952 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
953 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
954 pRec->pSource = pSource;
955 break;
956 }
957
958 case FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL:
959 case FIXUP_CONSTANT_IN_PATCH_ASM_TMPL:
960 /* Only applicable when loading state. */
961 Assert(pRec->pDest == pRec->pSource);
962 Assert(PATM_IS_ASMFIX(pRec->pSource));
963 break;
964
965 default:
966 AssertMsg(0, ("Invalid fixup type!!\n"));
967 return VERR_INVALID_PARAMETER;
968 }
969 }
970
971 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
972 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
973 return 0;
974}
975
976#ifdef VBOX_WITH_DEBUGGER
977
978/**
979 * Callback function for RTAvloU32DoWithAll
980 *
981 * Enables the patch that's being enumerated
982 *
983 * @returns 0 (continue enumeration).
984 * @param pNode Current node
985 * @param pVM Pointer to the VM.
986 */
987static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
988{
989 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
990
991 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
992 return 0;
993}
994
995
996/**
997 * Callback function for RTAvloU32DoWithAll
998 *
999 * Disables the patch that's being enumerated
1000 *
1001 * @returns 0 (continue enumeration).
1002 * @param pNode Current node
1003 * @param pVM Pointer to the VM.
1004 */
1005static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
1006{
1007 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1008
1009 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1010 return 0;
1011}
1012
1013#endif /* VBOX_WITH_DEBUGGER */
1014
1015/**
1016 * Returns the host context pointer of the GC context structure
1017 *
1018 * @returns Host context pointer to the PATM GC state structure, or NULL if HM is enabled.
1019 * @param pVM Pointer to the VM.
1020 */
1021VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1022{
1023 AssertReturn(!HMIsEnabled(pVM), NULL);
1024 return pVM->patm.s.pGCStateHC;
1025}
1026
1027
1028/**
1029 * Allows or disallows patching of privileged instructions executed by the guest OS
1030 *
1031 * @returns VBox status code.
1032 * @param pUVM The user mode VM handle.
1033 * @param fAllowPatching Allow/disallow patching
1034 */
1035VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1036{
1037 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1038 PVM pVM = pUVM->pVM;
1039 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1040
1041 if (!HMIsEnabled(pVM))
1042 pVM->fPATMEnabled = fAllowPatching;
1043 else
1044 Assert(!pVM->fPATMEnabled);
1045 return VINF_SUCCESS;
1046}
1047
1048
1049/**
1050 * Checks if the patch manager is enabled or not.
1051 *
1052 * @returns true if enabled, false if not (or if invalid handle).
1053 * @param pUVM The user mode VM handle.
1054 */
1055VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1056{
1057 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1058 PVM pVM = pUVM->pVM;
1059 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1060 return PATMIsEnabled(pVM);
1061}
1062
1063
1064/**
1065 * Convert a GC patch block pointer to a HC patch pointer
1066 *
1067 * @returns HC pointer or NULL if it's not a GC patch pointer
1068 * @param pVM Pointer to the VM.
1069 * @param pAddrGC GC pointer
1070 */
1071VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1072{
1073 AssertReturn(!HMIsEnabled(pVM), NULL);
1074 RTRCUINTPTR offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1075 if (offPatch >= pVM->patm.s.cbPatchMem)
1076 {
1077 offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC;
1078 if (offPatch >= pVM->patm.s.cbPatchHelpers)
1079 return NULL;
1080 return pVM->patm.s.pbPatchHelpersR3 + offPatch;
1081 }
1082 return pVM->patm.s.pPatchMemHC + offPatch;
1083}
1084
1085
1086/**
1087 * Convert guest context address to host context pointer
1088 *
1089 * @returns Host context pointer or NULL in case of an error.
1090 * @param pVM Pointer to the VM.
1091 * @param pCacheRec Address conversion cache record
1092 * @param pGCPtr Guest context pointer
1093 *
1096 */
1097R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1098{
1099 int rc;
1100 R3PTRTYPE(uint8_t *) pHCPtr;
1101 uint32_t offset;
1102
1103 offset = (RTRCUINTPTR)pGCPtr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1104 if (offset < pVM->patm.s.cbPatchMem)
1105 {
1106#ifdef VBOX_STRICT
1107 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1108 Assert(pPatch); Assert(offset - pPatch->pPatchBlockOffset < pPatch->cbPatchBlockSize);
1109#endif
1110 return pVM->patm.s.pPatchMemHC + offset;
1111 }
1112 /* Note! We're _not_ including the patch helpers here. */
1113
1114 offset = pGCPtr & PAGE_OFFSET_MASK;
1115 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1116 return pCacheRec->pPageLocStartHC + offset;
1117
1118 /* Release previous lock if any. */
1119 if (pCacheRec->Lock.pvMap)
1120 {
1121 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1122 pCacheRec->Lock.pvMap = NULL;
1123 }
1124
1125 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1126 if (rc != VINF_SUCCESS)
1127 {
1128 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1129 return NULL;
1130 }
1131 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1132 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1133 return pHCPtr;
1134}
1135
1136
1137/**
1138 * Calculates and fills in all branch targets
1139 *
1140 * @returns VBox status code.
1141 * @param pVM Pointer to the VM.
1142 * @param pPatch Current patch block pointer
1143 *
1144 */
1145static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1146{
1147 int32_t displ;
1148
1149 PJUMPREC pRec = 0;
1150 unsigned nrJumpRecs = 0;
1151
1152 /*
1153 * Set all branch targets inside the patch block.
1154 * We remove all jump records as they are no longer needed afterwards.
1155 */
1156 while (true)
1157 {
1158 RCPTRTYPE(uint8_t *) pInstrGC;
1159 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1160
1161 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1162 if (pRec == 0)
1163 break;
1164
1165 nrJumpRecs++;
1166
1167 /* HC in patch block to GC in patch block. */
1168 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1169
1170 if (pRec->opcode == OP_CALL)
1171 {
1172 /* Special case: call function replacement patch from this patch block.
1173 */
1174 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1175 if (!pFunctionRec)
1176 {
1177 int rc;
1178
1179 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1180 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1181 else
1182 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1183
1184 if (RT_FAILURE(rc))
1185 {
1186 uint8_t *pPatchHC;
1187 RTRCPTR pPatchGC;
1188 RTRCPTR pOrgInstrGC;
1189
1190 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1191 Assert(pOrgInstrGC);
1192
1193 /* Failure for some reason -> mark exit point with int 3. */
1194 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1195
1196 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1197 Assert(pPatchGC);
1198
1199 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1200
1201 /* Set a breakpoint at the very beginning of the recompiled instruction */
1202 *pPatchHC = 0xCC;
1203
1204 continue;
1205 }
1206 }
1207 else
1208 {
1209 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1210 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1211 }
1212
1213 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1214 }
1215 else
1216 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1217
1218 if (pBranchTargetGC == 0)
1219 {
1220 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1221 return VERR_PATCHING_REFUSED;
1222 }
1223 /* Our jumps *always* have a dword displacement (to make things easier). */
1224 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
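        /* Worked example with illustrative addresses: for a branch whose rel32
         * displacement field starts at offDispl = 1 into an instruction at patch
         * address 0xa0001000, a target of 0xa0001020 gives
         *     displ = 0xa0001020 - (0xa0001000 + 1 + 4) = 0x1b,
         * i.e. the displacement is measured from the byte following the field. */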
1225 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1226 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1227 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1228 }
1229 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1230 Assert(pPatch->JumpTree == 0);
1231 return VINF_SUCCESS;
1232}
1233
1234/**
1235 * Add an illegal instruction record
1236 *
1237 * @param pVM Pointer to the VM.
1238 * @param pPatch Patch structure ptr
1239 * @param pInstrGC Guest context pointer to privileged instruction
1240 *
1241 */
1242static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1243{
1244 PAVLPVNODECORE pRec;
1245
1246 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1247 Assert(pRec);
1248 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1249
1250 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1251 Assert(ret); NOREF(ret);
1252 pPatch->pTempInfo->nrIllegalInstr++;
1253}
1254
1255static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1256{
1257 PAVLPVNODECORE pRec;
1258
1259 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1260 if (pRec)
1261 return true;
1262 else
1263 return false;
1264}
1265
1266/**
1267 * Add a patch to guest lookup record
1268 *
1269 * @param pVM Pointer to the VM.
1270 * @param pPatch Patch structure ptr
1271 * @param pPatchInstrHC Host context pointer into the patch block
1272 * @param pInstrGC Guest context pointer to privileged instruction
1273 * @param enmType Lookup type
1274 * @param fDirty Dirty flag
1275 *
1276 * @note Be extremely careful with this function. Make absolutely sure the guest
1277 * address is correct! (to avoid executing instructions twice!)
1278 */
1279void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1280{
1281 bool ret;
1282 PRECPATCHTOGUEST pPatchToGuestRec;
1283 PRECGUESTTOPATCH pGuestToPatchRec;
1284 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1285
1286 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1287 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1288
1289 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1290 {
1291 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1292 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1293 return; /* already there */
1294
1295 Assert(!pPatchToGuestRec);
1296 }
1297#ifdef VBOX_STRICT
1298 else
1299 {
1300 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1301 Assert(!pPatchToGuestRec);
1302 }
1303#endif
1304
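    /* Note: a single heap block holds both lookup records; the guest-to-patch
     * record is placed immediately after the patch-to-guest one (see the
     * pPatchToGuestRec+1 arithmetic below), so both are freed together through
     * the patch-to-guest pointer in patmr3RemoveP2GLookupRecord(). */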
1305 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1306 Assert(pPatchToGuestRec);
1307 pPatchToGuestRec->Core.Key = PatchOffset;
1308 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1309 pPatchToGuestRec->enmType = enmType;
1310 pPatchToGuestRec->fDirty = fDirty;
1311
1312 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1313 Assert(ret);
1314
1315 /* GC to patch address */
1316 if (enmType == PATM_LOOKUP_BOTHDIR)
1317 {
1318 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1319 if (!pGuestToPatchRec)
1320 {
1321 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1322 pGuestToPatchRec->Core.Key = pInstrGC;
1323 pGuestToPatchRec->PatchOffset = PatchOffset;
1324
1325 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1326 Assert(ret);
1327 }
1328 }
1329
1330 pPatch->nrPatch2GuestRecs++;
1331}
1332
1333
1334/**
1335 * Removes a patch to guest lookup record
1336 *
1337 * @param pVM Pointer to the VM.
1338 * @param pPatch Patch structure ptr
1339 * @param pPatchInstrGC Guest context pointer to patch block
1340 */
1341void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1342{
1343 PAVLU32NODECORE pNode;
1344 PAVLU32NODECORE pNode2;
1345 PRECPATCHTOGUEST pPatchToGuestRec;
1346 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1347
1348 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1349 Assert(pPatchToGuestRec);
1350 if (pPatchToGuestRec)
1351 {
1352 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1353 {
1354 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1355
1356 Assert(pGuestToPatchRec->Core.Key);
1357 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1358 Assert(pNode2);
1359 }
1360 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1361 Assert(pNode);
1362
1363 MMR3HeapFree(pPatchToGuestRec);
1364 pPatch->nrPatch2GuestRecs--;
1365 }
1366}
1367
1368
1369/**
1370 * RTAvlPVDestroy callback.
1371 */
1372static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1373{
1374 MMR3HeapFree(pNode);
1375 return 0;
1376}
1377
1378/**
1379 * Empty the specified tree (PV tree, MMR3 heap)
1380 *
1381 * @param pVM Pointer to the VM.
1382 * @param ppTree Tree to empty
1383 */
1384static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1385{
1386 NOREF(pVM);
1387 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1388}
1389
1390
1391/**
1392 * RTAvlU32Destroy callback.
1393 */
1394static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1395{
1396 MMR3HeapFree(pNode);
1397 return 0;
1398}
1399
1400/**
1401 * Empty the specified tree (U32 tree, MMR3 heap)
1402 *
1403 * @param pVM Pointer to the VM.
1404 * @param ppTree Tree to empty
1405 */
1406static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1407{
1408 NOREF(pVM);
1409 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1410}
1411
1412
1413/**
1414 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1415 *
1416 * @returns VBox status code.
1417 * @param pVM Pointer to the VM.
1418 * @param pCpu CPU disassembly state
1419 * @param pInstrGC Guest context pointer to privileged instruction
1420 * @param pCurInstrGC Guest context pointer to the current instruction
1421 * @param pCacheRec Cache record ptr
1422 *
1423 */
1424static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1425{
1426 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1427 bool fIllegalInstr = false;
1428
1429 /*
1430 * Preliminary heuristics:
1431 * - no call instructions without a fixed displacement between cli and sti/popf
1432 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1433 * - no nested pushf/cli
1434 * - sti/popf should be the (eventual) target of all branches
1435 * - no near or far returns; no int xx, no into
1436 *
1437 * Note: Later on we can impose less strict guidelines if the need arises.
1438 */
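/* Illustrative (hypothetical) guest sequence these heuristics are aimed at; not taken from
 * a real guest:
 *     cli
 *     mov  eax, [ebx]        ; ordinary instructions are simply duplicated
 *     cmp  eax, 0
 *     jne  short done        ; conditional branch whose eventual target is the sti
 * done:
 *     sti                    ; (or popf) ends the block
 * A call without a fixed displacement, a far return, int xx/into or a nested pushf would
 * instead be flagged as an illegal instruction and cut the analysis short. */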
1439
1440 /* Bail out if the patch gets too big. */
1441 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1442 {
1443 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1444 fIllegalInstr = true;
1445 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1446 }
1447 else
1448 {
1449 /* No unconditional jumps or calls without fixed displacements. */
1450 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1451 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1452 )
1453 {
1454 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1455 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1456 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1457 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1458 )
1459 {
1460 fIllegalInstr = true;
1461 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1462 }
1463 }
1464
1465 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1466 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1467 {
1468 if ( pCurInstrGC > pPatch->pPrivInstrGC
1469 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1470 {
1471 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1472 /* We turn this one into a int 3 callable patch. */
1473 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1474 }
1475 }
1476 else
1477 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1478 if (pPatch->opcode == OP_PUSHF)
1479 {
1480 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1481 {
1482 fIllegalInstr = true;
1483 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1484 }
1485 }
1486
1487 /* no far returns */
1488 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1489 {
1490 pPatch->pTempInfo->nrRetInstr++;
1491 fIllegalInstr = true;
1492 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1493 }
1494 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1495 || pCpu->pCurInstr->uOpcode == OP_INT
1496 || pCpu->pCurInstr->uOpcode == OP_INTO)
1497 {
1498 /* No int xx or into either. */
1499 fIllegalInstr = true;
1500 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1501 }
1502 }
1503
1504 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1505
1506 /* Illegal instruction -> end of analysis phase for this code block */
1507 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1508 return VINF_SUCCESS;
1509
1510 /* Check for exit points. */
1511 switch (pCpu->pCurInstr->uOpcode)
1512 {
1513 case OP_SYSEXIT:
1514 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1515
1516 case OP_SYSENTER:
1517 case OP_ILLUD2:
1518 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1519 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1520 return VINF_SUCCESS;
1521
1522 case OP_STI:
1523 case OP_POPF:
1524 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1525 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1526 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1527 {
1528 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1529 return VERR_PATCHING_REFUSED;
1530 }
1531 if (pPatch->opcode == OP_PUSHF)
1532 {
1533 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1534 {
1535 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1536 return VINF_SUCCESS;
1537
1538 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1539 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1540 pPatch->flags |= PATMFL_CHECK_SIZE;
1541 }
1542 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1543 }
1544 /* else: fall through. */
1545 case OP_RETN: /* exit point for function replacement */
1546 return VINF_SUCCESS;
1547
1548 case OP_IRET:
1549 return VINF_SUCCESS; /* exitpoint */
1550
1551 case OP_CPUID:
1552 case OP_CALL:
1553 case OP_JMP:
1554 break;
1555
1556#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1557 case OP_STR:
1558 break;
1559#endif
1560
1561 default:
1562 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1563 {
1564 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1565 return VINF_SUCCESS; /* exit point */
1566 }
1567 break;
1568 }
1569
1570 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1571 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1572 {
1573 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1574 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1575 return VINF_SUCCESS;
1576 }
1577
1578 return VWRN_CONTINUE_ANALYSIS;
1579}
1580
1581/**
1582 * Analyses the instructions inside a function for compliance
1583 *
1584 * @returns VBox status code.
1585 * @param pVM Pointer to the VM.
1586 * @param pCpu CPU disassembly state
1587 * @param pInstrGC Guest context pointer to privileged instruction
1588 * @param pCurInstrGC Guest context pointer to the current instruction
1589 * @param pCacheRec Cache record ptr
1590 *
1591 */
1592static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1593{
1594 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1595 bool fIllegalInstr = false;
1596 NOREF(pInstrGC);
1597
1598 // Preliminary heuristics:
1599 // - no call instructions
1600 // - ret ends a block
1601
1602 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1603
1604 // bail out if the patch gets too big
1605 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1606 {
1607 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1608 fIllegalInstr = true;
1609 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1610 }
1611 else
1612 {
1613 // no unconditional jumps or calls without fixed displacements
1614 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1615 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1616 )
1617 {
1618 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1619 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1620 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1621 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1622 )
1623 {
1624 fIllegalInstr = true;
1625 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1626 }
1627 }
1628 else /* no far returns */
1629 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1630 {
1631 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1632 fIllegalInstr = true;
1633 }
1634 else /* no int xx or into either */
1635 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1636 {
1637 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1638 fIllegalInstr = true;
1639 }
1640
1641 #if 0
1642 ///@todo we can handle certain in/out and privileged instructions in the guest context
1643 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1644 {
1645 Log(("Illegal instructions for function patch!!\n"));
1646 return VERR_PATCHING_REFUSED;
1647 }
1648 #endif
1649 }
1650
1651 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1652
1653 /* Illegal instruction -> end of analysis phase for this code block */
1654 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1655 {
1656 return VINF_SUCCESS;
1657 }
1658
1659 // Check for exit points
1660 switch (pCpu->pCurInstr->uOpcode)
1661 {
1662 case OP_ILLUD2:
1663 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1664 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1665 return VINF_SUCCESS;
1666
1667 case OP_IRET:
1668 case OP_SYSEXIT: /* will fault or emulated in GC */
1669 case OP_RETN:
1670 return VINF_SUCCESS;
1671
1672#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1673 case OP_STR:
1674 break;
1675#endif
1676
1677 case OP_POPF:
1678 case OP_STI:
1679 return VWRN_CONTINUE_ANALYSIS;
1680 default:
1681 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1682 {
1683 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1684 return VINF_SUCCESS; /* exit point */
1685 }
1686 return VWRN_CONTINUE_ANALYSIS;
1687 }
1688
1689 return VWRN_CONTINUE_ANALYSIS;
1690}
1691
1692/**
1693 * Recompiles the instructions in a code block
1694 *
1695 * @returns VBox status code.
1696 * @param pVM Pointer to the VM.
1697 * @param pCpu CPU disassembly state
1698 * @param pInstrGC Guest context pointer to privileged instruction
1699 * @param pCurInstrGC Guest context pointer to the current instruction
1700 * @param pCacheRec Cache record ptr
1701 *
1702 */
1703static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1704{
1705 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1706 int rc = VINF_SUCCESS;
1707 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1708
1709 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1710
1711 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1712 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1713 {
1714 /*
1715 * Been there, done that; so insert a jump (we don't want to duplicate code).
1716 * No need to record this instruction as it's glue code that never crashes (it had better not!).
1717 */
1718 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1719 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1720 }
1721
1722 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1723 {
1724 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1725 }
1726 else
1727 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1728
1729 if (RT_FAILURE(rc))
1730 return rc;
1731
1732 /* Note: Never do a direct return unless a failure is encountered! */
1733
1734 /* Clear recompilation of next instruction flag; we are doing that right here. */
1735 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1736 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1737
1738 /* Add lookup record for patch to guest address translation */
1739 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1740
1741 /* Update lowest and highest instruction address for this patch */
1742 if (pCurInstrGC < pPatch->pInstrGCLowest)
1743 pPatch->pInstrGCLowest = pCurInstrGC;
1744 else
1745 if (pCurInstrGC > pPatch->pInstrGCHighest)
1746 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1747
1748 /* Illegal instruction -> end of recompile phase for this code block. */
1749 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1750 {
1751 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1752 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1753 goto end;
1754 }
1755
1756 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1757 * Indirect calls are handled below.
1758 */
1759 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1760 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1761 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1762 {
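/* Sketch of what PATMResolveBranch is expected to return here: for a relative near jump/call
 * the absolute guest target, i.e. roughly pCurInstrGC + pCpu->cbInstr + the signed
 * displacement encoded in the instruction, and 0 for far/indirect forms, which we refuse
 * below. */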
1763 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1764 if (pTargetGC == 0)
1765 {
1766 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1767 return VERR_PATCHING_REFUSED;
1768 }
1769
1770 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1771 {
1772 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1773 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1774 if (RT_FAILURE(rc))
1775 goto end;
1776 }
1777 else
1778 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1779
1780 if (RT_SUCCESS(rc))
1781 rc = VWRN_CONTINUE_RECOMPILE;
1782
1783 goto end;
1784 }
1785
1786 switch (pCpu->pCurInstr->uOpcode)
1787 {
1788 case OP_CLI:
1789 {
1790 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1791 * until we've found the proper exit point(s).
1792 */
1793 if ( pCurInstrGC != pInstrGC
1794 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1795 )
1796 {
1797 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1798 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1799 }
1800 /* Set by irq inhibition; no longer valid now. */
1801 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1802
1803 rc = patmPatchGenCli(pVM, pPatch);
1804 if (RT_SUCCESS(rc))
1805 rc = VWRN_CONTINUE_RECOMPILE;
1806 break;
1807 }
1808
1809 case OP_MOV:
1810 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1811 {
1812 /* mov ss, src? */
1813 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1814 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1815 {
1816 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1817 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1818 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1819 }
1820#if 0 /* necessary for Haiku */
1821 else
1822 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1823 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1824 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1825 {
1826 /* mov GPR, ss */
1827 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1828 if (RT_SUCCESS(rc))
1829 rc = VWRN_CONTINUE_RECOMPILE;
1830 break;
1831 }
1832#endif
1833 }
1834 goto duplicate_instr;
1835
1836 case OP_POP:
1837 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1838 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1839 {
1840 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1841
1842 Log(("Force recompilation of next instruction for OP_POP at %RRv\n", pCurInstrGC));
1843 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1844 }
1845 goto duplicate_instr;
1846
1847 case OP_STI:
1848 {
1849 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1850
1851 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1852 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1853 {
1854 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1855 fInhibitIRQInstr = true;
1856 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1857 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1858 }
1859 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1860
1861 if (RT_SUCCESS(rc))
1862 {
1863 DISCPUSTATE cpu = *pCpu;
1864 unsigned cbInstr;
1865 int disret;
1866 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1867
1868 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1869
1870 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1871 { /* Force pNextInstrHC out of scope after using it */
1872 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1873 if (pNextInstrHC == NULL)
1874 {
1875 AssertFailed();
1876 return VERR_PATCHING_REFUSED;
1877 }
1878
1879 // Disassemble the next instruction
1880 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1881 }
1882 if (disret == false)
1883 {
1884 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1885 return VERR_PATCHING_REFUSED;
1886 }
1887 pReturnInstrGC = pNextInstrGC + cbInstr;
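/* The 5 byte patch jump will be written at pInstrGC, so for non-function patches the
 * instruction following the sti must not end inside that range; otherwise it would be
 * overwritten by the jump and the patch is refused below ("sti occurred too soon"). */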
1888
1889 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1890 || pReturnInstrGC <= pInstrGC
1891 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1892 )
1893 {
1894 /* Not an exit point for function duplication patches */
1895 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1896 && RT_SUCCESS(rc))
1897 {
1898 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1899 rc = VWRN_CONTINUE_RECOMPILE;
1900 }
1901 else
1902 rc = VINF_SUCCESS; //exit point
1903 }
1904 else {
1905 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1906 rc = VERR_PATCHING_REFUSED; //not allowed!!
1907 }
1908 }
1909 break;
1910 }
1911
1912 case OP_POPF:
1913 {
1914 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1915
1916 /* Not an exit point for IDT handler or function replacement patches */
1917 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1918 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1919 fGenerateJmpBack = false;
1920
1921 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1922 if (RT_SUCCESS(rc))
1923 {
1924 if (fGenerateJmpBack == false)
1925 {
1926 /* Not an exit point for IDT handler or function replacement patches */
1927 rc = VWRN_CONTINUE_RECOMPILE;
1928 }
1929 else
1930 {
1931 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1932 rc = VINF_SUCCESS; /* exit point! */
1933 }
1934 }
1935 break;
1936 }
1937
1938 case OP_PUSHF:
1939 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1940 if (RT_SUCCESS(rc))
1941 rc = VWRN_CONTINUE_RECOMPILE;
1942 break;
1943
1944 case OP_PUSH:
1945 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
1946 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1947 {
1948 rc = patmPatchGenPushCS(pVM, pPatch);
1949 if (RT_SUCCESS(rc))
1950 rc = VWRN_CONTINUE_RECOMPILE;
1951 break;
1952 }
1953 goto duplicate_instr;
1954
1955 case OP_IRET:
1956 Log(("IRET at %RRv\n", pCurInstrGC));
1957 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1958 if (RT_SUCCESS(rc))
1959 {
1960 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1961 rc = VINF_SUCCESS; /* exit point by definition */
1962 }
1963 break;
1964
1965 case OP_ILLUD2:
1966 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
1967 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1968 if (RT_SUCCESS(rc))
1969 rc = VINF_SUCCESS; /* exit point by definition */
1970 Log(("Illegal opcode (0xf 0xb)\n"));
1971 break;
1972
1973 case OP_CPUID:
1974 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1975 if (RT_SUCCESS(rc))
1976 rc = VWRN_CONTINUE_RECOMPILE;
1977 break;
1978
1979 case OP_STR:
1980#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
1981 /* Now safe because our shadow TR entry is identical to the guest's. */
1982 goto duplicate_instr;
1983#endif
1984 case OP_SLDT:
1985 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1986 if (RT_SUCCESS(rc))
1987 rc = VWRN_CONTINUE_RECOMPILE;
1988 break;
1989
1990 case OP_SGDT:
1991 case OP_SIDT:
1992 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1993 if (RT_SUCCESS(rc))
1994 rc = VWRN_CONTINUE_RECOMPILE;
1995 break;
1996
1997 case OP_RETN:
1998 /* retn is an exit point for function patches */
1999 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2000 if (RT_SUCCESS(rc))
2001 rc = VINF_SUCCESS; /* exit point by definition */
2002 break;
2003
2004 case OP_SYSEXIT:
2005 /* Duplicate it, so it can be emulated in GC (or fault). */
2006 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2007 if (RT_SUCCESS(rc))
2008 rc = VINF_SUCCESS; /* exit point by definition */
2009 break;
2010
2011 case OP_CALL:
2012 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2013 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2014 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2015 */
2016 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2017 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2018 {
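/* The 0xDEADBEEF target is just a placeholder: for an indirect call the real destination is
 * only known at run time, so the generated code presumably resolves it when the call is
 * taken (the final 'true' argument appears to mark the call as indirect). */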
2019 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2020 if (RT_SUCCESS(rc))
2021 {
2022 rc = VWRN_CONTINUE_RECOMPILE;
2023 }
2024 break;
2025 }
2026 goto gen_illegal_instr;
2027
2028 case OP_JMP:
2029 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2030 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2031 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2032 */
2033 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2034 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2035 {
2036 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2037 if (RT_SUCCESS(rc))
2038 rc = VINF_SUCCESS; /* end of branch */
2039 break;
2040 }
2041 goto gen_illegal_instr;
2042
2043 case OP_INT3:
2044 case OP_INT:
2045 case OP_INTO:
2046 goto gen_illegal_instr;
2047
2048 case OP_MOV_DR:
2049 /* Note: currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
2050 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2051 {
2052 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2053 if (RT_SUCCESS(rc))
2054 rc = VWRN_CONTINUE_RECOMPILE;
2055 break;
2056 }
2057 goto duplicate_instr;
2058
2059 case OP_MOV_CR:
2060 /* Note: currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
2061 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2062 {
2063 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2064 if (RT_SUCCESS(rc))
2065 rc = VWRN_CONTINUE_RECOMPILE;
2066 break;
2067 }
2068 goto duplicate_instr;
2069
2070 default:
2071 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2072 {
2073gen_illegal_instr:
2074 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2075 if (RT_SUCCESS(rc))
2076 rc = VINF_SUCCESS; /* exit point by definition */
2077 }
2078 else
2079 {
2080duplicate_instr:
2081 Log(("patmPatchGenDuplicate\n"));
2082 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2083 if (RT_SUCCESS(rc))
2084 rc = VWRN_CONTINUE_RECOMPILE;
2085 }
2086 break;
2087 }
2088
2089end:
2090
2091 if ( !fInhibitIRQInstr
2092 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2093 {
2094 int rc2;
2095 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2096
2097 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2098 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2099 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2100 {
2101 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2102
2103 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2104 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2105 rc = VINF_SUCCESS; /* end of the line */
2106 }
2107 else
2108 {
2109 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2110 }
2111 if (RT_FAILURE(rc2))
2112 rc = rc2;
2113 }
2114
2115 if (RT_SUCCESS(rc))
2116 {
2117 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2118 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2119 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2120 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2121 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2122 )
2123 {
2124 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2125
2126 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2127 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2128
2129 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2130 AssertRC(rc);
2131 }
2132 }
2133 return rc;
2134}
2135
2136
2137#ifdef LOG_ENABLED
2138
2139/**
2140 * Add a disasm jump record (temporary, to prevent duplicate analysis)
2141 *
2142 * @param pVM Pointer to the VM.
2143 * @param pPatch Patch structure ptr
2144 * @param pInstrGC Guest context pointer to privileged instruction
2145 *
2146 */
2147static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2148{
2149 PAVLPVNODECORE pRec;
2150
2151 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2152 Assert(pRec);
2153 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2154
2155 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2156 Assert(ret);
2157}
2158
2159/**
2160 * Checks if jump target has been analysed before.
2161 *
2162 * @returns true if the jump target has been analysed before, false otherwise.
2163 * @param pPatch Patch struct
2164 * @param pInstrGC Jump target
2165 *
2166 */
2167static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2168{
2169 PAVLPVNODECORE pRec;
2170
2171 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2172 if (pRec)
2173 return true;
2174 return false;
2175}
2176
2177/**
2178 * For proper disassembly of the final patch block
2179 *
2180 * @returns VBox status code.
2181 * @param pVM Pointer to the VM.
2182 * @param pCpu CPU disassembly state
2183 * @param pInstrGC Guest context pointer to privileged instruction
2184 * @param pCurInstrGC Guest context pointer to the current instruction
2185 * @param pCacheRec Cache record ptr
2186 *
2187 */
2188int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2189{
2190 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2191 NOREF(pInstrGC);
2192
2193 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2194 {
2195 /* Could be an int3 inserted in a call patch. Check to be sure */
2196 DISCPUSTATE cpu;
2197 RTRCPTR pOrgJumpGC;
2198
2199 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2200
2201 { /* Force pOrgJumpHC out of scope after using it */
2202 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2203
2204 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2205 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2206 return VINF_SUCCESS;
2207 }
2208 return VWRN_CONTINUE_ANALYSIS;
2209 }
2210
2211 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2212 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2213 {
2214 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2215 return VWRN_CONTINUE_ANALYSIS;
2216 }
2217
2218 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2219 || pCpu->pCurInstr->uOpcode == OP_INT
2220 || pCpu->pCurInstr->uOpcode == OP_IRET
2221 || pCpu->pCurInstr->uOpcode == OP_RETN
2222 || pCpu->pCurInstr->uOpcode == OP_RETF
2223 )
2224 {
2225 return VINF_SUCCESS;
2226 }
2227
2228 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2229 return VINF_SUCCESS;
2230
2231 return VWRN_CONTINUE_ANALYSIS;
2232}
2233
2234
2235/**
2236 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2237 *
2238 * @returns VBox status code.
2239 * @param pVM Pointer to the VM.
2240 * @param pInstrGC Guest context pointer to the initial privileged instruction
2241 * @param pCurInstrGC Guest context pointer to the current instruction
2242 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2243 * @param pCacheRec Cache record ptr
2244 *
2245 */
2246int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2247{
2248 DISCPUSTATE cpu;
2249 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2250 int rc = VWRN_CONTINUE_ANALYSIS;
2251 uint32_t cbInstr, delta;
2252 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2253 bool disret;
2254 char szOutput[256];
2255
2256 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2257
2258 /* We need this to determine branch targets (and for disassembling). */
2259 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2260
2261 while (rc == VWRN_CONTINUE_ANALYSIS)
2262 {
2263 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2264 if (pCurInstrHC == NULL)
2265 {
2266 rc = VERR_PATCHING_REFUSED;
2267 goto end;
2268 }
2269
2270 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2271 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2272 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2273 {
2274 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2275
2276 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2277 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2278 else
2279 Log(("DIS %s", szOutput));
2280
2281 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2282 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2283 {
2284 rc = VINF_SUCCESS;
2285 goto end;
2286 }
2287 }
2288 else
2289 Log(("DIS: %s", szOutput));
2290
2291 if (disret == false)
2292 {
2293 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2294 rc = VINF_SUCCESS;
2295 goto end;
2296 }
2297
2298 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2299 if (rc != VWRN_CONTINUE_ANALYSIS) {
2300 break; //done!
2301 }
2302
2303 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2304 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2305 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2306 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2307 )
2308 {
2309 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2310 RTRCPTR pOrgTargetGC;
2311
2312 if (pTargetGC == 0)
2313 {
2314 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2315 rc = VERR_PATCHING_REFUSED;
2316 break;
2317 }
2318
2319 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2320 {
2321 //jump back to guest code
2322 rc = VINF_SUCCESS;
2323 goto end;
2324 }
2325 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2326
2327 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2328 {
2329 rc = VINF_SUCCESS;
2330 goto end;
2331 }
2332
2333 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2334 {
2335 /* New jump, let's check it. */
2336 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2337
2338 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2339 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2340 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2341
2342 if (rc != VINF_SUCCESS) {
2343 break; //done!
2344 }
2345 }
2346 if (cpu.pCurInstr->uOpcode == OP_JMP)
2347 {
2348 /* Unconditional jump; return to caller. */
2349 rc = VINF_SUCCESS;
2350 goto end;
2351 }
2352
2353 rc = VWRN_CONTINUE_ANALYSIS;
2354 }
2355 pCurInstrGC += cbInstr;
2356 }
2357end:
2358 return rc;
2359}
2360
2361/**
2362 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2363 *
2364 * @returns VBox status code.
2365 * @param pVM Pointer to the VM.
2366 * @param pInstrGC Guest context pointer to the initial privileged instruction
2367 * @param pCurInstrGC Guest context pointer to the current instruction
2368 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2369 * @param pCacheRec Cache record ptr
2370 *
2371 */
2372int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2373{
2374 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2375
2376 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2377 /* Free all disasm jump records. */
2378 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2379 return rc;
2380}
2381
2382#endif /* LOG_ENABLED */
2383
2384/**
2385 * Detects if the specified address falls within a 5 byte jump generated for an active patch.
2386 * If so, this patch is permanently disabled.
2387 *
2388 * @param pVM Pointer to the VM.
2389 * @param pInstrGC Guest context pointer to instruction
2390 * @param pConflictGC Guest context pointer to check
2391 *
2392 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2393 *
2394 */
2395VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2396{
2397 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2398 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2399 if (pTargetPatch)
2400 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2401 return VERR_PATCH_NO_CONFLICT;
2402}
2403
2404/**
2405 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2406 *
2407 * @returns VBox status code.
2408 * @param pVM Pointer to the VM.
2409 * @param pInstrGC Guest context pointer to privileged instruction
2410 * @param pCurInstrGC Guest context pointer to the current instruction
2411 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2412 * @param pCacheRec Cache record ptr
2413 *
2414 */
2415static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2416{
2417 DISCPUSTATE cpu;
2418 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2419 int rc = VWRN_CONTINUE_ANALYSIS;
2420 uint32_t cbInstr;
2421 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2422 bool disret;
2423#ifdef LOG_ENABLED
2424 char szOutput[256];
2425#endif
2426
2427 while (rc == VWRN_CONTINUE_RECOMPILE)
2428 {
2429 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2430 if (pCurInstrHC == NULL)
2431 {
2432 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2433 goto end;
2434 }
2435#ifdef LOG_ENABLED
2436 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2437 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2438 Log(("Recompile: %s", szOutput));
2439#else
2440 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2441#endif
2442 if (disret == false)
2443 {
2444 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2445
2446 /* Add lookup record for patch to guest address translation */
2447 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2448 patmPatchGenIllegalInstr(pVM, pPatch);
2449 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2450 goto end;
2451 }
2452
2453 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2454 if (rc != VWRN_CONTINUE_RECOMPILE)
2455 {
2456 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2457 if ( rc == VINF_SUCCESS
2458 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2459 {
2460 DISCPUSTATE cpunext;
2461 uint32_t opsizenext;
2462 uint8_t *pNextInstrHC;
2463 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2464
2465 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2466
2467 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2468 * Recompile the next instruction as well
2469 */
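/* Typical guest sequences this covers are "sti; iret", "sti; sysexit" and "sti; hlt": the
 * instruction after the sti executes before any interrupt can be delivered, so it is pulled
 * into the same patch (see the switch below). */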
2470 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2471 if (pNextInstrHC == NULL)
2472 {
2473 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2474 goto end;
2475 }
2476 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2477 if (disret == false)
2478 {
2479 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2480 goto end;
2481 }
2482 switch(cpunext.pCurInstr->uOpcode)
2483 {
2484 case OP_IRET: /* inhibit cleared in generated code */
2485 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2486 case OP_HLT:
2487 break; /* recompile these */
2488
2489 default:
2490 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2491 {
2492 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2493
2494 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2495 AssertRC(rc);
2496 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2497 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2498 }
2499 break;
2500 }
2501
2502 /* Note: after a cli we must continue to a proper exit point */
2503 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2504 {
2505 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2506 if (RT_SUCCESS(rc))
2507 {
2508 rc = VINF_SUCCESS;
2509 goto end;
2510 }
2511 break;
2512 }
2513 else
2514 rc = VWRN_CONTINUE_RECOMPILE;
2515 }
2516 else
2517 break; /* done! */
2518 }
2519
2520 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2521
2522
2523 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2524 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2525 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2526 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2527 )
2528 {
2529 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2530 if (addr == 0)
2531 {
2532 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2533 rc = VERR_PATCHING_REFUSED;
2534 break;
2535 }
2536
2537 Log(("Jump encountered target %RRv\n", addr));
2538
2539 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2540 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2541 {
2542 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2543 /* First we need to finish this linear code stream until the next exit point. */
2544 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2545 if (RT_FAILURE(rc))
2546 {
2547 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2548 break; //fatal error
2549 }
2550 }
2551
2552 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2553 {
2554 /* New code; let's recompile it. */
2555 Log(("patmRecompileCodeStream continue with jump\n"));
2556
2557 /*
2558 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2559 * this patch so we can continue our analysis
2560 *
2561 * We rely on CSAM to detect and resolve conflicts
2562 */
2563 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2564 if(pTargetPatch)
2565 {
2566 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2567 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2568 }
2569
2570 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2571 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2572 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2573
2574 if(pTargetPatch)
2575 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2576
2577 if (RT_FAILURE(rc))
2578 {
2579 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2580 break; //done!
2581 }
2582 }
2583 /* Always return to caller here; we're done! */
2584 rc = VINF_SUCCESS;
2585 goto end;
2586 }
2587 else
2588 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2589 {
2590 rc = VINF_SUCCESS;
2591 goto end;
2592 }
2593 pCurInstrGC += cbInstr;
2594 }
2595end:
2596 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2597 return rc;
2598}
2599
2600
2601/**
2602 * Generate the jump from guest to patch code
2603 *
2604 * @returns VBox status code.
2605 * @param pVM Pointer to the VM.
2606 * @param pPatch Patch record
2607 * @param pCacheRec Guest translation lookup cache record
2608 */
2609static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2610{
2611 uint8_t temp[8];
2612 uint8_t *pPB;
2613 int rc;
2614
2615 Assert(pPatch->cbPatchJump <= sizeof(temp));
2616 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2617
2618 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2619 Assert(pPB);
2620
2621#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2622 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2623 {
2624 Assert(pPatch->pPatchJumpDestGC);
2625
2626 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2627 {
2628 // jmp [PatchCode]
2629 if (fAddFixup)
2630 {
2631 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2632 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2633 {
2634 Log(("Relocation failed for the jump in the guest code!!\n"));
2635 return VERR_PATCHING_REFUSED;
2636 }
2637 }
2638
2639 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2640 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); // relative displacement (rel32)
2641 }
2642 else
2643 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2644 {
2645 // jmp [PatchCode]
2646 if (fAddFixup)
2647 {
2648 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2649 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2650 {
2651 Log(("Relocation failed for the jump in the guest code!!\n"));
2652 return VERR_PATCHING_REFUSED;
2653 }
2654 }
2655
2656 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2657 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2658 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); // relative displacement (rel32)
2659 }
2660 else
2661 {
2662 Assert(0);
2663 return VERR_PATCHING_REFUSED;
2664 }
2665 }
2666 else
2667#endif
2668 {
2669 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2670
2671 // jmp [PatchCode]
2672 if (fAddFixup)
2673 {
2674 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
2675 PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2676 {
2677 Log(("Relocation failed for the jump in the guest code!!\n"));
2678 return VERR_PATCHING_REFUSED;
2679 }
2680 }
2681 temp[0] = 0xE9; //jmp
2682 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); // relative displacement (rel32)
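/* Resulting 5 byte sequence written over the guest instruction:
 *     E9 xx xx xx xx       ; jmp rel32
 * where rel32 = PATCHCODE_PTR_GC(pPatch) - (pPrivInstrGC + SIZEOF_NEARJUMP32), i.e. the
 * displacement is relative to the byte following the jump. */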
2683 }
2684 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2685 AssertRC(rc);
2686
2687 if (rc == VINF_SUCCESS)
2688 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2689
2690 return rc;
2691}
2692
2693/**
2694 * Remove the jump from guest to patch code
2695 *
2696 * @returns VBox status code.
2697 * @param pVM Pointer to the VM.
2698 * @param pPatch Patch record
2699 */
2700static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2701{
2702#ifdef DEBUG
2703 DISCPUSTATE cpu;
2704 char szOutput[256];
2705 uint32_t cbInstr, i = 0;
2706 bool disret;
2707
2708 while (i < pPatch->cbPrivInstr)
2709 {
2710 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2711 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2712 if (disret == false)
2713 break;
2714
2715 Log(("Org patch jump: %s", szOutput));
2716 Assert(cbInstr);
2717 i += cbInstr;
2718 }
2719#endif
2720
2721 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2722 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2723#ifdef DEBUG
2724 if (rc == VINF_SUCCESS)
2725 {
2726 i = 0;
2727 while (i < pPatch->cbPrivInstr)
2728 {
2729 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2730 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2731 if (disret == false)
2732 break;
2733
2734 Log(("Org instr: %s", szOutput));
2735 Assert(cbInstr);
2736 i += cbInstr;
2737 }
2738 }
2739#endif
2740 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2741 return rc;
2742}
2743
2744/**
2745 * Generate the call from guest to patch code
2746 *
2747 * @returns VBox status code.
2748 * @param pVM Pointer to the VM.
2749 * @param pPatch Patch record
2750 * @param pTargetGC Guest context address of the patch code to call/jump to
2751 * @param pCacheRec Guest translation cache record
2752 */
2753static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2754{
2755 uint8_t temp[8];
2756 uint8_t *pPB;
2757 int rc;
2758
2759 Assert(pPatch->cbPatchJump <= sizeof(temp));
2760
2761 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2762 Assert(pPB);
2763
2764 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2765
2766 // jmp [PatchCode]
2767 if (fAddFixup)
2768 {
2769 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
2770 pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2771 {
2772 Log(("Relocation failed for the jump in the guest code!!\n"));
2773 return VERR_PATCHING_REFUSED;
2774 }
2775 }
2776
2777 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2778 temp[0] = pPatch->aPrivInstr[0];
2779 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); // relative displacement (rel32)
2780
2781 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2782 AssertRC(rc);
2783
2784 return rc;
2785}
2786
2787
2788/**
2789 * Patch cli/sti pushf/popf instruction block at specified location
2790 *
2791 * @returns VBox status code.
2792 * @param pVM Pointer to the VM.
2793 * @param pInstrGC Guest context pointer to privileged instruction
2794 * @param pInstrHC Host context pointer to privileged instruction
2795 * @param uOpcode Instruction opcode
2796 * @param uOpSize Size of starting instruction
2797 * @param pPatchRec Patch record
2798 *
2799 * @note returns failure if patching is not allowed or possible
2800 *
2801 */
2802static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2803 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2804{
2805 PPATCHINFO pPatch = &pPatchRec->patch;
2806 int rc = VERR_PATCHING_REFUSED;
2807 uint32_t orgOffsetPatchMem = ~0;
2808 RTRCPTR pInstrStart;
2809 bool fInserted;
2810 NOREF(pInstrHC); NOREF(uOpSize);
2811
2812 /* Save original offset (in case of failures later on) */
2813 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2814 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2815
2816 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2817 switch (uOpcode)
2818 {
2819 case OP_MOV:
2820 break;
2821
2822 case OP_CLI:
2823 case OP_PUSHF:
2824 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2825 /* Note: special precautions are taken when disabling and enabling such patches. */
2826 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2827 break;
2828
2829 default:
2830 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2831 {
2832 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2833 return VERR_INVALID_PARAMETER;
2834 }
2835 }
2836
2837 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2838 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2839
2840 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
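/* E.g. a privileged instruction at page offset 0xFFC: the 5 byte jump would spill into the
 * next page, PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32), and the
 * patch is refused (illustrative offset only). */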
2841 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2842 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2843 )
2844 {
2845 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2846 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2847 rc = VERR_PATCHING_REFUSED;
2848 goto failure;
2849 }
2850
2851 pPatch->nrPatch2GuestRecs = 0;
2852 pInstrStart = pInstrGC;
2853
2854#ifdef PATM_ENABLE_CALL
2855 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2856#endif
2857
2858 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2859 pPatch->uCurPatchOffset = 0;
2860
2861 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2862 {
2863 Assert(pPatch->flags & PATMFL_INTHANDLER);
2864
2865 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2866 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2867 if (RT_FAILURE(rc))
2868 goto failure;
2869 }
2870
2871 /***************************************************************************************************************************/
2872 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2873 /***************************************************************************************************************************/
2874#ifdef VBOX_WITH_STATISTICS
2875 if (!(pPatch->flags & PATMFL_SYSENTER))
2876 {
2877 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2878 if (RT_FAILURE(rc))
2879 goto failure;
2880 }
2881#endif
2882
2883 PATMP2GLOOKUPREC cacheRec;
2884 RT_ZERO(cacheRec);
2885 cacheRec.pPatch = pPatch;
2886
2887 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2888 /* Free leftover lock if any. */
2889 if (cacheRec.Lock.pvMap)
2890 {
2891 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2892 cacheRec.Lock.pvMap = NULL;
2893 }
2894 if (rc != VINF_SUCCESS)
2895 {
2896 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2897 goto failure;
2898 }
2899
2900 /* Calculated during analysis. */
2901 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2902 {
2903 /* Most likely cause: we encountered an illegal instruction very early on. */
2904 /** @todo could turn it into an int3 callable patch. */
2905 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2906 rc = VERR_PATCHING_REFUSED;
2907 goto failure;
2908 }
2909
2910 /* size of patch block */
2911 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2912
2913
2914 /* Update free pointer in patch memory. */
2915 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2916 /* Round to next 8 byte boundary. */
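/* (E.g. an offset of 0x1231..0x1237 rounds up to 0x1238; an already aligned 0x1238 is kept.) */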
2917 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2918
2919 /*
2920 * Insert into patch to guest lookup tree
2921 */
2922 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2923 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2924 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2925 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2926 if (!fInserted)
2927 {
2928 rc = VERR_PATCHING_REFUSED;
2929 goto failure;
2930 }
2931
2932 /* Note that patmr3SetBranchTargets can install additional patches!! */
2933 rc = patmr3SetBranchTargets(pVM, pPatch);
2934 if (rc != VINF_SUCCESS)
2935 {
2936 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2937 goto failure;
2938 }
2939
2940#ifdef LOG_ENABLED
2941 Log(("Patch code ----------------------------------------------------------\n"));
2942 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2943 /* Free leftover lock if any. */
2944 if (cacheRec.Lock.pvMap)
2945 {
2946 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2947 cacheRec.Lock.pvMap = NULL;
2948 }
2949 Log(("Patch code ends -----------------------------------------------------\n"));
2950#endif
2951
2952 /* make a copy of the guest code bytes that will be overwritten */
2953 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2954
2955 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2956 AssertRC(rc);
2957
2958 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2959 {
2960 /*uint8_t bASMInt3 = 0xCC; - unused */
2961
2962 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2963 /* Replace first opcode byte with 'int 3'. */
2964 rc = patmActivateInt3Patch(pVM, pPatch);
2965 if (RT_FAILURE(rc))
2966 goto failure;
2967
2968 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2969 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2970
2971 pPatch->flags &= ~PATMFL_INSTR_HINT;
2972 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2973 }
2974 else
2975 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2976 {
2977 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2978 /* now insert a jump in the guest code */
2979 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2980 AssertRC(rc);
2981 if (RT_FAILURE(rc))
2982 goto failure;
2983
2984 }
2985
2986 patmR3DbgAddPatch(pVM, pPatchRec);
2987
2988 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2989
2990 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2991 pPatch->pTempInfo->nrIllegalInstr = 0;
2992
2993 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2994
2995 pPatch->uState = PATCH_ENABLED;
2996 return VINF_SUCCESS;
2997
2998failure:
2999 if (pPatchRec->CoreOffset.Key)
3000 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3001
3002 patmEmptyTree(pVM, &pPatch->FixupTree);
3003 pPatch->nrFixups = 0;
3004
3005 patmEmptyTree(pVM, &pPatch->JumpTree);
3006 pPatch->nrJumpRecs = 0;
3007
3008 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3009 pPatch->pTempInfo->nrIllegalInstr = 0;
3010
3011 /* Turn this cli patch into a dummy. */
3012 pPatch->uState = PATCH_REFUSED;
3013 pPatch->pPatchBlockOffset = 0;
3014
3015 // Give back the patch memory we no longer need
3016 Assert(orgOffsetPatchMem != (uint32_t)~0);
3017 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3018
3019 return rc;
3020}
3021
3022/**
3023 * Patch IDT handler
3024 *
3025 * @returns VBox status code.
3026 * @param pVM Pointer to the VM.
3027 * @param pInstrGC Guest context pointer to privileged instruction
3028 * @param uOpSize Size of starting instruction
3029 * @param pPatchRec Patch record
3030 * @param pCacheRec Cache record ptr
3031 *
3032 * @note returns failure if patching is not allowed or possible
3033 *
3034 */
3035static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3036{
3037 PPATCHINFO pPatch = &pPatchRec->patch;
3038 bool disret;
3039 DISCPUSTATE cpuPush, cpuJmp;
3040 uint32_t cbInstr;
3041 RTRCPTR pCurInstrGC = pInstrGC;
3042 uint8_t *pCurInstrHC, *pInstrHC;
3043 uint32_t orgOffsetPatchMem = ~0;
3044
3045 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3046 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3047
3048 /*
3049 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3050 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3051 * condition here and only patch the common entrypoint once.
3052 */
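/* Illustrative sketch (not part of the original source): the stub pattern looked for
 * below is of the form
 *     push <vector specific value>
 *     jmp  common_interrupt_entry
 * Only the shared entrypoint gets a full patch; each stub merely has its push
 * duplicated followed by a jump into that common patch.
 */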
3053 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3054 Assert(disret);
3055 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3056 {
3057 RTRCPTR pJmpInstrGC;
3058 int rc;
3059 pCurInstrGC += cbInstr;
3060
3061 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3062 if ( disret
3063 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3064 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3065 )
3066 {
3067 bool fInserted;
3068 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3069 if (pJmpPatch == 0)
3070 {
3071 /* Patch it first! */
3072 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3073 if (rc != VINF_SUCCESS)
3074 goto failure;
3075 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3076 Assert(pJmpPatch);
3077 }
3078 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3079 goto failure;
3080
3081 /* save original offset (in case of failures later on) */
3082 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3083
3084 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3085 pPatch->uCurPatchOffset = 0;
3086 pPatch->nrPatch2GuestRecs = 0;
3087
3088#ifdef VBOX_WITH_STATISTICS
3089 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3090 if (RT_FAILURE(rc))
3091 goto failure;
3092#endif
3093
3094 /* Install fake cli patch (to clear the virtual IF) */
3095 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3096 if (RT_FAILURE(rc))
3097 goto failure;
3098
3099 /* Add lookup record for patch to guest address translation (for the push) */
3100 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3101
3102 /* Duplicate push. */
3103 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3104 if (RT_FAILURE(rc))
3105 goto failure;
3106
3107 /* Generate jump to common entrypoint. */
3108 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3109 if (RT_FAILURE(rc))
3110 goto failure;
3111
3112 /* size of patch block */
3113 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3114
3115 /* Update free pointer in patch memory. */
3116 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3117 /* Round to next 8 byte boundary */
3118 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3119
3120 /* There's no jump from guest to patch code. */
3121 pPatch->cbPatchJump = 0;
3122
3123
3124#ifdef LOG_ENABLED
3125 Log(("Patch code ----------------------------------------------------------\n"));
3126 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3127 Log(("Patch code ends -----------------------------------------------------\n"));
3128#endif
3129 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3130
3131 /*
3132 * Insert into patch to guest lookup tree
3133 */
3134 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3135 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3136 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3137 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3138 patmR3DbgAddPatch(pVM, pPatchRec);
3139
3140 pPatch->uState = PATCH_ENABLED;
3141
3142 return VINF_SUCCESS;
3143 }
3144 }
3145failure:
3146 /* Give back the patch memory we no longer need */
3147 if (orgOffsetPatchMem != (uint32_t)~0)
3148 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3149
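/* No usable push/jmp stub pattern was found (or patching it failed); fall back to
 * patching the privileged instruction itself as a regular (CLI style) patch block. */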
3150 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3151}
3152
3153/**
3154 * Install a trampoline to call a guest trap handler directly
3155 *
3156 * @returns VBox status code.
3157 * @param pVM Pointer to the VM.
3158 * @param pInstrGC Guest context pointer to privileged instruction
3159 * @param pPatchRec Patch record
3160 * @param pCacheRec Cache record ptr
3161 *
3162 */
3163static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3164{
3165 PPATCHINFO pPatch = &pPatchRec->patch;
3166 int rc = VERR_PATCHING_REFUSED;
3167 uint32_t orgOffsetPatchMem = ~0;
3168 bool fInserted;
3169
3170 // save original offset (in case of failures later on)
3171 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3172
3173 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3174 pPatch->uCurPatchOffset = 0;
3175 pPatch->nrPatch2GuestRecs = 0;
3176
3177#ifdef VBOX_WITH_STATISTICS
3178 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3179 if (RT_FAILURE(rc))
3180 goto failure;
3181#endif
3182
3183 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3184 if (RT_FAILURE(rc))
3185 goto failure;
3186
3187 /* size of patch block */
3188 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3189
3190 /* Update free pointer in patch memory. */
3191 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3192 /* Round to next 8 byte boundary */
3193 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3194
3195 /* There's no jump from guest to patch code. */
3196 pPatch->cbPatchJump = 0;
3197
3198#ifdef LOG_ENABLED
3199 Log(("Patch code ----------------------------------------------------------\n"));
3200 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3201 Log(("Patch code ends -----------------------------------------------------\n"));
3202#endif
3203 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3204 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3205
3206 /*
3207 * Insert into patch to guest lookup tree
3208 */
3209 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3210 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3211 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3212 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3213 patmR3DbgAddPatch(pVM, pPatchRec);
3214
3215 pPatch->uState = PATCH_ENABLED;
3216 return VINF_SUCCESS;
3217
3218failure:
3219 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3220
3221 /* Turn this patch into a dummy. */
3222 pPatch->uState = PATCH_REFUSED;
3223 pPatch->pPatchBlockOffset = 0;
3224
3225 /* Give back the patch memory we no longer need */
3226 Assert(orgOffsetPatchMem != (uint32_t)~0);
3227 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3228
3229 return rc;
3230}
3231
3232
3233#ifdef LOG_ENABLED
3234/**
3235 * Check if the instruction is patched as a common idt handler
3236 *
3237 * @returns true or false
3238 * @param pVM Pointer to the VM.
3239 * @param pInstrGC Guest context pointer to the instruction
3240 *
3241 */
3242static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3243{
3244 PPATMPATCHREC pRec;
3245
3246 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3247 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3248 return true;
3249 return false;
3250}
3251#endif // LOG_ENABLED
3252
3253
3254/**
3255 * Duplicates a complete function
3256 *
3257 * @returns VBox status code.
3258 * @param pVM Pointer to the VM.
3259 * @param pInstrGC Guest context pointer to privileged instruction
3260 * @param pPatchRec Patch record
3261 * @param pCacheRec Cache record ptr
3262 *
3263 */
3264static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3265{
3266 PPATCHINFO pPatch = &pPatchRec->patch;
3267 int rc = VERR_PATCHING_REFUSED;
3268 uint32_t orgOffsetPatchMem = ~0;
3269 bool fInserted;
3270
3271 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3272 /* Save original offset (in case of failures later on). */
3273 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3274
3275 /* We will not go on indefinitely with call instruction handling. */
3276 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3277 {
3278 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3279 return VERR_PATCHING_REFUSED;
3280 }
3281
3282 pVM->patm.s.ulCallDepth++;
3283
3284#ifdef PATM_ENABLE_CALL
3285 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3286#endif
3287
3288 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3289
3290 pPatch->nrPatch2GuestRecs = 0;
3291 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3292 pPatch->uCurPatchOffset = 0;
3293
3294 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3295 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3296 if (RT_FAILURE(rc))
3297 goto failure;
3298
3299#ifdef VBOX_WITH_STATISTICS
3300 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3301 if (RT_FAILURE(rc))
3302 goto failure;
3303#endif
3304
3305 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3306 if (rc != VINF_SUCCESS)
3307 {
3308 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3309 goto failure;
3310 }
3311
3312 /* size of patch block */
3313 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3314
3315 /* Update free pointer in patch memory. */
3316 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3317 /* Round to next 8 byte boundary. */
3318 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3319
3320 pPatch->uState = PATCH_ENABLED;
3321
3322 /*
3323 * Insert into patch to guest lookup tree
3324 */
3325 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3326 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3327 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3328 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3329 if (!fInserted)
3330 {
3331 rc = VERR_PATCHING_REFUSED;
3332 goto failure;
3333 }
3334
3335 /* Note that patmr3SetBranchTargets can install additional patches!! */
3336 rc = patmr3SetBranchTargets(pVM, pPatch);
3337 if (rc != VINF_SUCCESS)
3338 {
3339 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3340 goto failure;
3341 }
3342
3343 patmR3DbgAddPatch(pVM, pPatchRec);
3344
3345#ifdef LOG_ENABLED
3346 Log(("Patch code ----------------------------------------------------------\n"));
3347 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3348 Log(("Patch code ends -----------------------------------------------------\n"));
3349#endif
3350
3351 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3352
3353 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3354 pPatch->pTempInfo->nrIllegalInstr = 0;
3355
3356 pVM->patm.s.ulCallDepth--;
3357 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3358 return VINF_SUCCESS;
3359
3360failure:
3361 if (pPatchRec->CoreOffset.Key)
3362 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3363
3364 patmEmptyTree(pVM, &pPatch->FixupTree);
3365 pPatch->nrFixups = 0;
3366
3367 patmEmptyTree(pVM, &pPatch->JumpTree);
3368 pPatch->nrJumpRecs = 0;
3369
3370 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3371 pPatch->pTempInfo->nrIllegalInstr = 0;
3372
3373 /* Turn this patch into a dummy. */
3374 pPatch->uState = PATCH_REFUSED;
3375 pPatch->pPatchBlockOffset = 0;
3376
3377 // Give back the patch memory we no longer need
3378 Assert(orgOffsetPatchMem != (uint32_t)~0);
3379 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3380
3381 pVM->patm.s.ulCallDepth--;
3382 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3383 return rc;
3384}
3385
3386/**
3387 * Creates trampoline code to jump inside an existing patch
3388 *
3389 * @returns VBox status code.
3390 * @param pVM Pointer to the VM.
3391 * @param pInstrGC Guest context pointer to privileged instruction
3392 * @param pPatchRec Patch record
3393 *
3394 */
3395static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3396{
3397 PPATCHINFO pPatch = &pPatchRec->patch;
3398 RTRCPTR pPage, pPatchTargetGC = 0;
3399 uint32_t orgOffsetPatchMem = ~0;
3400 int rc = VERR_PATCHING_REFUSED;
3401 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3402 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3403 bool fInserted = false;
3404
3405 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3406 /* Save original offset (in case of failures later on). */
3407 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3408
3409 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3410 /** @todo we already checked this before */
3411 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3412
3413 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3414 if (pPatchPage)
3415 {
3416 uint32_t i;
3417
3418 for (i=0;i<pPatchPage->cCount;i++)
3419 {
3420 if (pPatchPage->papPatch[i])
3421 {
3422 pPatchToJmp = pPatchPage->papPatch[i];
3423
3424 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3425 && pPatchToJmp->uState == PATCH_ENABLED)
3426 {
3427 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3428 if (pPatchTargetGC)
3429 {
3430 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3431 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3432 Assert(pPatchToGuestRec);
3433
3434 pPatchToGuestRec->fJumpTarget = true;
3435 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3436 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3437 break;
3438 }
3439 }
3440 }
3441 }
3442 }
3443 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3444
3445 /*
3446 * Only record the trampoline patch if this is the first patch to the target
3447 * or we recorded other patches already.
3448 * The goal is to refuse refreshing function duplicates if the guest
3449 * modifies code after a saved state was loaded because it is not possible
3450 * to save the relation between trampoline and target without changing the
3451 * saved state version.
3452 */
3453 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3454 || pPatchToJmp->pTrampolinePatchesHead)
3455 {
3456 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3457 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3458 if (!pTrampRec)
3459 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3460
3461 pTrampRec->pPatchTrampoline = pPatchRec;
3462 }
3463
3464 pPatch->nrPatch2GuestRecs = 0;
3465 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3466 pPatch->uCurPatchOffset = 0;
3467
3468 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3469 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3470 if (RT_FAILURE(rc))
3471 goto failure;
3472
3473#ifdef VBOX_WITH_STATISTICS
3474 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3475 if (RT_FAILURE(rc))
3476 goto failure;
3477#endif
3478
3479 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3480 if (RT_FAILURE(rc))
3481 goto failure;
3482
3483 /*
3484 * Insert into patch to guest lookup tree
3485 */
3486 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3487 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3488 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3489 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3490 if (!fInserted)
3491 {
3492 rc = VERR_PATCHING_REFUSED;
3493 goto failure;
3494 }
3495 patmR3DbgAddPatch(pVM, pPatchRec);
3496
3497 /* size of patch block */
3498 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3499
3500 /* Update free pointer in patch memory. */
3501 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3502 /* Round to next 8 byte boundary */
3503 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3504
3505 /* There's no jump from guest to patch code. */
3506 pPatch->cbPatchJump = 0;
3507
3508 /* Enable the patch. */
3509 pPatch->uState = PATCH_ENABLED;
3510 /* We allow this patch to be called as a function. */
3511 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3512
3513 if (pTrampRec)
3514 {
3515 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3516 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3517 }
3518 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3519 return VINF_SUCCESS;
3520
3521failure:
3522 if (pPatchRec->CoreOffset.Key)
3523 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3524
3525 patmEmptyTree(pVM, &pPatch->FixupTree);
3526 pPatch->nrFixups = 0;
3527
3528 patmEmptyTree(pVM, &pPatch->JumpTree);
3529 pPatch->nrJumpRecs = 0;
3530
3531 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3532 pPatch->pTempInfo->nrIllegalInstr = 0;
3533
3534 /* Turn this patch into a dummy. */
3535 pPatch->uState = PATCH_REFUSED;
3536 pPatch->pPatchBlockOffset = 0;
3537
3538 // Give back the patch memory we no longer need
3539 Assert(orgOffsetPatchMem != (uint32_t)~0);
3540 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3541
3542 if (pTrampRec)
3543 MMR3HeapFree(pTrampRec);
3544
3545 return rc;
3546}
3547
3548
3549/**
3550 * Patch branch target function for call/jump at specified location.
3551 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3552 *
3553 * @returns VBox status code.
3554 * @param pVM Pointer to the VM.
3555 * @param pCtx Pointer to the guest CPU context.
3556 *
3557 */
3558VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3559{
3560 RTRCPTR pBranchTarget, pPage;
3561 int rc;
3562 RTRCPTR pPatchTargetGC = 0;
3563 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3564
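/* Register convention used by the requesting patch code (as consumed below): EDX holds
 * the branch target, EDI the lookup cache entry to update, and the result (relative to
 * patch memory) is passed back in EAX. */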
3565 pBranchTarget = pCtx->edx;
3566 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3567
3568 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3569 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3570
3571 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3572 if (pPatchPage)
3573 {
3574 uint32_t i;
3575
3576 for (i=0;i<pPatchPage->cCount;i++)
3577 {
3578 if (pPatchPage->papPatch[i])
3579 {
3580 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3581
3582 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3583 && pPatch->uState == PATCH_ENABLED)
3584 {
3585 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3586 if (pPatchTargetGC)
3587 {
3588 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3589 break;
3590 }
3591 }
3592 }
3593 }
3594 }
3595
3596 if (pPatchTargetGC)
3597 {
3598 /* Create a trampoline that also sets PATM_ASMFIX_INTERRUPTFLAG. */
3599 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3600 }
3601 else
3602 {
3603 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3604 }
3605
3606 if (rc == VINF_SUCCESS)
3607 {
3608 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3609 Assert(pPatchTargetGC);
3610 }
3611
3612 if (pPatchTargetGC)
3613 {
3614 pCtx->eax = pPatchTargetGC;
3615 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3616 }
3617 else
3618 {
3619 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3620 pCtx->eax = 0;
3621 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3622 }
3623 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3624 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3625 AssertRC(rc);
3626
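/* Resume behind the PATM_ILLEGAL_INSTR_SIZE byte pseudo instruction that raised this request. */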
3627 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3628 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3629 return VINF_SUCCESS;
3630}
3631
3632/**
3633 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3634 *
3635 * @returns VBox status code.
3636 * @param pVM Pointer to the VM.
3637 * @param pCpu Disassembly CPU structure ptr
3638 * @param pInstrGC Guest context pointer to privileged instruction
3639 * @param pCacheRec Cache record ptr
3640 *
3641 */
3642static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3643{
3644 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3645 int rc = VERR_PATCHING_REFUSED;
3646 DISCPUSTATE cpu;
3647 RTRCPTR pTargetGC;
3648 PPATMPATCHREC pPatchFunction;
3649 uint32_t cbInstr;
3650 bool disret;
3651
3652 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3653 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3654
3655 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3656 {
3657 rc = VERR_PATCHING_REFUSED;
3658 goto failure;
3659 }
3660
3661 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3662 if (pTargetGC == 0)
3663 {
3664 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3665 rc = VERR_PATCHING_REFUSED;
3666 goto failure;
3667 }
3668
3669 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3670 if (pPatchFunction == NULL)
3671 {
3672 for(;;)
3673 {
3674 /* It could be an indirect call (call -> jmp dest).
3675 * Note that it's dangerous to assume the jump will never change...
3676 */
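/* Illustrative example (not from the source): for
 *     call A
 *     ...
 *     A: jmp B
 * the duplicate to call is really the one installed for B, so follow the jump once
 * and look up the patch for that target instead. */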
3677 uint8_t *pTmpInstrHC;
3678
3679 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3680 Assert(pTmpInstrHC);
3681 if (pTmpInstrHC == 0)
3682 break;
3683
3684 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3685 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3686 break;
3687
3688 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3689 if (pTargetGC == 0)
3690 {
3691 break;
3692 }
3693
3694 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3695 break;
3696 }
3697 if (pPatchFunction == 0)
3698 {
3699 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3700 rc = VERR_PATCHING_REFUSED;
3701 goto failure;
3702 }
3703 }
3704
3705 // make a copy of the guest code bytes that will be overwritten
3706 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3707
3708 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3709 AssertRC(rc);
3710
3711 /* Now replace the original call in the guest code */
3712 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3713 AssertRC(rc);
3714 if (RT_FAILURE(rc))
3715 goto failure;
3716
3717 /* Lowest and highest address for write monitoring. */
3718 pPatch->pInstrGCLowest = pInstrGC;
3719 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3720 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3721
3722 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3723
3724 pPatch->uState = PATCH_ENABLED;
3725 return VINF_SUCCESS;
3726
3727failure:
3728 /* Turn this patch into a dummy. */
3729 pPatch->uState = PATCH_REFUSED;
3730
3731 return rc;
3732}
3733
3734/**
3735 * Replace the address in an MMIO instruction with the cached version.
3736 *
3737 * @returns VBox status code.
3738 * @param pVM Pointer to the VM.
3739 * @param pInstrGC Guest context pointer to privileged instruction
3740 * @param pCpu Disassembly CPU structure ptr
3741 * @param pCacheRec Cache record ptr
3742 *
3743 * @note returns failure if patching is not allowed or possible
3744 *
3745 */
3746static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3747{
3748 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3749 uint8_t *pPB;
3750 int rc = VERR_PATCHING_REFUSED;
3751
3752 Assert(pVM->patm.s.mmio.pCachedData);
3753 if (!pVM->patm.s.mmio.pCachedData)
3754 goto failure;
3755
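/* Only instructions whose memory operand is a plain 32-bit displacement qualify; that
 * displacement occupies the last 4 bytes of the instruction and is rewritten below to
 * point at the cached copy of the MMIO data. Illustrative (assumed) example:
 *     mov eax, [0A0000000h]   ->   mov eax, [address of cached data]
 */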
3756 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3757 goto failure;
3758
3759 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3760 if (pPB == 0)
3761 goto failure;
3762
3763 /* Add relocation record for cached data access. */
3764 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3765 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3766 {
3767 Log(("Relocation failed for cached mmio address!!\n"));
3768 return VERR_PATCHING_REFUSED;
3769 }
3770 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3771
3772 /* Save original instruction. */
3773 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3774 AssertRC(rc);
3775
3776 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3777
3778 /* Replace address with that of the cached item. */
3779 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3780 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3781 AssertRC(rc);
3782 if (RT_FAILURE(rc))
3783 {
3784 goto failure;
3785 }
3786
3787 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3788 pVM->patm.s.mmio.pCachedData = 0;
3789 pVM->patm.s.mmio.GCPhys = 0;
3790 pPatch->uState = PATCH_ENABLED;
3791 return VINF_SUCCESS;
3792
3793failure:
3794 /* Turn this patch into a dummy. */
3795 pPatch->uState = PATCH_REFUSED;
3796
3797 return rc;
3798}
3799
3800
3801/**
3802 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3803 *
3804 * @returns VBox status code.
3805 * @param pVM Pointer to the VM.
3806 * @param pInstrGC Guest context pointer to privileged instruction
3807 * @param pPatch Patch record
3808 *
3809 * @note returns failure if patching is not allowed or possible
3810 *
3811 */
3812static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3813{
3814 DISCPUSTATE cpu;
3815 uint32_t cbInstr;
3816 bool disret;
3817 uint8_t *pInstrHC;
3818
3819 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3820
3821 /* Convert GC to HC address. */
3822 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3823 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3824
3825 /* Disassemble mmio instruction. */
3826 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3827 &cpu, &cbInstr);
3828 if (disret == false)
3829 {
3830 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3831 return VERR_PATCHING_REFUSED;
3832 }
3833
3834 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3835 if (cbInstr > MAX_INSTR_SIZE)
3836 return VERR_PATCHING_REFUSED;
3837 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3838 return VERR_PATCHING_REFUSED;
3839
3840 /* Add relocation record for cached data access. */
3841 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3842 {
3843 Log(("Relocation failed for cached mmio address!!\n"));
3844 return VERR_PATCHING_REFUSED;
3845 }
3846 /* Replace address with that of the cached item. */
3847 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3848
3849 /* Lowest and highest address for write monitoring. */
3850 pPatch->pInstrGCLowest = pInstrGC;
3851 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3852
3853 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3854 pVM->patm.s.mmio.pCachedData = 0;
3855 pVM->patm.s.mmio.GCPhys = 0;
3856 return VINF_SUCCESS;
3857}
3858
3859/**
3860 * Activates an int3 patch
3861 *
3862 * @returns VBox status code.
3863 * @param pVM Pointer to the VM.
3864 * @param pPatch Patch record
3865 */
3866static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3867{
3868 uint8_t bASMInt3 = 0xCC;
3869 int rc;
3870
3871 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3872 Assert(pPatch->uState != PATCH_ENABLED);
3873
3874 /* Replace first opcode byte with 'int 3'. */
3875 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3876 AssertRC(rc);
3877
3878 pPatch->cbPatchJump = sizeof(bASMInt3);
3879
3880 return rc;
3881}
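/* Byte level effect, illustrative sketch assuming e.g. a CLI instruction: activation
 * overwrites the first opcode byte in guest memory with 0xCC (int 3),
 *     FA ...   ->   CC ...
 * while the original bytes stay saved in pPatch->aPrivInstr so that
 * patmDeactivateInt3Patch() below can restore them. */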
3882
3883/**
3884 * Deactivates an int3 patch
3885 *
3886 * @returns VBox status code.
3887 * @param pVM Pointer to the VM.
3888 * @param pPatch Patch record
3889 */
3890static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3891{
3892 uint8_t ASMInt3 = 0xCC;
3893 int rc;
3894
3895 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3896 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3897
3898 /* Restore first opcode byte. */
3899 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3900 AssertRC(rc);
3901 return rc;
3902}
3903
3904/**
3905 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3906 * in the raw-mode context.
3907 *
3908 * @returns VBox status code.
3909 * @param pVM Pointer to the VM.
3910 * @param pInstrGC Guest context pointer to privileged instruction
3911 * @param pInstrHC Host context pointer to privileged instruction
3912 * @param pCpu Disassembly CPU structure ptr
3913 * @param pPatch Patch record
3914 *
3915 * @note returns failure if patching is not allowed or possible
3916 *
3917 */
3918int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3919{
3920 uint8_t bASMInt3 = 0xCC;
3921 int rc;
3922
3923 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3924 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3925
3926 /* Save the original instruction. */
3927 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3928 AssertRC(rc);
3929 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3930
3931 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3932
3933 /* Replace first opcode byte with 'int 3'. */
3934 rc = patmActivateInt3Patch(pVM, pPatch);
3935 if (RT_FAILURE(rc))
3936 goto failure;
3937
3938 /* Lowest and highest address for write monitoring. */
3939 pPatch->pInstrGCLowest = pInstrGC;
3940 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3941
3942 pPatch->uState = PATCH_ENABLED;
3943 return VINF_SUCCESS;
3944
3945failure:
3946 /* Turn this patch into a dummy. */
3947 return VERR_PATCHING_REFUSED;
3948}
3949
3950#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3951/**
3952 * Patch a jump instruction at specified location
3953 *
3954 * @returns VBox status code.
3955 * @param pVM Pointer to the VM.
3956 * @param pInstrGC Guest context pointer to privileged instruction
3957 * @param pInstrHC Host context pointer to privileged instruction
3958 * @param pCpu Disassembly CPU structure ptr
3959 * @param pPatchRec Patch record
3960 *
3961 * @note returns failure if patching is not allowed or possible
3962 *
3963 */
3964int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3965{
3966 PPATCHINFO pPatch = &pPatchRec->patch;
3967 int rc = VERR_PATCHING_REFUSED;
3968
3969 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3970 pPatch->uCurPatchOffset = 0;
3971 pPatch->cbPatchBlockSize = 0;
3972 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3973
3974 /*
3975 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3976 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3977 */
3978 switch (pCpu->pCurInstr->uOpcode)
3979 {
3980 case OP_JO:
3981 case OP_JNO:
3982 case OP_JC:
3983 case OP_JNC:
3984 case OP_JE:
3985 case OP_JNE:
3986 case OP_JBE:
3987 case OP_JNBE:
3988 case OP_JS:
3989 case OP_JNS:
3990 case OP_JP:
3991 case OP_JNP:
3992 case OP_JL:
3993 case OP_JNL:
3994 case OP_JLE:
3995 case OP_JNLE:
3996 case OP_JMP:
3997 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3998 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
3999 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4000 goto failure;
4001
4002 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4003 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4004 goto failure;
4005
4006 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4007 {
4008 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4009 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4010 rc = VERR_PATCHING_REFUSED;
4011 goto failure;
4012 }
4013
4014 break;
4015
4016 default:
4017 goto failure;
4018 }
4019
4020 // make a copy of the guest code bytes that will be overwritten
4021 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4022 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4023 pPatch->cbPatchJump = pCpu->cbInstr;
4024
4025 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4026 AssertRC(rc);
4027
4028 /* Now insert a jump in the guest code. */
4029 /*
4030 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4031 * references the target instruction in the conflict patch.
4032 */
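/* Illustrative note (not from the source): if the original jump targeted guest code that
 * is now covered by the conflicting patch, the displacement written below lands on the
 * corresponding instruction inside patch memory rather than on the overwritten guest code. */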
4033 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4034
4035 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4036 pPatch->pPatchJumpDestGC = pJmpDest;
4037
4038 PATMP2GLOOKUPREC cacheRec;
4039 RT_ZERO(cacheRec);
4040 cacheRec.pPatch = pPatch;
4041
4042 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4043 /* Free leftover lock if any. */
4044 if (cacheRec.Lock.pvMap)
4045 {
4046 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4047 cacheRec.Lock.pvMap = NULL;
4048 }
4049 AssertRC(rc);
4050 if (RT_FAILURE(rc))
4051 goto failure;
4052
4053 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4054
4055 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4056 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4057
4058 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4059
4060 /* Lowest and highest address for write monitoring. */
4061 pPatch->pInstrGCLowest = pInstrGC;
4062 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4063
4064 pPatch->uState = PATCH_ENABLED;
4065 return VINF_SUCCESS;
4066
4067failure:
4068 /* Turn this patch into a dummy. */
4069 pPatch->uState = PATCH_REFUSED;
4070
4071 return rc;
4072}
4073#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4074
4075
4076/**
4077 * Gives hint to PATM about supervisor guest instructions
4078 *
4079 * @returns VBox status code.
4080 * @param pVM Pointer to the VM.
4081 * @param pInstrGC Guest context pointer to privileged instruction
4082 * @param flags Patch flags
4083 */
4084VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4085{
4086 Assert(pInstrGC);
4087 Assert(flags == PATMFL_CODE32);
4088
4089 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4090 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4091}
4092
4093/**
4094 * Patch privileged instruction at specified location
4095 *
4096 * @returns VBox status code.
4097 * @param pVM Pointer to the VM.
4098 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4099 * @param flags Patch flags
4100 *
4101 * @note returns failure if patching is not allowed or possible
4102 */
4103VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4104{
4105 DISCPUSTATE cpu;
4106 R3PTRTYPE(uint8_t *) pInstrHC;
4107 uint32_t cbInstr;
4108 PPATMPATCHREC pPatchRec;
4109 PCPUMCTX pCtx = 0;
4110 bool disret;
4111 int rc;
4112 PVMCPU pVCpu = VMMGetCpu0(pVM);
4113 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4114
4115 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4116
4117 if ( !pVM
4118 || pInstrGC == 0
4119 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4120 {
4121 AssertFailed();
4122 return VERR_INVALID_PARAMETER;
4123 }
4124
4125 if (PATMIsEnabled(pVM) == false)
4126 return VERR_PATCHING_REFUSED;
4127
4128 /* Test for patch conflict only with patches that actually change guest code. */
4129 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4130 {
4131 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4132 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4133 if (pConflictPatch != 0)
4134 return VERR_PATCHING_REFUSED;
4135 }
4136
4137 if (!(flags & PATMFL_CODE32))
4138 {
4139 /** @todo Only 32-bit code right now */
4140 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at this moment!!\n"));
4141 return VERR_NOT_IMPLEMENTED;
4142 }
4143
4144 /* We ran out of patch memory; don't bother anymore. */
4145 if (pVM->patm.s.fOutOfMemory == true)
4146 return VERR_PATCHING_REFUSED;
4147
4148#if 1 /* DONT COMMIT ENABLED! */
4149 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4150 if ( 0
4151 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4152 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4153 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4154 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4155 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4156 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4157 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4158 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4159 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4160 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4161 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4162 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4163 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4164 || pInstrGC == 0x80014447 /* KfLowerIrql */
4165 || 0)
4166 {
4167 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4168 return VERR_PATCHING_REFUSED;
4169 }
4170#endif
4171
4172 /* Make sure the code selector is wide open; otherwise refuse. */
4173 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4174 if (CPUMGetGuestCPL(pVCpu) == 0)
4175 {
4176 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4177 if (pInstrGCFlat != pInstrGC)
4178 {
4179 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4180 return VERR_PATCHING_REFUSED;
4181 }
4182 }
4183
4184 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4185 if (!(flags & PATMFL_GUEST_SPECIFIC))
4186 {
4187 /* New code. Make sure CSAM has a go at it first. */
4188 CSAMR3CheckCode(pVM, pInstrGC);
4189 }
4190
4191 /* Note: obsolete */
4192 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4193 && (flags & PATMFL_MMIO_ACCESS))
4194 {
4195 RTRCUINTPTR offset;
4196 void *pvPatchCoreOffset;
4197
4198 /* Find the patch record. */
4199 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4200 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4201 if (pvPatchCoreOffset == NULL)
4202 {
4203 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4204 return VERR_PATCH_NOT_FOUND; //fatal error
4205 }
4206 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4207
4208 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4209 }
4210
4211 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4212
4213 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4214 if (pPatchRec)
4215 {
4216 Assert(!(flags & PATMFL_TRAMPOLINE));
4217
4218 /* Hints about existing patches are ignored. */
4219 if (flags & PATMFL_INSTR_HINT)
4220 return VERR_PATCHING_REFUSED;
4221
4222 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4223 {
4224 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4225 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4226 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4227 }
4228
4229 if (pPatchRec->patch.uState == PATCH_DISABLED)
4230 {
4231 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4232 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4233 {
4234 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4235 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4236 }
4237 else
4238 Log(("Enabling patch %RRv again\n", pInstrGC));
4239
4240 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4241 rc = PATMR3EnablePatch(pVM, pInstrGC);
4242 if (RT_SUCCESS(rc))
4243 return VWRN_PATCH_ENABLED;
4244
4245 return rc;
4246 }
4247 if ( pPatchRec->patch.uState == PATCH_ENABLED
4248 || pPatchRec->patch.uState == PATCH_DIRTY)
4249 {
4250 /*
4251 * The patch might have been overwritten.
4252 */
4253 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4254 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4255 {
4256 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4257 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4258 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4259 {
4260 if (flags & PATMFL_IDTHANDLER)
4261 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4262
4263 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4264 }
4265 }
4266 rc = PATMR3RemovePatch(pVM, pInstrGC);
4267 if (RT_FAILURE(rc))
4268 return VERR_PATCHING_REFUSED;
4269 }
4270 else
4271 {
4272 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4273 /* already tried it once! */
4274 return VERR_PATCHING_REFUSED;
4275 }
4276 }
4277
4278 RTGCPHYS GCPhys;
4279 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4280 if (rc != VINF_SUCCESS)
4281 {
4282 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4283 return rc;
4284 }
4285 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4286 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4287 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4288 {
4289 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4290 return VERR_PATCHING_REFUSED;
4291 }
4292
4293 /* Initialize cache record for guest address translations. */
4294 bool fInserted;
4295 PATMP2GLOOKUPREC cacheRec;
4296 RT_ZERO(cacheRec);
4297
4298 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4299 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4300
4301 /* Allocate patch record. */
4302 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4303 if (RT_FAILURE(rc))
4304 {
4305 Log(("Out of memory!!!!\n"));
4306 return VERR_NO_MEMORY;
4307 }
4308 pPatchRec->Core.Key = pInstrGC;
4309 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4310 /* Insert patch record into the lookup tree. */
4311 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4312 Assert(fInserted);
4313
4314 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4315 pPatchRec->patch.flags = flags;
4316 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4317 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4318
4319 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4320 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4321
4322 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4323 {
4324 /*
4325 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4326 */
4327 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4328 if (pPatchNear)
4329 {
4330 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4331 {
4332 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4333
4334 pPatchRec->patch.uState = PATCH_UNUSABLE;
4335 /*
4336 * Leave the new patch record in place, marked as unusable, to prevent us from checking it over and over again
4337 */
4338 return VERR_PATCHING_REFUSED;
4339 }
4340 }
4341 }
4342
4343 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4344 if (pPatchRec->patch.pTempInfo == 0)
4345 {
4346 Log(("Out of memory!!!!\n"));
4347 return VERR_NO_MEMORY;
4348 }
4349
4350 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4351 if (disret == false)
4352 {
4353 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4354 return VERR_PATCHING_REFUSED;
4355 }
4356
4357 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4358 if (cbInstr > MAX_INSTR_SIZE)
4359 return VERR_PATCHING_REFUSED;
4360
4361 pPatchRec->patch.cbPrivInstr = cbInstr;
4362 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4363
4364 /* Restricted hinting for now. */
4365 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4366
4367 /* Initialize cache record patch pointer. */
4368 cacheRec.pPatch = &pPatchRec->patch;
4369
4370 /* Allocate statistics slot */
4371 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4372 {
4373 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4374 }
4375 else
4376 {
4377 Log(("WARNING: Patch index wrap around!!\n"));
4378 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4379 }
4380
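/* Dispatch on the patch type flags: each installer below generates or modifies the
 * actual patch and is responsible for setting pPatchRec->patch.uState. */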
4381 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4382 {
4383 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4384 }
4385 else
4386 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4387 {
4388 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4389 }
4390 else
4391 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4392 {
4393 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4394 }
4395 else
4396 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4397 {
4398 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4399 }
4400 else
4401 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4402 {
4403 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4404 }
4405 else
4406 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4407 {
4408 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4409 }
4410 else
4411 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4412 {
4413 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4414 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4415
4416 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4417#ifdef VBOX_WITH_STATISTICS
4418 if ( rc == VINF_SUCCESS
4419 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4420 {
4421 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4422 }
4423#endif
4424 }
4425 else
4426 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4427 {
4428 switch (cpu.pCurInstr->uOpcode)
4429 {
4430 case OP_SYSENTER:
4431 case OP_PUSH:
4432 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4433 if (rc == VINF_SUCCESS)
4434 {
4436 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4437 return rc;
4438 }
4439 break;
4440
4441 default:
4442 rc = VERR_NOT_IMPLEMENTED;
4443 break;
4444 }
4445 }
4446 else
4447 {
4448 switch (cpu.pCurInstr->uOpcode)
4449 {
4450 case OP_SYSENTER:
4451 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4452 if (rc == VINF_SUCCESS)
4453 {
4454 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4455 return VINF_SUCCESS;
4456 }
4457 break;
4458
4459#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4460 case OP_JO:
4461 case OP_JNO:
4462 case OP_JC:
4463 case OP_JNC:
4464 case OP_JE:
4465 case OP_JNE:
4466 case OP_JBE:
4467 case OP_JNBE:
4468 case OP_JS:
4469 case OP_JNS:
4470 case OP_JP:
4471 case OP_JNP:
4472 case OP_JL:
4473 case OP_JNL:
4474 case OP_JLE:
4475 case OP_JNLE:
4476 case OP_JECXZ:
4477 case OP_LOOP:
4478 case OP_LOOPNE:
4479 case OP_LOOPE:
4480 case OP_JMP:
4481 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4482 {
4483 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4484 break;
4485 }
4486 return VERR_NOT_IMPLEMENTED;
4487#endif
4488
4489 case OP_PUSHF:
4490 case OP_CLI:
4491 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4492 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4493 break;
4494
4495#ifndef VBOX_WITH_SAFE_STR
4496 case OP_STR:
4497#endif
4498 case OP_SGDT:
4499 case OP_SLDT:
4500 case OP_SIDT:
4501 case OP_CPUID:
4502 case OP_LSL:
4503 case OP_LAR:
4504 case OP_SMSW:
4505 case OP_VERW:
4506 case OP_VERR:
4507 case OP_IRET:
4508#ifdef VBOX_WITH_RAW_RING1
4509 case OP_MOV:
4510#endif
4511 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4512 break;
4513
4514 default:
4515 return VERR_NOT_IMPLEMENTED;
4516 }
4517 }
4518
4519 if (rc != VINF_SUCCESS)
4520 {
4521 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4522 {
4523 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4524 pPatchRec->patch.nrPatch2GuestRecs = 0;
4525 }
4526 pVM->patm.s.uCurrentPatchIdx--;
4527 }
4528 else
4529 {
4530 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4531 AssertRCReturn(rc, rc);
4532
4533 /* Keep track of upper and lower boundaries of patched instructions */
4534 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4535 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4536 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4537 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4538
4539 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4540 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4541
4542 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4543 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4544
4545 rc = VINF_SUCCESS;
4546
4547 /* Patch hints are not enabled by default. Only when they are actually encountered. */
4548 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4549 {
4550 rc = PATMR3DisablePatch(pVM, pInstrGC);
4551 AssertRCReturn(rc, rc);
4552 }
4553
4554#ifdef VBOX_WITH_STATISTICS
4555 /* Register statistics counter */
4556 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4557 {
4558 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4559 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4560#ifndef DEBUG_sandervl
4561 /* Full breakdown for the GUI. */
4562 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4563 "/PATM/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4564 STAMR3RegisterF(pVM, &pPatchRec->patch.pPatchBlockOffset,STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/offPatchBlock", pPatchRec->patch.pPrivInstrGC);
4565 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4566 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4567 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4568 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4569 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4570 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4571 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4572 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4573 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4574 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4575 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4576 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4577 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4578 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4579#endif
4580 }
4581#endif
4582
4583 /* Add debug symbol. */
4584 patmR3DbgAddPatch(pVM, pPatchRec);
4585 }
4586 /* Free leftover lock if any. */
4587 if (cacheRec.Lock.pvMap)
4588 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4589 return rc;
4590}
4591
4592/**
4593 * Query instruction size
4594 *
4595 * @returns VBox status code.
4596 * @param pVM Pointer to the VM.
4597 * @param pPatch Patch record
4598 * @param pInstrGC Instruction address
4599 */
4600static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4601{
4602 uint8_t *pInstrHC;
4603 PGMPAGEMAPLOCK Lock;
4604
4605 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4606 if (rc == VINF_SUCCESS)
4607 {
4608 DISCPUSTATE cpu;
4609 bool disret;
4610 uint32_t cbInstr;
4611
4612 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4613 PGMPhysReleasePageMappingLock(pVM, &Lock);
4614 if (disret)
4615 return cbInstr;
4616 }
4617 return 0;
4618}
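/*
 * Illustrative sketch, not part of the original sources: patmGetInstrSize returns 0 on
 * failure, so callers have to treat the size as best effort. The hypothetical helper
 * below mirrors the page boundary concern handled in patmAddPatchToPage further down.
 */
#if 0
static bool patmExampleInstrCrossesPage(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    uint32_t cbInstr = patmGetInstrSize(pVM, pPatch, pInstrGC);
    if (!cbInstr)
        return false;   /* unreadable or undecodable; assume it stays within the page */
    return ((RTRCUINTPTR)pInstrGC & PAGE_OFFSET_MASK) + cbInstr > PAGE_SIZE;
}
#endif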
4619
4620/**
4621 * Add patch to page record
4622 *
4623 * @returns VBox status code.
4624 * @param pVM Pointer to the VM.
4625 * @param pPage Page address
4626 * @param pPatch Patch record
4627 */
4628int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4629{
4630 PPATMPATCHPAGE pPatchPage;
4631 int rc;
4632
4633 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4634
4635 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4636 if (pPatchPage)
4637 {
4638 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4639 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4640 {
4641 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4642 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4643
4644 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4645 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4646 (void **)&pPatchPage->papPatch);
4647 if (RT_FAILURE(rc))
4648 {
4649 Log(("Out of memory!!!!\n"));
4650 return VERR_NO_MEMORY;
4651 }
4652 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4653 MMHyperFree(pVM, papPatchOld);
4654 }
4655 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4656 pPatchPage->cCount++;
4657 }
4658 else
4659 {
4660 bool fInserted;
4661
4662 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4663 if (RT_FAILURE(rc))
4664 {
4665 Log(("Out of memory!!!!\n"));
4666 return VERR_NO_MEMORY;
4667 }
4668 pPatchPage->Core.Key = pPage;
4669 pPatchPage->cCount = 1;
4670 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4671
4672 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4673 (void **)&pPatchPage->papPatch);
4674 if (RT_FAILURE(rc))
4675 {
4676 Log(("Out of memory!!!!\n"));
4677 MMHyperFree(pVM, pPatchPage);
4678 return VERR_NO_MEMORY;
4679 }
4680 pPatchPage->papPatch[0] = pPatch;
4681
4682 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4683 Assert(fInserted);
4684 pVM->patm.s.cPageRecords++;
4685
4686 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4687 }
4688 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4689
4690 /* Get the closest guest instruction (from below) */
4691 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4692 Assert(pGuestToPatchRec);
4693 if (pGuestToPatchRec)
4694 {
4695 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4696 if ( pPatchPage->pLowestAddrGC == 0
4697 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4698 {
4699 RTRCUINTPTR offset;
4700
4701 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4702
4703 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4704 /* If we're too close to the page boundary, then make sure an
4705 instruction from the previous page doesn't cross the
4706 boundary itself. */
4707 if (offset && offset < MAX_INSTR_SIZE)
4708 {
4709 /* Get the closest guest instruction (from above) */
4710 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4711
4712 if (pGuestToPatchRec)
4713 {
4714 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4715 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4716 {
4717 pPatchPage->pLowestAddrGC = pPage;
4718 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4719 }
4720 }
4721 }
4722 }
4723 }
4724
4725 /* Get the closest guest instruction (from above) */
4726 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4727 Assert(pGuestToPatchRec);
4728 if (pGuestToPatchRec)
4729 {
4730 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4731 if ( pPatchPage->pHighestAddrGC == 0
4732 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4733 {
4734 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4735 /* Increase by instruction size. */
4736 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4737//// Assert(size);
4738 pPatchPage->pHighestAddrGC += size;
4739 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4740 }
4741 }
4742
4743 return VINF_SUCCESS;
4744}
4745
4746/**
4747 * Remove patch from page record
4748 *
4749 * @returns VBox status code.
4750 * @param pVM Pointer to the VM.
4751 * @param pPage Page address
4752 * @param pPatch Patch record
4753 */
4754int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4755{
4756 PPATMPATCHPAGE pPatchPage;
4757 int rc;
4758
4759 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4760 Assert(pPatchPage);
4761
4762 if (!pPatchPage)
4763 return VERR_INVALID_PARAMETER;
4764
4765 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4766
4767 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4768 if (pPatchPage->cCount > 1)
4769 {
4770 uint32_t i;
4771
4772 /* Used by multiple patches */
4773 for (i = 0; i < pPatchPage->cCount; i++)
4774 {
4775 if (pPatchPage->papPatch[i] == pPatch)
4776 {
4777 /* close the gap between the remaining pointers. */
4778 uint32_t cNew = --pPatchPage->cCount;
4779 if (i < cNew)
4780 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4781 pPatchPage->papPatch[cNew] = NULL;
4782 return VINF_SUCCESS;
4783 }
4784 }
4785 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4786 }
4787 else
4788 {
4789 PPATMPATCHPAGE pPatchNode;
4790
4791 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4792
4793 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4794 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4795 Assert(pPatchNode && pPatchNode == pPatchPage);
4796
4797 Assert(pPatchPage->papPatch);
4798 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4799 AssertRC(rc);
4800 rc = MMHyperFree(pVM, pPatchPage);
4801 AssertRC(rc);
4802 pVM->patm.s.cPageRecords--;
4803 }
4804 return VINF_SUCCESS;
4805}
4806
4807/**
4808 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4809 *
4810 * @returns VBox status code.
4811 * @param pVM Pointer to the VM.
4812 * @param pPatch Patch record
4813 */
4814int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4815{
4816 int rc;
4817 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4818
4819 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4820 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4821 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4822
4823 /** @todo optimize better (large gaps between current and next used page) */
4824 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4825 {
4826 /* Get the closest guest instruction (from above) */
4827 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4828 if ( pGuestToPatchRec
4829 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4830 )
4831 {
4832 /* Code in page really patched -> add record */
4833 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4834 AssertRC(rc);
4835 }
4836 }
4837 pPatch->flags |= PATMFL_CODE_MONITORED;
4838 return VINF_SUCCESS;
4839}
4840
4841/**
4842 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4843 *
4844 * @returns VBox status code.
4845 * @param pVM Pointer to the VM.
4846 * @param pPatch Patch record
4847 */
4848static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4849{
4850 int rc;
4851 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4852
4853 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4854 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4855 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4856
4857 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4858 {
4859 /* Get the closest guest instruction (from above) */
4860 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4861 if ( pGuestToPatchRec
4862 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4863 )
4864 {
4865 /* Code in page really patched -> remove record */
4866 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4867 AssertRC(rc);
4868 }
4869 }
4870 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4871 return VINF_SUCCESS;
4872}
4873
4874/**
4875 * Notifies PATM about a (potential) write to code that has been patched.
4876 *
4877 * @returns VBox status code.
4878 * @param pVM Pointer to the VM.
4879 * @param GCPtr GC pointer to write address
4880 * @param cbWrite Number of bytes to write
4881 *
4882 */
4883VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4884{
4885 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4886
4887 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4888
4889 Assert(VM_IS_EMT(pVM));
4890 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4891
4892 /* Quick boundary check */
4893 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4894 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4895 )
4896 return VINF_SUCCESS;
4897
4898 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4899
4900 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4901 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4902
4903 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4904 {
4905loop_start:
4906 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4907 if (pPatchPage)
4908 {
4909 uint32_t i;
4910 bool fValidPatchWrite = false;
4911
4912 /* Quick check to see if the write is in the patched part of the page */
4913 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4914 || pPatchPage->pHighestAddrGC < GCPtr)
4915 {
4916 break;
4917 }
4918
4919 for (i=0;i<pPatchPage->cCount;i++)
4920 {
4921 if (pPatchPage->papPatch[i])
4922 {
4923 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4924 RTRCPTR pPatchInstrGC;
4925 //unused: bool fForceBreak = false;
4926
4927 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4928 /** @todo inefficient and includes redundant checks for multiple pages. */
4929 for (uint32_t j=0; j<cbWrite; j++)
4930 {
4931 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4932
4933 if ( pPatch->cbPatchJump
4934 && pGuestPtrGC >= pPatch->pPrivInstrGC
4935 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4936 {
4937 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4938 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4939 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4940 if (rc == VINF_SUCCESS)
4941 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4942 goto loop_start;
4943
4944 continue;
4945 }
4946
4947 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4948 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4949 if (!pPatchInstrGC)
4950 {
4951 RTRCPTR pClosestInstrGC;
4952 uint32_t size;
4953
4954 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4955 if (pPatchInstrGC)
4956 {
4957 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4958 Assert(pClosestInstrGC <= pGuestPtrGC);
4959 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4960 /* Check if this is not a write into a gap between two patches */
4961 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4962 pPatchInstrGC = 0;
4963 }
4964 }
4965 if (pPatchInstrGC)
4966 {
4967 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4968
4969 fValidPatchWrite = true;
4970
4971 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4972 Assert(pPatchToGuestRec);
4973 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4974 {
4975 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4976
4977 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4978 {
4979 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4980
4981 patmR3MarkDirtyPatch(pVM, pPatch);
4982
4983 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4984 goto loop_start;
4985 }
4986 else
4987 {
4988 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4989 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4990
4991 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4992 pPatchToGuestRec->fDirty = true;
4993
4994 *pInstrHC = 0xCC;
4995
4996 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4997 }
4998 }
4999 /* else already marked dirty */
5000 }
5001 }
5002 }
5003 } /* for each patch */
5004
5005 if (fValidPatchWrite == false)
5006 {
5007 /* Write to a part of the page that either:
5008 * - doesn't contain any code (shared code/data); rather unlikely
5009 * - belongs to an old code page that's no longer in active use.
5010 */
5011invalid_write_loop_start:
5012 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5013
5014 if (pPatchPage)
5015 {
5016 for (i=0;i<pPatchPage->cCount;i++)
5017 {
5018 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5019
5020 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5021 {
5022 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5023 if (pPatch->flags & PATMFL_IDTHANDLER)
5024 {
5025 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5026
5027 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5028 int rc = patmRemovePatchPages(pVM, pPatch);
5029 AssertRC(rc);
5030 }
5031 else
5032 {
5033 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5034 patmR3MarkDirtyPatch(pVM, pPatch);
5035 }
5036 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5037 goto invalid_write_loop_start;
5038 }
5039 } /* for */
5040 }
5041 }
5042 }
5043 }
5044 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5045 return VINF_SUCCESS;
5046
5047}
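/*
 * Illustrative sketch, not part of the original sources: a hypothetical caller forwarding
 * an intercepted guest write to PATM. The real notifications presumably arrive through the
 * write monitoring that patmAddPatchToPage sets up via CSAMR3MonitorPage.
 */
#if 0
static void patmExampleNotifyWrite(PVM pVM, RTRCPTR GCPtrWrite, uint32_t cbWrite)
{
    int rc = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
    AssertRC(rc);   /* affected patches are marked dirty or removed internally */
}
#endif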
5048
5049/**
5050 * Disable all patches in a flushed page
5051 *
5052 * @returns VBox status code
5053 * @param pVM Pointer to the VM.
5054 * @param addr GC address of the page to flush
5055 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5056 * having to double check if the physical address has changed
5057 */
5058VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5059{
5060 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5061
5062 addr &= PAGE_BASE_GC_MASK;
5063
5064 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5065 if (pPatchPage)
5066 {
5067 int i;
5068
5069 /* From top to bottom as the array is modified by patmR3MarkDirtyPatch. */
5070 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5071 {
5072 if (pPatchPage->papPatch[i])
5073 {
5074 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5075
5076 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5077 patmR3MarkDirtyPatch(pVM, pPatch);
5078 }
5079 }
5080 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5081 }
5082 return VINF_SUCCESS;
5083}
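/*
 * Illustrative sketch, not part of the original sources: forwarding a page flush to PATM.
 * As the note above says, CSAMR3FlushPage is currently the only real caller; the helper
 * below is hypothetical.
 */
#if 0
static void patmExampleFlushPage(PVM pVM, RTRCPTR GCPtrPage)
{
    int rc = PATMR3FlushPage(pVM, GCPtrPage);   /* marks every patch in that page dirty */
    AssertRC(rc);
}
#endif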
5084
5085/**
5086 * Checks if the instruction at the specified address has already been patched.
5087 *
5088 * @returns boolean, patched or not
5089 * @param pVM Pointer to the VM.
5090 * @param pInstrGC Guest context pointer to instruction
5091 */
5092VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5093{
5094 Assert(!HMIsEnabled(pVM));
5095 PPATMPATCHREC pPatchRec;
5096 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5097 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5098 return true;
5099 return false;
5100}
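/*
 * Illustrative sketch, not part of the original sources: only a patch in the PATCH_ENABLED
 * state whose private instruction starts exactly at pInstrGC makes this return true;
 * disabled, dirty or unusable patches do not. The helper below is hypothetical.
 */
#if 0
static void patmExampleCheckPatched(PVM pVM, RTRCPTR pInstrGC)
{
    if (PATMR3HasBeenPatched(pVM, pInstrGC))
        Log(("Instruction at %RRv is the start of an enabled patch\n", pInstrGC));
}
#endif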
5101
5102/**
5103 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
5104 *
5105 * @returns VBox status code.
5106 * @param pVM Pointer to the VM.
5107 * @param pInstrGC GC address of instr
5108 * @param pByte opcode byte pointer (OUT)
5109 *
5110 */
5111VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5112{
5113 PPATMPATCHREC pPatchRec;
5114
5115 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5116
5117 /* Shortcut. */
5118 if (!PATMIsEnabled(pVM))
5119 return VERR_PATCH_NOT_FOUND;
5120 Assert(!HMIsEnabled(pVM));
5121 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5122 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5123 return VERR_PATCH_NOT_FOUND;
5124
5125 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5126 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5127 if ( pPatchRec
5128 && pPatchRec->patch.uState == PATCH_ENABLED
5129 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5130 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5131 {
5132 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5133 *pByte = pPatchRec->patch.aPrivInstr[offset];
5134
5135 if (pPatchRec->patch.cbPatchJump == 1)
5136 {
5137 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5138 }
5139 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5140 return VINF_SUCCESS;
5141 }
5142 return VERR_PATCH_NOT_FOUND;
5143}
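/*
 * Illustrative sketch, not part of the original sources: recovering the original first
 * opcode byte at an address covered by the 5-byte patch jump. The patmExample* helper is
 * hypothetical; only the PATMR3QueryOpcode call itself comes from this file.
 */
#if 0
static void patmExampleQueryOrgOpcode(PVM pVM, RTRCPTR pInstrGC)
{
    uint8_t bOrg;
    int rc = PATMR3QueryOpcode(pVM, pInstrGC, &bOrg);
    if (rc == VINF_SUCCESS)
        Log(("Original opcode byte at %RRv is %02X\n", pInstrGC, bOrg));
    else
        Assert(rc == VERR_PATCH_NOT_FOUND); /* address not inside any enabled patch jump */
}
#endif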
5144
5145/**
5146 * Read instruction bytes of the original code that was overwritten by the
5147 * 5-byte patch jump.
5148 *
5149 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5150 * @param pVM Pointer to the VM.
5151 * @param GCPtrInstr GC address of instr
5152 * @param pbDst The output buffer.
5153 * @param cbToRead The maximum number bytes to read.
5154 * @param pcbRead Where to return the actual number of bytes read.
5155 */
5156VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5157{
5158 /* Shortcut. */
5159 if (!PATMIsEnabled(pVM))
5160 return VERR_PATCH_NOT_FOUND;
5161 Assert(!HMIsEnabled(pVM));
5162 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5163 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5164 return VERR_PATCH_NOT_FOUND;
5165
5166 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5167
5168 /*
5169 * If the patch is enabled and the pointer lies within 5 bytes of this
5170 * priv instr ptr, then we've got a hit!
5171 */
5172 RTGCPTR32 off;
5173 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5174 GCPtrInstr, false /*fAbove*/);
5175 if ( pPatchRec
5176 && pPatchRec->patch.uState == PATCH_ENABLED
5177 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5178 {
5179 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5180 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5181 if (cbToRead > cbMax)
5182 cbToRead = cbMax;
5183 switch (cbToRead)
5184 {
5185 case 5: pbDst[4] = pbSrc[4];
5186 case 4: pbDst[3] = pbSrc[3];
5187 case 3: pbDst[2] = pbSrc[2];
5188 case 2: pbDst[1] = pbSrc[1];
5189 case 1: pbDst[0] = pbSrc[0];
5190 break;
5191 default:
5192 memcpy(pbDst, pbSrc, cbToRead);
5193 }
5194 *pcbRead = cbToRead;
5195
5196 if (pPatchRec->patch.cbPatchJump == 1)
5197 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5198 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5199 return VINF_SUCCESS;
5200 }
5201
5202 return VERR_PATCH_NOT_FOUND;
5203}
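/*
 * Illustrative sketch, not part of the original sources: reading guest code bytes while
 * hiding the 5-byte patch jump, e.g. for a disassembler byte reader. The helper is
 * hypothetical; PGMPhysSimpleReadGCPtr is used the same way elsewhere in this file.
 */
#if 0
static int patmExampleReadGuestBytes(PVM pVM, RTGCPTR32 GCPtrSrc, uint8_t *pbDst, size_t cbToRead)
{
    size_t cbRead = 0;
    int rc = PATMR3ReadOrgInstr(pVM, GCPtrSrc, pbDst, cbToRead, &cbRead);
    if (rc == VINF_SUCCESS && cbRead == cbToRead)
        return VINF_SUCCESS;
    /* Not (fully) covered by a patch jump; read the remainder straight from guest memory. */
    return PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pbDst + cbRead, GCPtrSrc + cbRead, cbToRead - cbRead);
}
#endif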
5204
5205/**
5206 * Disable patch for privileged instruction at specified location
5207 *
5208 * @returns VBox status code.
5209 * @param pVM Pointer to the VM.
5210 * @param pInstrGC Guest context pointer to the privileged instruction
5211 *
5212 * @note returns failure if patching is not allowed or possible
5213 *
5214 */
5215VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5216{
5217 PPATMPATCHREC pPatchRec;
5218 PPATCHINFO pPatch;
5219
5220 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5221 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5222 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5223 if (pPatchRec)
5224 {
5225 int rc = VINF_SUCCESS;
5226
5227 pPatch = &pPatchRec->patch;
5228
5229 /* Already disabled? */
5230 if (pPatch->uState == PATCH_DISABLED)
5231 return VINF_SUCCESS;
5232
5233 /* Clear the IDT entries for the patch we're disabling. */
5234 /* Note: very important as we clear IF in the patch itself */
5235 /** @todo this needs to be changed */
5236 if (pPatch->flags & PATMFL_IDTHANDLER)
5237 {
5238 uint32_t iGate;
5239
5240 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5241 if (iGate != (uint32_t)~0)
5242 {
5243 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5244 if (++cIDTHandlersDisabled < 256)
5245 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5246 }
5247 }
5248
5249 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, duplicated function, trampoline or IDT patches) */
5250 if ( pPatch->pPatchBlockOffset
5251 && pPatch->uState == PATCH_ENABLED)
5252 {
5253 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5254 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5255 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5256 }
5257
5258 /* IDT or function patches haven't changed any guest code. */
5259 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5260 {
5261 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5262 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5263
5264 if (pPatch->uState != PATCH_REFUSED)
5265 {
5266 uint8_t temp[16];
5267
5268 Assert(pPatch->cbPatchJump < sizeof(temp));
5269
5270 /* Let's first check if the guest code is still the same. */
5271 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5272 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5273 if (rc == VINF_SUCCESS)
5274 {
5275 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5276
5277 if ( temp[0] != 0xE9 /* jmp opcode */
5278 || *(RTRCINTPTR *)(&temp[1]) != displ
5279 )
5280 {
5281 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5282 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5283 /* Remove it completely */
5284 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5285 rc = PATMR3RemovePatch(pVM, pInstrGC);
5286 AssertRC(rc);
5287 return VWRN_PATCH_REMOVED;
5288 }
5289 patmRemoveJumpToPatch(pVM, pPatch);
5290 }
5291 else
5292 {
5293 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5294 pPatch->uState = PATCH_DISABLE_PENDING;
5295 }
5296 }
5297 else
5298 {
5299 AssertMsgFailed(("Patch was refused!\n"));
5300 return VERR_PATCH_ALREADY_DISABLED;
5301 }
5302 }
5303 else
5304 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5305 {
5306 uint8_t temp[16];
5307
5308 Assert(pPatch->cbPatchJump < sizeof(temp));
5309
5310 /* Let's first check if the guest code is still the same. */
5311 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5312 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5313 if (rc == VINF_SUCCESS)
5314 {
5315 if (temp[0] != 0xCC)
5316 {
5317 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5318 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5319 /* Remove it completely */
5320 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5321 rc = PATMR3RemovePatch(pVM, pInstrGC);
5322 AssertRC(rc);
5323 return VWRN_PATCH_REMOVED;
5324 }
5325 patmDeactivateInt3Patch(pVM, pPatch);
5326 }
5327 }
5328
5329 if (rc == VINF_SUCCESS)
5330 {
5331 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5332 if (pPatch->uState == PATCH_DISABLE_PENDING)
5333 {
5334 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5335 pPatch->uState = PATCH_UNUSABLE;
5336 }
5337 else
5338 if (pPatch->uState != PATCH_DIRTY)
5339 {
5340 pPatch->uOldState = pPatch->uState;
5341 pPatch->uState = PATCH_DISABLED;
5342 }
5343 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5344 }
5345
5346 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5347 return VINF_SUCCESS;
5348 }
5349 Log(("Patch not found!\n"));
5350 return VERR_PATCH_NOT_FOUND;
5351}
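/*
 * Illustrative sketch, not part of the original sources: temporarily disabling a patch.
 * If the guest already overwrote the patch jump, the patch gets removed instead and
 * VWRN_PATCH_REMOVED is returned; the callers in this file treat that as success.
 * The helper below is hypothetical.
 */
#if 0
static int patmExampleDisable(PVM pVM, RTRCPTR pPrivInstrGC)
{
    int rc = PATMR3DisablePatch(pVM, pPrivInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;    /* the patch is gone for good; nothing to re-enable later */
    return rc;
}
#endif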
5352
5353/**
5354 * Permanently disable patch for privileged instruction at specified location
5355 *
5356 * @returns VBox status code.
5357 * @param pVM Pointer to the VM.
5358 * @param pInstrGC Guest context instruction pointer
5359 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5360 * @param pConflictPatch Conflicting patch
5361 *
5362 */
5363static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5364{
5365 NOREF(pConflictAddr);
5366#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5367 PATCHINFO patch;
5368 DISCPUSTATE cpu;
5369 R3PTRTYPE(uint8_t *) pInstrHC;
5370 uint32_t cbInstr;
5371 bool disret;
5372 int rc;
5373
5374 RT_ZERO(patch);
5375 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5376 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5377 /*
5378 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5379 * with one that jumps right into the conflict patch.
5380 * Otherwise we must disable the conflicting patch to avoid serious problems.
5381 */
5382 if ( disret == true
5383 && (pConflictPatch->flags & PATMFL_CODE32)
5384 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5385 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5386 {
5387 /* Hint patches must be enabled first. */
5388 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5389 {
5390 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5391 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5392 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5393 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5394 /* Enabling might fail if the patched code has changed in the meantime. */
5395 if (rc != VINF_SUCCESS)
5396 return rc;
5397 }
5398
5399 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5400 if (RT_SUCCESS(rc))
5401 {
5402 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5403 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5404 return VINF_SUCCESS;
5405 }
5406 }
5407#endif
5408
5409 if (pConflictPatch->opcode == OP_CLI)
5410 {
5411 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5412 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5413 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5414 if (rc == VWRN_PATCH_REMOVED)
5415 return VINF_SUCCESS;
5416 if (RT_SUCCESS(rc))
5417 {
5418 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5419 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5420 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5421 if (rc == VERR_PATCH_NOT_FOUND)
5422 return VINF_SUCCESS; /* removed already */
5423
5424 AssertRC(rc);
5425 if (RT_SUCCESS(rc))
5426 {
5427 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5428 return VINF_SUCCESS;
5429 }
5430 }
5431 /* else turned into unusable patch (see below) */
5432 }
5433 else
5434 {
5435 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5436 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5437 if (rc == VWRN_PATCH_REMOVED)
5438 return VINF_SUCCESS;
5439 }
5440
5441 /* No need to monitor the code anymore. */
5442 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5443 {
5444 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5445 AssertRC(rc);
5446 }
5447 pConflictPatch->uState = PATCH_UNUSABLE;
5448 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5449 return VERR_PATCH_DISABLED;
5450}
5451
5452/**
5453 * Enable patch for privileged instruction at specified location
5454 *
5455 * @returns VBox status code.
5456 * @param pVM Pointer to the VM.
5457 * @param pInstrGC Guest context pointer to the privileged instruction
5458 *
5459 * @note returns failure if patching is not allowed or possible
5460 *
5461 */
5462VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5463{
5464 PPATMPATCHREC pPatchRec;
5465 PPATCHINFO pPatch;
5466
5467 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5468 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5469 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5470 if (pPatchRec)
5471 {
5472 int rc = VINF_SUCCESS;
5473
5474 pPatch = &pPatchRec->patch;
5475
5476 if (pPatch->uState == PATCH_DISABLED)
5477 {
5478 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5479 {
5480 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5481 uint8_t temp[16];
5482
5483 Assert(pPatch->cbPatchJump < sizeof(temp));
5484
5485 /* Let's first check if the guest code is still the same. */
5486 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5487 AssertRC(rc2);
5488 if (rc2 == VINF_SUCCESS)
5489 {
5490 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5491 {
5492 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5493 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5494 /* Remove it completely */
5495 rc = PATMR3RemovePatch(pVM, pInstrGC);
5496 AssertRC(rc);
5497 return VERR_PATCH_NOT_FOUND;
5498 }
5499
5500 PATMP2GLOOKUPREC cacheRec;
5501 RT_ZERO(cacheRec);
5502 cacheRec.pPatch = pPatch;
5503
5504 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5505 /* Free leftover lock if any. */
5506 if (cacheRec.Lock.pvMap)
5507 {
5508 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5509 cacheRec.Lock.pvMap = NULL;
5510 }
5511 AssertRC(rc2);
5512 if (RT_FAILURE(rc2))
5513 return rc2;
5514
5515#ifdef DEBUG
5516 {
5517 DISCPUSTATE cpu;
5518 char szOutput[256];
5519 uint32_t cbInstr;
5520 uint32_t i = 0;
5521 bool disret;
5522 while(i < pPatch->cbPatchJump)
5523 {
5524 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5525 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5526 Log(("Renewed patch instr: %s", szOutput));
5527 i += cbInstr;
5528 }
5529 }
5530#endif
5531 }
5532 }
5533 else
5534 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5535 {
5536 uint8_t temp[16];
5537
5538 Assert(pPatch->cbPatchJump < sizeof(temp));
5539
5540 /* Let's first check if the guest code is still the same. */
5541 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5542 AssertRC(rc2);
5543
5544 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5545 {
5546 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5547 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5548 rc = PATMR3RemovePatch(pVM, pInstrGC);
5549 AssertRC(rc);
5550 return VERR_PATCH_NOT_FOUND;
5551 }
5552
5553 rc2 = patmActivateInt3Patch(pVM, pPatch);
5554 if (RT_FAILURE(rc2))
5555 return rc2;
5556 }
5557
5558 pPatch->uState = pPatch->uOldState; //restore state
5559
5560 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5561 if (pPatch->pPatchBlockOffset)
5562 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5563
5564 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5565 }
5566 else
5567 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5568
5569 return rc;
5570 }
5571 return VERR_PATCH_NOT_FOUND;
5572}
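/*
 * Illustrative sketch, not part of the original sources: re-enabling a previously disabled
 * patch. VERR_PATCH_NOT_FOUND can also mean the patch was just removed because the guest
 * code changed while it was disabled (see the StatOverwritten paths above). The helper
 * below is hypothetical.
 */
#if 0
static bool patmExampleTryEnable(PVM pVM, RTRCPTR pPrivInstrGC)
{
    int rc = PATMR3EnablePatch(pVM, pPrivInstrGC);
    return rc == VINF_SUCCESS;  /* anything else: leave the instruction to the trap handlers */
}
#endif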
5573
5574/**
5575 * Remove patch for privileged instruction at specified location
5576 *
5577 * @returns VBox status code.
5578 * @param pVM Pointer to the VM.
5579 * @param pPatchRec Patch record
5580 * @param fForceRemove Force removal even for referenced or duplicated function patches
5581 */
5582int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5583{
5584 PPATCHINFO pPatch;
5585
5586 pPatch = &pPatchRec->patch;
5587
5588 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5589 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5590 {
5591 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5592 return VERR_ACCESS_DENIED;
5593 }
5594 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5595
5596 /* Note: NEVER EVER REUSE PATCH MEMORY */
5597 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5598
5599 if (pPatchRec->patch.pPatchBlockOffset)
5600 {
5601 PAVLOU32NODECORE pNode;
5602
5603 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5604 Assert(pNode);
5605 }
5606
5607 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5608 {
5609 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5610 AssertRC(rc);
5611 }
5612
5613#ifdef VBOX_WITH_STATISTICS
5614 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5615 {
5616 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5617 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5618 }
5619#endif
5620
5621 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5622 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5623 pPatch->nrPatch2GuestRecs = 0;
5624 Assert(pPatch->Patch2GuestAddrTree == 0);
5625
5626 patmEmptyTree(pVM, &pPatch->FixupTree);
5627 pPatch->nrFixups = 0;
5628 Assert(pPatch->FixupTree == 0);
5629
5630 if (pPatchRec->patch.pTempInfo)
5631 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5632
5633 /* Note: might fail, because it has already been removed (e.g. during reset). */
5634 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5635
5636 /* Free the patch record */
5637 MMHyperFree(pVM, pPatchRec);
5638 return VINF_SUCCESS;
5639}
5640
5641/**
5642 * RTAvlU32DoWithAll() worker.
5643 * Checks whether the current trampoline instruction is the jump to the target patch
5644 * and updates the displacement to jump to the new target.
5645 *
5646 * @returns VBox status code.
5647 * @retval VERR_ALREADY_EXISTS if the jump was found.
5648 * @param pNode The current patch to guest record to check.
5649 * @param pvUser The refresh state.
5650 */
5651static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5652{
5653 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5654 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5655 PVM pVM = pRefreshPatchState->pVM;
5656
5657 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5658
5659 /*
5660 * Check if the patch instruction starts with a jump.
5661 * ASSUMES that there is no other patch to guest record that starts
5662 * with a jump.
5663 */
5664 if (*pPatchInstr == 0xE9)
5665 {
5666 /* Jump found, update the displacement. */
5667 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5668 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5669 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5670
5671 LogFlow(("Updating trampoline patch new patch target %RRv, new displacement %d (old was %d)\n",
5672 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5673
5674 *(uint32_t *)&pPatchInstr[1] = displ;
5675 return VERR_ALREADY_EXISTS; /** @todo better return code */
5676 }
5677
5678 return VINF_SUCCESS;
5679}
5680
5681/**
5682 * Attempt to refresh the patch by recompiling its entire code block
5683 *
5684 * @returns VBox status code.
5685 * @param pVM Pointer to the VM.
5686 * @param pPatchRec Patch record
5687 */
5688int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5689{
5690 PPATCHINFO pPatch;
5691 int rc;
5692 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5693 PTRAMPREC pTrampolinePatchesHead = NULL;
5694
5695 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5696
5697 pPatch = &pPatchRec->patch;
5698 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5699 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5700 {
5701 if (!pPatch->pTrampolinePatchesHead)
5702 {
5703 /*
5704 * It is sometimes possible that there are trampoline patches to this patch
5705 * but they are not recorded (after a saved state load for example).
5706 * Refuse to refresh those patches.
5707 * This can hurt performance in theory if the patched code is modified by the guest
5708 * and executed often. However, most of the time states are saved after the guest
5709 * code was modified and the code is not updated again afterwards, so this shouldn't
5710 * be a big problem.
5711 */
5712 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5713 return VERR_PATCHING_REFUSED;
5714 }
5715 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5716 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5717 }
5718
5719 /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5720
5721 rc = PATMR3DisablePatch(pVM, pInstrGC);
5722 AssertRC(rc);
5723
5724 /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5725 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5726#ifdef VBOX_WITH_STATISTICS
5727 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5728 {
5729 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5730 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5731 }
5732#endif
5733
5734 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5735
5736 /* Attempt to install a new patch. */
5737 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5738 if (RT_SUCCESS(rc))
5739 {
5740 RTRCPTR pPatchTargetGC;
5741 PPATMPATCHREC pNewPatchRec;
5742
5743 /* Determine target address in new patch */
5744 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5745 Assert(pPatchTargetGC);
5746 if (!pPatchTargetGC)
5747 {
5748 rc = VERR_PATCHING_REFUSED;
5749 goto failure;
5750 }
5751
5752 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5753 pPatch->uCurPatchOffset = 0;
5754
5755 /* insert jump to new patch in old patch block */
5756 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5757 if (RT_FAILURE(rc))
5758 goto failure;
5759
5760 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5761 Assert(pNewPatchRec); /* can't fail */
5762
5763 /* Remove old patch (only do that when everything is finished) */
5764 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5765 AssertRC(rc2);
5766
5767 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5768 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5769 Assert(fInserted); NOREF(fInserted);
5770
5771 Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5772 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5773
5774 /* Used by another patch, so don't remove it! */
5775 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5776
5777 if (pTrampolinePatchesHead)
5778 {
5779 /* Update all trampoline patches to jump to the new patch. */
5780 PTRAMPREC pTrampRec = NULL;
5781 PATMREFRESHPATCH RefreshPatch;
5782
5783 RefreshPatch.pVM = pVM;
5784 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5785
5786 pTrampRec = pTrampolinePatchesHead;
5787
5788 while (pTrampRec)
5789 {
5790 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5791
5792 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5793 /*
5794 * We have to find the right patch2guest record because there might be others
5795 * for statistics.
5796 */
5797 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5798 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5799 Assert(rc == VERR_ALREADY_EXISTS);
5800 rc = VINF_SUCCESS;
5801 pTrampRec = pTrampRec->pNext;
5802 }
5803 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5804 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5805 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5806 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5807 }
5808 }
5809
5810failure:
5811 if (RT_FAILURE(rc))
5812 {
5813 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5814
5815 /* Remove the new inactive patch */
5816 rc = PATMR3RemovePatch(pVM, pInstrGC);
5817 AssertRC(rc);
5818
5819 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5820 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5821 Assert(fInserted); NOREF(fInserted);
5822
5823 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5824 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5825 AssertRC(rc2);
5826
5827 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5828 }
5829 return rc;
5830}
5831
5832/**
5833 * Find patch for privileged instruction at specified location
5834 *
5835 * @returns Patch structure pointer if found; else NULL
5836 * @param pVM Pointer to the VM.
5837 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5838 * @param fIncludeHints Include hinted patches or not
5839 *
5840 */
5841PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5842{
5843 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5844 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5845 if (pPatchRec)
5846 {
5847 if ( pPatchRec->patch.uState == PATCH_ENABLED
5848 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5849 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5850 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5851 {
5852 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5853 return &pPatchRec->patch;
5854 }
5855 else
5856 if ( fIncludeHints
5857 && pPatchRec->patch.uState == PATCH_DISABLED
5858 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5859 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5860 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5861 {
5862 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5863 return &pPatchRec->patch;
5864 }
5865 }
5866 return NULL;
5867}
5868
5869/**
5870 * Checks whether the GC address is inside a generated patch jump
5871 *
5872 * @returns true -> yes, false -> no
5873 * @param pVM Pointer to the VM.
5874 * @param pAddr Guest context address.
5875 * @param pPatchAddr Guest context patch address (if true).
5876 */
5877VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5878{
5879 RTRCPTR addr;
5880 PPATCHINFO pPatch;
5881
5882 Assert(!HMIsEnabled(pVM));
5883 if (PATMIsEnabled(pVM) == false)
5884 return false;
5885
5886 if (pPatchAddr == NULL)
5887 pPatchAddr = &addr;
5888
5889 *pPatchAddr = 0;
5890
5891 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5892 if (pPatch)
5893 *pPatchAddr = pPatch->pPrivInstrGC;
5894
5895 return *pPatchAddr != 0;
5896}
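/*
 * Illustrative sketch, not part of the original sources: checking whether a faulting EIP
 * lands in the middle of a 5-byte patch jump. The helper below is hypothetical.
 */
#if 0
static void patmExampleCheckEip(PVM pVM, RTRCPTR GCPtrEip)
{
    RTGCPTR32 GCPtrPatchedInstr = 0;
    if (PATMR3IsInsidePatchJump(pVM, GCPtrEip, &GCPtrPatchedInstr))
        Log(("EIP %RRv lies inside the patch jump installed at %RRv\n", GCPtrEip, GCPtrPatchedInstr));
}
#endif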
5897
5898/**
5899 * Remove patch for privileged instruction at specified location
5900 *
5901 * @returns VBox status code.
5902 * @param pVM Pointer to the VM.
5903 * @param pInstrGC Guest context pointer to the privileged instruction
5904 *
5905 * @note returns failure if patching is not allowed or possible
5906 *
5907 */
5908VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5909{
5910 PPATMPATCHREC pPatchRec;
5911
5912 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5913 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5914 if (pPatchRec)
5915 {
5916 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5917 if (rc == VWRN_PATCH_REMOVED)
5918 return VINF_SUCCESS;
5919
5920 return patmR3RemovePatch(pVM, pPatchRec, false);
5921 }
5922 AssertFailed();
5923 return VERR_PATCH_NOT_FOUND;
5924}
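/*
 * Illustrative sketch, not part of the original sources: removing a patch by the guest
 * address of its privileged instruction. VWRN_PATCH_REMOVED from the disable step is
 * already folded into VINF_SUCCESS above, so a hypothetical caller only sees the codes
 * asserted below.
 */
#if 0
static void patmExampleRemove(PVM pVM, RTRCPTR pPrivInstrGC)
{
    int rc = PATMR3RemovePatch(pVM, pPrivInstrGC);
    Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND || rc == VERR_ACCESS_DENIED);
    NOREF(rc);
}
#endif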
5925
5926/**
5927 * Mark patch as dirty
5928 *
5929 * @returns VBox status code.
5930 * @param pVM Pointer to the VM.
5931 * @param pPatch Patch record
5932 *
5933 * @note returns failure if patching is not allowed or possible
5934 *
5935 */
5936static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5937{
5938 if (pPatch->pPatchBlockOffset)
5939 {
5940 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5941 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5942 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5943 }
5944
5945 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5946 /* Put back the replaced instruction. */
5947 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5948 if (rc == VWRN_PATCH_REMOVED)
5949 return VINF_SUCCESS;
5950
5951 /* Note: we don't restore patch pages for patches that are not enabled! */
5952 /* Note: be careful when changing this behaviour!! */
5953
5954 /* The patch pages are no longer marked for self-modifying code detection */
5955 if (pPatch->flags & PATMFL_CODE_MONITORED)
5956 {
5957 rc = patmRemovePatchPages(pVM, pPatch);
5958 AssertRCReturn(rc, rc);
5959 }
5960 pPatch->uState = PATCH_DIRTY;
5961
5962 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5963 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5964
5965 return VINF_SUCCESS;
5966}
5967
5968/**
5969 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5970 *
5971 * @returns VBox status code.
5972 * @param pVM Pointer to the VM.
5973 * @param pPatch Patch block structure pointer
5974 * @param pPatchGC GC address in patch block
5975 */
5976RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5977{
5978 Assert(pPatch->Patch2GuestAddrTree);
5979 /* Get the closest record from below. */
5980 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5981 if (pPatchToGuestRec)
5982 return pPatchToGuestRec->pOrgInstrGC;
5983
5984 return 0;
5985}
5986
5987/**
5988 * Converts Guest code GC ptr to Patch code GC ptr (if found)
5989 *
5990 * @returns corresponding GC pointer in patch block
5991 * @param pVM Pointer to the VM.
5992 * @param pPatch Current patch block pointer
5993 * @param pInstrGC Guest context pointer to privileged instruction
5994 *
5995 */
5996RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5997{
5998 if (pPatch->Guest2PatchAddrTree)
5999 {
6000 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6001 if (pGuestToPatchRec)
6002 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6003 }
6004
6005 return 0;
6006}
6007
6008/**
6009 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6010 *
6011 * @returns corresponding GC pointer in patch block
6012 * @param pVM Pointer to the VM.
6013 * @param pInstrGC Guest context pointer to privileged instruction
6014 */
6015static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6016{
6017 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6018 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6019 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6020 return NIL_RTRCPTR;
6021}
6022
6023/**
6024 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6025 * identical match)
6026 *
6027 * @returns corresponding GC pointer in patch block
6028 * @param pVM Pointer to the VM.
6029 * @param pPatch Current patch block pointer
6030 * @param pInstrGC Guest context pointer to privileged instruction
6031 *
6032 */
6033RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6034{
6035 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6036 if (pGuestToPatchRec)
6037 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6038 return NIL_RTRCPTR;
6039}
6040
6041/**
6042 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6043 *
6044 * @returns original GC instruction pointer or 0 if not found
6045 * @param pVM Pointer to the VM.
6046 * @param pPatchGC GC address in patch block
6047 * @param pEnmState State of the translated address (out)
6048 *
6049 */
6050VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6051{
6052 PPATMPATCHREC pPatchRec;
6053 void *pvPatchCoreOffset;
6054 RTRCPTR pPrivInstrGC;
6055
6056 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6057 Assert(!HMIsEnabled(pVM));
6058 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6059 if (pvPatchCoreOffset == 0)
6060 {
6061 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6062 return 0;
6063 }
6064 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6065 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6066 if (pEnmState)
6067 {
6068 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6069 || pPatchRec->patch.uState == PATCH_DIRTY
6070 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6071 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6072 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6073
6074 if ( !pPrivInstrGC
6075 || pPatchRec->patch.uState == PATCH_UNUSABLE
6076 || pPatchRec->patch.uState == PATCH_REFUSED)
6077 {
6078 pPrivInstrGC = 0;
6079 *pEnmState = PATMTRANS_FAILED;
6080 }
6081 else
6082 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6083 {
6084 *pEnmState = PATMTRANS_INHIBITIRQ;
6085 }
6086 else
6087 if ( pPatchRec->patch.uState == PATCH_ENABLED
6088 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6089 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6090 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6091 {
6092 *pEnmState = PATMTRANS_OVERWRITTEN;
6093 }
6094 else
6095 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6096 {
6097 *pEnmState = PATMTRANS_OVERWRITTEN;
6098 }
6099 else
6100 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6101 {
6102 *pEnmState = PATMTRANS_PATCHSTART;
6103 }
6104 else
6105 *pEnmState = PATMTRANS_SAFE;
6106 }
6107 return pPrivInstrGC;
6108}
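/*
 * Illustrative sketch, not part of the original sources: translating an address inside the
 * patch memory back to the corresponding guest address and acting on the reported state.
 * The helper below is hypothetical.
 */
#if 0
static RTRCPTR patmExampleTranslateBack(PVM pVM, RTRCPTR GCPtrFault)
{
    if (!PATMIsPatchGCAddr(pVM, GCPtrFault))
        return GCPtrFault;  /* not inside the patch memory; nothing to translate */

    PATMTRANSSTATE enmState;
    RTRCPTR GCPtrOrg = PATMR3PatchToGCPtr(pVM, GCPtrFault, &enmState);
    if (GCPtrOrg && enmState != PATMTRANS_FAILED && enmState != PATMTRANS_OVERWRITTEN)
        return GCPtrOrg;
    return 0;               /* the caller has to treat this address as unresolvable */
}
#endif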
6109
6110/**
6111 * Returns the GC pointer of the patch for the specified GC address
6112 *
6113 * @returns Patch code GC pointer, or NIL_RTRCPTR if no enabled or dirty patch exists for the address.
6114 * @param pVM Pointer to the VM.
6115 * @param pAddrGC Guest context address
6116 */
6117VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6118{
6119 PPATMPATCHREC pPatchRec;
6120
6121 Assert(!HMIsEnabled(pVM));
6122
6123 /* Find the patch record. */
6124 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6125 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6126 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6127 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6128 return NIL_RTRCPTR;
6129}
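/*
 * Illustrative sketch, not part of the original sources: looking up the patch code entry
 * point for a patched privileged instruction. The helper below is hypothetical.
 */
#if 0
static void patmExampleQueryPatchEntry(PVM pVM, RTRCPTR pPrivInstrGC)
{
    RTRCPTR pPatchCodeGC = PATMR3QueryPatchGCPtr(pVM, pPrivInstrGC);
    if (pPatchCodeGC != NIL_RTRCPTR)
        Log(("Patch code for %RRv starts at %RRv\n", pPrivInstrGC, pPatchCodeGC));
}
#endif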
6130
6131/**
6132 * Attempt to recover dirty instructions
6133 *
6134 * @returns VBox status code.
6135 * @param pVM Pointer to the VM.
6136 * @param pCtx Pointer to the guest CPU context.
6137 * @param pPatch Patch record.
6138 * @param pPatchToGuestRec Patch to guest address record.
6139 * @param pEip GC pointer of trapping instruction.
6140 */
6141static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6142{
6143 DISCPUSTATE CpuOld, CpuNew;
6144 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6145 int rc;
6146 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6147 uint32_t cbDirty;
6148 PRECPATCHTOGUEST pRec;
6149 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6150 PVMCPU pVCpu = VMMGetCpu0(pVM);
6151 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6152
6153 pRec = pPatchToGuestRec;
6154 pCurInstrGC = pOrgInstrGC;
6155 pCurPatchInstrGC = pEip;
6156 cbDirty = 0;
6157 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6158
6159 /* Find all adjacent dirty instructions */
6160 while (true)
6161 {
6162 if (pRec->fJumpTarget)
6163 {
6164 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because the instruction in this function was reused as a jump target\n", pEip, pOrgInstrGC));
6165 pRec->fDirty = false;
6166 return VERR_PATCHING_REFUSED;
6167 }
6168
6169 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6170 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6171 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6172
6173 /* Only harmless instructions are acceptable. */
6174 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6175 if ( RT_FAILURE(rc)
6176 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6177 {
6178 if (RT_SUCCESS(rc))
6179 cbDirty += CpuOld.cbInstr;
6180 else
6181 if (!cbDirty)
6182 cbDirty = 1;
6183 break;
6184 }
6185
6186#ifdef DEBUG
6187 char szBuf[256];
6188 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6189 szBuf, sizeof(szBuf), NULL);
6190 Log(("DIRTY: %s\n", szBuf));
6191#endif
6192 /* Mark as clean; if we fail we'll let it always fault. */
6193 pRec->fDirty = false;
6194
6195 /* Remove old lookup record. */
6196 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6197 pPatchToGuestRec = NULL;
6198
6199 pCurPatchInstrGC += CpuOld.cbInstr;
6200 cbDirty += CpuOld.cbInstr;
6201
6202 /* Let's see if there's another dirty instruction right after. */
6203 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6204 if (!pRec || !pRec->fDirty)
6205 break; /* no more dirty instructions */
6206
6207 /* In case of complex instructions the next guest instruction could be quite far off. */
6208 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6209 }
6210
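         /* Second pass: copy the guest's current instructions over the dirty patch block, as long as each
          * replacement is harmless (or a relative jump that stays within the dirty block) and still fits
          * into the space reclaimed above. */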
6211 if ( RT_SUCCESS(rc)
6212 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6213 )
6214 {
6215 uint32_t cbLeft;
6216
6217 pCurPatchInstrHC = pPatchInstrHC;
6218 pCurPatchInstrGC = pEip;
6219 cbLeft = cbDirty;
6220
6221 while (cbLeft && RT_SUCCESS(rc))
6222 {
6223 bool fValidInstr;
6224
6225 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6226
6227 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6228 if ( !fValidInstr
6229 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6230 )
6231 {
6232 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6233
6234 if ( pTargetGC >= pOrgInstrGC
6235 && pTargetGC <= pOrgInstrGC + cbDirty
6236 )
6237 {
6238 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6239 fValidInstr = true;
6240 }
6241 }
6242
6243 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6244 if ( rc == VINF_SUCCESS
6245 && CpuNew.cbInstr <= cbLeft /* must still fit */
6246 && fValidInstr
6247 )
6248 {
6249#ifdef DEBUG
6250 char szBuf[256];
6251 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6252 szBuf, sizeof(szBuf), NULL);
6253 Log(("NEW: %s\n", szBuf));
6254#endif
6255
6256 /* Copy the new instruction. */
6257 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6258 AssertRC(rc);
6259
6260 /* Add a new lookup record for the duplicated instruction. */
6261 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6262 }
6263 else
6264 {
6265#ifdef DEBUG
6266 char szBuf[256];
6267 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6268 szBuf, sizeof(szBuf), NULL);
6269 Log(("NEW: %s (FAILED)\n", szBuf));
6270#endif
6271 /* Restore the old lookup record for the duplicated instruction. */
6272 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6273
6274 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6275 rc = VERR_PATCHING_REFUSED;
6276 break;
6277 }
6278 pCurInstrGC += CpuNew.cbInstr;
6279 pCurPatchInstrHC += CpuNew.cbInstr;
6280 pCurPatchInstrGC += CpuNew.cbInstr;
6281 cbLeft -= CpuNew.cbInstr;
6282
6283 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6284 if (!cbLeft)
6285 {
6286 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6287 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6288 {
6289 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6290 if (pRec)
6291 {
6292 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6293 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6294
6295 Assert(!pRec->fDirty);
6296
6297 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6298 if (cbFiller >= SIZEOF_NEARJUMP32)
6299 {
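         /* There is room for a 32-bit near jump (0xE9 + rel32); the displacement is relative to the end
          * of the 5-byte jump, so it skips the remaining filler bytes up to the next valid patch instruction. */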
6300 pPatchFillHC[0] = 0xE9;
6301 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6302#ifdef DEBUG
6303 char szBuf[256];
6304 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6305 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6306 Log(("FILL: %s\n", szBuf));
6307#endif
6308 }
6309 else
6310 {
6311 for (unsigned i = 0; i < cbFiller; i++)
6312 {
6313 pPatchFillHC[i] = 0x90; /* NOP */
6314#ifdef DEBUG
6315 char szBuf[256];
6316 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6317 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6318 Log(("FILL: %s\n", szBuf));
6319#endif
6320 }
6321 }
6322 }
6323 }
6324 }
6325 }
6326 }
6327 else
6328 rc = VERR_PATCHING_REFUSED;
6329
6330 if (RT_SUCCESS(rc))
6331 {
6332 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6333 }
6334 else
6335 {
6336 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6337 Assert(cbDirty);
6338
6339 /* Mark the whole instruction stream with breakpoints. */
6340 if (cbDirty)
6341 memset(pPatchInstrHC, 0xCC, cbDirty);
6342
6343 if ( pVM->patm.s.fOutOfMemory == false
6344 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6345 {
6346 rc = patmR3RefreshPatch(pVM, pPatch);
6347 if (RT_FAILURE(rc))
6348 {
6349 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6350 }
6351 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6352 rc = VERR_PATCHING_REFUSED;
6353 }
6354 }
6355 return rc;
6356}
6357
6358/**
6359 * Handle trap inside patch code
6360 *
6361 * @returns VBox status code.
6362 * @param pVM Pointer to the VM.
6363 * @param pCtx Pointer to the guest CPU context.
6364 * @param pEip GC pointer of trapping instruction.
6365 * @param ppNewEip GC pointer to new instruction.
6366 */
6367VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6368{
6369 PPATMPATCHREC pPatch = 0;
6370 void *pvPatchCoreOffset;
6371 RTRCUINTPTR offset;
6372 RTRCPTR pNewEip;
6373 int rc;
6374 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6375 PVMCPU pVCpu = VMMGetCpu0(pVM);
6376
6377 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6378 Assert(pVM->cCpus == 1);
6379
6380 pNewEip = 0;
6381 *ppNewEip = 0;
6382
6383 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6384
6385 /* Find the patch record. */
6386 /* Note: there might not be a patch to guest translation record (global function) */
6387 offset = pEip - pVM->patm.s.pPatchMemGC;
6388 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6389 if (pvPatchCoreOffset)
6390 {
6391 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6392
6393 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6394
6395 if (pPatch->patch.uState == PATCH_DIRTY)
6396 {
6397 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6398 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6399 {
6400 /* Function duplication patches set fPIF to 1 on entry */
6401 pVM->patm.s.pGCStateHC->fPIF = 1;
6402 }
6403 }
6404 else
6405 if (pPatch->patch.uState == PATCH_DISABLED)
6406 {
6407 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6408 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6409 {
6410 /* Function duplication patches set fPIF to 1 on entry */
6411 pVM->patm.s.pGCStateHC->fPIF = 1;
6412 }
6413 }
6414 else
6415 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6416 {
6417 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6418
6419 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6420 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6421 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6422 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6423 }
6424
6425 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6426 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6427
6428 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6429 pPatch->patch.cTraps++;
6430 PATM_STAT_FAULT_INC(&pPatch->patch);
6431 }
6432 else
6433 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6434
6435 /* Check if we were interrupted in PATM generated instruction code. */
6436 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6437 {
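         /* Only a few situations are recoverable here: a pushf/push/call faulting on a monitored or
          * not-yet-writable stack page, or a call into an invalidated patch whose first instruction was
          * overwritten; anything else is treated as a fatal error further down. */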
6438 DISCPUSTATE Cpu;
6439 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6440 AssertRC(rc);
6441
6442 if ( rc == VINF_SUCCESS
6443 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6444 || Cpu.pCurInstr->uOpcode == OP_PUSH
6445 || Cpu.pCurInstr->uOpcode == OP_CALL)
6446 )
6447 {
6448 uint64_t fFlags;
6449
6450 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6451
6452 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6453 {
6454 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6455 if ( rc == VINF_SUCCESS
6456 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6457 {
6458 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6459
6460 /* Reset the PATM stack. */
6461 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6462
6463 pVM->patm.s.pGCStateHC->fPIF = 1;
6464
6465 Log(("Faulting push -> go back to the original instruction\n"));
6466
6467 /* continue at the original instruction */
6468 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6469 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6470 return VINF_SUCCESS;
6471 }
6472 }
6473
6474 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6475 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6476 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6477 if (rc == VINF_SUCCESS)
6478 {
6479 /* The guest page *must* be present. */
6480 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6481 if ( rc == VINF_SUCCESS
6482 && (fFlags & X86_PTE_P))
6483 {
6484 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6485 return VINF_PATCH_CONTINUE;
6486 }
6487 }
6488 }
6489 else
6490 if (pPatch->patch.pPrivInstrGC == pNewEip)
6491 {
6492 /* Invalidated patch or first instruction overwritten.
6493 * We can ignore the fPIF state in this case.
6494 */
6495 /* Reset the PATM stack. */
6496 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6497
6498 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6499
6500 pVM->patm.s.pGCStateHC->fPIF = 1;
6501
6502 /* continue at the original instruction */
6503 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6504 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6505 return VINF_SUCCESS;
6506 }
6507
6508 char szBuf[256];
6509 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6510
6511 /* Very bad. We crashed in emitted code. Probably stack? */
6512 if (pPatch)
6513 {
6514 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6515 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n",
6516 pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags,
6517 pPatchToGuestRec->fDirty, szBuf));
6518 }
6519 else
6520 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6521 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6522 EMR3FatalError(pVCpu, VERR_PATM_IPE_TRAP_IN_PATCH_CODE);
6523 }
6524
6525 /* From here on, we must have a valid patch to guest translation. */
6526 if (pvPatchCoreOffset == 0)
6527 {
6528 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6529 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6530 return VERR_PATCH_NOT_FOUND;
6531 }
6532
6533 /* Take care of dirty/changed instructions. */
6534 if (pPatchToGuestRec->fDirty)
6535 {
6536 Assert(pPatchToGuestRec->Core.Key == offset);
6537 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6538
6539 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6540 if (RT_SUCCESS(rc))
6541 {
6542 /* Retry the current instruction. */
6543 pNewEip = pEip;
6544 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6545 }
6546 else
6547 {
6548 /* Reset the PATM stack. */
6549 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6550
6551 rc = VINF_SUCCESS; /* Continue at original instruction. */
6552 }
6553
6554 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6555 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6556 return rc;
6557 }
6558
6559#ifdef VBOX_STRICT
6560 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6561 {
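         /* Strict builds only: if the trap hit a 'retn' inside a duplicated function, log the return
          * address found on the guest stack against the one recorded on the PATM stack. */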
6562 DISCPUSTATE cpu;
6563 bool disret;
6564 uint32_t cbInstr;
6565 PATMP2GLOOKUPREC cacheRec;
6566 RT_ZERO(cacheRec);
6567 cacheRec.pPatch = &pPatch->patch;
6568
6569 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6570 &cpu, &cbInstr);
6571 if (cacheRec.Lock.pvMap)
6572 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6573
6574 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6575 {
6576 RTRCPTR retaddr;
6577 PCPUMCTX pCtx2;
6578
6579 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6580
6581 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6582 AssertRC(rc);
6583
6584 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6585 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6586 }
6587 }
6588#endif
6589
6590 /* Return original address, correct by subtracting the CS base address. */
6591 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6592
6593 /* Reset the PATM stack. */
6594 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6595
6596 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6597 {
6598 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6599 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6600#ifdef VBOX_STRICT
6601 DISCPUSTATE cpu;
6602 bool disret;
6603 uint32_t cbInstr;
6604 PATMP2GLOOKUPREC cacheRec;
6605 RT_ZERO(cacheRec);
6606 cacheRec.pPatch = &pPatch->patch;
6607
6608 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6609 &cpu, &cbInstr);
6610 if (cacheRec.Lock.pvMap)
6611 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6612
6613 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6614 {
6615 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6616 &cpu, &cbInstr);
6617 if (cacheRec.Lock.pvMap)
6618 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6619
6620 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6621 }
6622#endif
6623 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6624 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6625 }
6626
6627 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6628 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6629 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6630 {
6631 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6632 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6633 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6634 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6635 return VERR_PATCH_DISABLED;
6636 }
6637
6638#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6639 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6640 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6641 {
6642 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6643 //we are only wasting time, back out the patch
6644 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6645 pTrapRec->pNextPatchInstr = 0;
6646 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6647 return VERR_PATCH_DISABLED;
6648 }
6649#endif
6650
6651 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6652 return VINF_SUCCESS;
6653}
6654
6655
6656/**
6657 * Handle page-fault in monitored page
6658 *
6659 * @returns VBox status code.
6660 * @param pVM Pointer to the VM.
6661 */
6662VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6663{
6664 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6665 PVMCPU pVCpu = VMMGetCpu0(pVM);
6666
6667 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6668 addr &= PAGE_BASE_GC_MASK;
6669
6670 int rc = PGMHandlerVirtualDeregister(pVM, pVCpu, addr, false /*fHypervisor*/);
6671 AssertRC(rc); NOREF(rc);
6672
6673 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6674 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6675 {
6676 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6677 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6678 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6679 if (rc == VWRN_PATCH_REMOVED)
6680 return VINF_SUCCESS;
6681
6682 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6683
6684 if (addr == pPatchRec->patch.pPrivInstrGC)
6685 addr++;
6686 }
6687
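     /* Walk the remaining patches on the same guest page and refresh (disable + re-enable) any that are
      * still enabled, since the monitored write may have hit any of them. */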
6688 for(;;)
6689 {
6690 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6691
6692 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6693 break;
6694
6695 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6696 {
6697 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6698 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6699 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6700 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6701 }
6702 addr = pPatchRec->patch.pPrivInstrGC + 1;
6703 }
6704
6705 pVM->patm.s.pvFaultMonitor = 0;
6706 return VINF_SUCCESS;
6707}
6708
6709
6710#ifdef VBOX_WITH_STATISTICS
6711
6712static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6713{
6714 if (pPatch->flags & PATMFL_SYSENTER)
6715 {
6716 return "SYSENT";
6717 }
6718 else
6719 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6720 {
6721 static char szTrap[16];
6722 uint32_t iGate;
6723
6724 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6725 if (iGate < 256)
6726 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6727 else
6728 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6729 return szTrap;
6730 }
6731 else
6732 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6733 return "DUPFUNC";
6734 else
6735 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6736 return "FUNCCALL";
6737 else
6738 if (pPatch->flags & PATMFL_TRAMPOLINE)
6739 return "TRAMP";
6740 else
6741 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6742}
6743
6744static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6745{
6746 NOREF(pVM);
6747 switch(pPatch->uState)
6748 {
6749 case PATCH_ENABLED:
6750 return "ENA";
6751 case PATCH_DISABLED:
6752 return "DIS";
6753 case PATCH_DIRTY:
6754 return "DIR";
6755 case PATCH_UNUSABLE:
6756 return "UNU";
6757 case PATCH_REFUSED:
6758 return "REF";
6759 case PATCH_DISABLE_PENDING:
6760 return "DIP";
6761 default:
6762 AssertFailed();
6763 return " ";
6764 }
6765}
6766
6767/**
6768 * Resets the sample.
6769 * @param pVM Pointer to the VM.
6770 * @param pvSample The sample registered using STAMR3RegisterCallback.
6771 */
6772static void patmResetStat(PVM pVM, void *pvSample)
6773{
6774 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6775 Assert(pPatch);
6776
6777 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6778 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6779}
6780
6781/**
6782 * Prints the sample into the buffer.
6783 *
6784 * @param pVM Pointer to the VM.
6785 * @param pvSample The sample registered using STAMR3RegisterCallback.
6786 * @param pszBuf The buffer to print into.
6787 * @param cchBuf The size of the buffer.
6788 */
6789static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6790{
6791 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6792 Assert(pPatch);
6793
6794 Assert(pPatch->uState != PATCH_REFUSED);
6795 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6796
6797 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6798 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6799 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6800}
6801
6802/**
6803 * Returns the GC address of the corresponding patch statistics counter
6804 *
6805 * @returns Stat address
6806 * @param pVM Pointer to the VM.
6807 * @param pPatch Patch structure
6808 */
6809RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6810{
6811 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6812 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6813}
6814
6815#endif /* VBOX_WITH_STATISTICS */
6816#ifdef VBOX_WITH_DEBUGGER
6817
6818/**
6819 * The '.patmoff' command.
6820 *
6821 * @returns VBox status.
6822 * @param pCmd Pointer to the command descriptor (as registered).
6823 * @param pCmdHlp Pointer to command helper functions.
6824 * @param pUVM The user mode VM handle.
6825 * @param paArgs Pointer to (readonly) array of arguments.
6826 * @param cArgs Number of arguments in the array.
6827 */
6828static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6829{
6830 /*
6831 * Validate input.
6832 */
6833 NOREF(cArgs); NOREF(paArgs);
6834 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6835 PVM pVM = pUVM->pVM;
6836 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6837
6838 if (HMIsEnabled(pVM))
6839 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6840
6841 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6842 PATMR3AllowPatching(pVM->pUVM, false);
6843 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6844}
6845
6846/**
6847 * The '.patmon' command.
6848 *
6849 * @returns VBox status.
6850 * @param pCmd Pointer to the command descriptor (as registered).
6851 * @param pCmdHlp Pointer to command helper functions.
6852 * @param pUVM The user mode VM handle.
6853 * @param paArgs Pointer to (readonly) array of arguments.
6854 * @param cArgs Number of arguments in the array.
6855 */
6856static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6857{
6858 /*
6859 * Validate input.
6860 */
6861 NOREF(cArgs); NOREF(paArgs);
6862 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6863 PVM pVM = pUVM->pVM;
6864 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6865
6866 if (HMIsEnabled(pVM))
6867 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6868
6869 PATMR3AllowPatching(pVM->pUVM, true);
6870 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6871 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6872}
6873
6874#endif /* VBOX_WITH_DEBUGGER */