VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp@ 39944

Last change on this file since 39944 was 39078, checked in by vboxsync, 13 years ago

VMM: -Wunused-parameter

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 57.1 KB
Line 
1/* $Id: PATMSSM.cpp 39078 2011-10-21 14:18:22Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/mm.h>
27#include <VBox/vmm/ssm.h>
28#include <VBox/param.h>
29#include <iprt/avl.h>
30#include "PATMInternal.h"
31#include "PATMPatch.h"
32#include "PATMA.h"
33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/csam.h>
35#include "internal/pgm.h"
36#include <VBox/dbg.h>
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/string.h>
42#include <VBox/dis.h>
43#include <VBox/disopcode.h>
44
/**
 * Patch information - SSM version.
 *
 * Differs from PATCHINFO only by the missing pTrampolinePatchesHead member,
 * so that the saved state version does not need to change for now (will come
 * later).  Keep the layout in sync with PATCHINFO otherwise.
 */
typedef struct _PATCHINFOSSM
{
    uint32_t              uState;               /**< Current patch state. */
    uint32_t              uOldState;            /**< Previous patch state. */
    DISCPUMODE            uOpMode;              /**< Disassembly mode of the patched code. */

    /* GC pointer of privileged instruction */
    RCPTRTYPE(uint8_t *)  pPrivInstrGC;
    R3PTRTYPE(uint8_t *)  unusedHC;             /**< @todo Can't remove due to structure size dependencies in saved states. */
    uint8_t               aPrivInstr[MAX_INSTR_SIZE]; /**< Copy of the original privileged instruction bytes. */
    uint32_t              cbPrivInstr;          /**< Size of the privileged instruction in bytes. */
    uint32_t              opcode;               /**< Opcode for priv instr (OP_*). */
    uint32_t              cbPatchJump;          /**< Patch jump size. */

    /* Only valid for PATMFL_JUMP_CONFLICT patches */
    RTRCPTR               pPatchJumpDestGC;

    RTGCUINTPTR32         pPatchBlockOffset;    /**< Offset of the generated code into the patch memory block. */
    uint32_t              cbPatchBlockSize;     /**< Size of the generated patch code. */
    uint32_t              uCurPatchOffset;
#if HC_ARCH_BITS == 64
    uint32_t              Alignment0;           /**< Align flags correctly. */
#endif

    uint64_t              flags;                /**< PATMFL_* flags. */

    /**
     * Lowest and highest patched GC instruction address. To optimize searches.
     */
    RTRCPTR               pInstrGCLowest;
    RTRCPTR               pInstrGCHighest;

    /* Tree of fixup records for the patch. */
    R3PTRTYPE(PAVLPVNODECORE) FixupTree;
    uint32_t              nrFixups;             /**< Number of records in FixupTree. */

    /* Tree of jumps inside the generated patch code. */
    uint32_t              nrJumpRecs;
    R3PTRTYPE(PAVLPVNODECORE) JumpTree;

    /**
     * Lookup trees for determining the corresponding guest address of an
     * instruction in the patch block.
     */
    R3PTRTYPE(PAVLU32NODECORE) Patch2GuestAddrTree;
    R3PTRTYPE(PAVLU32NODECORE) Guest2PatchAddrTree;
    uint32_t              nrPatch2GuestRecs;    /**< Number of records in Patch2GuestAddrTree. */
#if HC_ARCH_BITS == 64
    uint32_t              Alignment1;
#endif

    /* Unused, but can't remove due to structure size dependencies in the saved state. */
    PATMP2GLOOKUPREC_OBSOLETE unused;

    /* Temporary information during patch creation. Don't waste hypervisor memory for this. */
    R3PTRTYPE(PPATCHINFOTEMP) pTempInfo;

    /* Count the number of writes to the corresponding guest code. */
    uint32_t              cCodeWrites;

    /* Count the number of traps taken; statistics to determine if we should keep this patch activated. */
    uint32_t              cTraps;

    /* Count the number of invalid writes to pages monitored for the patch. */
    uint32_t              cInvalidWrites;

    /* Index into the uPatchRun and uPatchTrap arrays (0..MAX_PATCHES-1) */
    uint32_t              uPatchIdx;

    /* First opcode byte, that's overwritten when a patch is marked dirty. */
    uint8_t               bDirtyOpcode;
    uint8_t               Alignment2[7];        /**< Align the structure size on a 8-byte boundary. */
} PATCHINFOSSM, *PPATCHINFOSSM;
124
/**
 * Lookup record for patches - SSM version.
 *
 * Mirrors PATMPATCHREC, but carries the patch data in its saved-state
 * layout (PATCHINFOSSM).
 */
typedef struct PATMPATCHRECSSM
{
    /** The key is a GC virtual address. */
    AVLOU32NODECORE  Core;
    /** The key is a patch offset. */
    AVLOU32NODECORE  CoreOffset;

    /** The patch information in saved-state layout. */
    PATCHINFOSSM     patch;
} PATMPATCHRECSSM, *PPATMPATCHRECSSM;
137
138/*******************************************************************************
139* Defined Constants And Macros *
140*******************************************************************************/
/** Turns the pointer/member @a a into an offset by subtracting base pointer @a b in place
 *  (used when saving to make HC pointers location independent). */
#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
/** Re-bases the offset stored in @a a onto base pointer @a b in place
 *  (used when loading to turn saved offsets back into HC pointers). */
#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)
143
144/*******************************************************************************
145* Internal Functions *
146*******************************************************************************/
147static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
148
149/*******************************************************************************
150* Global Variables *
151*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * Entries marked IGNORE / IGN_* are skipped when loading (host-context
 * pointers and statistics get recalculated or reset instead).
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* Statistics below are not part of the restored state. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
248
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(         PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(         PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(         PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(         PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(         PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(         PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(         PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(         PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(         PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(   PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(   PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(   PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(         PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(         PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(         PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(         PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(         PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
273
/**
 * SSM descriptor table for the PATMPATCHREC structure.
 *
 * Note: described in terms of PATMPATCHRECSSM, the saved-state layout of
 * the record (it lacks the pTrampolinePatchesHead member).
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unusedHC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocEndHC),   // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHRECSSM, patch.unused.pGuestLoc),        // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.unused.opsize),           // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
327
/**
 * SSM descriptor table for the RELOCREC structure.
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key),        // Used to store the relocation type (see patmSaveFixupRecords).
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos),       // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
345
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 5),
    SSMFIELD_ENTRY_TERM()
};
365
366#ifdef VBOX_STRICT
367
368/**
369 * Callback function for RTAvlPVDoWithAll
370 *
371 * Counts the number of patches in the tree
372 *
373 * @returns VBox status code.
374 * @param pNode Current node
375 * @param pcPatches Pointer to patch counter (uint32_t)
376 */
377static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
378{
379 NOREF(pNode);
380 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
381 return VINF_SUCCESS;
382}
383
384/**
385 * Callback function for RTAvlU32DoWithAll
386 *
387 * Counts the number of patches in the tree
388 *
389 * @returns VBox status code.
390 * @param pNode Current node
391 * @param pcPatches Pointer to patch counter (uint32_t)
392 */
393static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
394{
395 NOREF(pNode);
396 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
397 return VINF_SUCCESS;
398}
399
400#endif /* VBOX_STRICT */
401
402/**
403 * Callback function for RTAvloU32DoWithAll
404 *
405 * Counts the number of patches in the tree
406 *
407 * @returns VBox status code.
408 * @param pNode Current node
409 * @param pcPatches Pointer to patch counter
410 */
411static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
412{
413 NOREF(pNode);
414 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
415 return VINF_SUCCESS;
416}
417
418/**
419 * Callback function for RTAvlU32DoWithAll
420 *
421 * Saves all patch to guest lookup records.
422 *
423 * @returns VBox status code.
424 * @param pNode Current node
425 * @param pVM1 VM Handle
426 */
427static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
428{
429 PVM pVM = (PVM)pVM1;
430 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
431 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
432
433 /* Save the lookup record. */
434 int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
435 AssertRCReturn(rc, rc);
436
437 return VINF_SUCCESS;
438}
439
/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Saves one fixup record, first making its HC data location independent.
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a RELOCREC fixup record).
 * @param   pVM1    VM Handle (opaque, cast back to PVM).
 */
static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    RELOCREC rec = *(PRELOCREC)pNode;   /* Work on a copy; the live tree node must stay intact. */
    /* Keep a pointer to the fixup location in patch memory *before* pRelocPos is
       converted to an offset below; we still need to read the fixup value. */
    RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos;

    Assert(rec.pRelocPos);
    /* Convert pointer to an offset into patch memory. */
    PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);

    if (rec.uType == FIXUP_ABSOLUTE)
    {
        /* Core.Key abused to store the fixup type, so the load code can
         * re-resolve these well-known targets in the new VM layout. */
        if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdDefRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdStdRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdExtRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdCentaurRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
    }

    /* Save the lookup record. */
    int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
485
/**
 * Converts a saved state patch record to the in-memory record.
 *
 * @returns nothing.
 * @param   pPatch      The memory record (destination).
 * @param   pPatchSSM   The SSM version of the patch record (source).
 */
static void patmR3PatchConvertSSM2Mem(PPATMPATCHREC pPatch, PPATMPATCHRECSSM pPatchSSM)
{
    /*
     * Only restore the patch part of the tree record; not the internal data (except the key of course)
     */
    pPatch->Core.Key                  = pPatchSSM->Core.Key;
    pPatch->CoreOffset.Key            = pPatchSSM->CoreOffset.Key;
    pPatch->patch.uState              = pPatchSSM->patch.uState;
    pPatch->patch.uOldState           = pPatchSSM->patch.uOldState;
    pPatch->patch.uOpMode             = pPatchSSM->patch.uOpMode;
    pPatch->patch.pPrivInstrGC        = pPatchSSM->patch.pPrivInstrGC;
    pPatch->patch.unusedHC            = pPatchSSM->patch.unusedHC;
    memcpy(&pPatch->patch.aPrivInstr[0], &pPatchSSM->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatch->patch.cbPrivInstr         = pPatchSSM->patch.cbPrivInstr;
    pPatch->patch.opcode              = pPatchSSM->patch.opcode;
    pPatch->patch.cbPatchJump         = pPatchSSM->patch.cbPatchJump;
    pPatch->patch.pPatchJumpDestGC    = pPatchSSM->patch.pPatchJumpDestGC;
    pPatch->patch.pPatchBlockOffset   = pPatchSSM->patch.pPatchBlockOffset;
    pPatch->patch.cbPatchBlockSize    = pPatchSSM->patch.cbPatchBlockSize;
    pPatch->patch.uCurPatchOffset     = pPatchSSM->patch.uCurPatchOffset;
    pPatch->patch.flags               = pPatchSSM->patch.flags;
    pPatch->patch.pInstrGCLowest      = pPatchSSM->patch.pInstrGCLowest;
    pPatch->patch.pInstrGCHighest     = pPatchSSM->patch.pInstrGCHighest;
    pPatch->patch.FixupTree           = pPatchSSM->patch.FixupTree;
    pPatch->patch.nrFixups            = pPatchSSM->patch.nrFixups;
    pPatch->patch.nrJumpRecs          = pPatchSSM->patch.nrJumpRecs;
    pPatch->patch.JumpTree            = pPatchSSM->patch.JumpTree;
    pPatch->patch.Patch2GuestAddrTree = pPatchSSM->patch.Patch2GuestAddrTree;
    pPatch->patch.Guest2PatchAddrTree = pPatchSSM->patch.Guest2PatchAddrTree;
    pPatch->patch.nrPatch2GuestRecs   = pPatchSSM->patch.nrPatch2GuestRecs;
    pPatch->patch.unused              = pPatchSSM->patch.unused;
    pPatch->patch.pTempInfo           = pPatchSSM->patch.pTempInfo;
    pPatch->patch.cCodeWrites         = pPatchSSM->patch.cCodeWrites;
    pPatch->patch.cTraps              = pPatchSSM->patch.cTraps;
    pPatch->patch.cInvalidWrites      = pPatchSSM->patch.cInvalidWrites;
    pPatch->patch.uPatchIdx           = pPatchSSM->patch.uPatchIdx;
    pPatch->patch.bDirtyOpcode        = pPatchSSM->patch.bDirtyOpcode;
    /* Not present in the saved-state layout (see PATCHINFOSSM); starts out empty. */
    pPatch->patch.pTrampolinePatchesHead = NULL;
}
532
/**
 * Converts a memory patch record to the saved state version.
 *
 * Field-for-field copy into the SSM layout; the in-memory-only
 * pTrampolinePatchesHead member has no counterpart and is dropped.
 *
 * @returns nothing.
 * @param   pPatchSSM   The saved state record (destination).
 * @param   pPatch      The memory version to save (source).
 */
static void patmR3PatchConvertMem2SSM(PPATMPATCHRECSSM pPatchSSM, PPATMPATCHREC pPatch)
{
    pPatchSSM->Core                      = pPatch->Core;
    pPatchSSM->CoreOffset                = pPatch->CoreOffset;
    pPatchSSM->patch.uState              = pPatch->patch.uState;
    pPatchSSM->patch.uOldState           = pPatch->patch.uOldState;
    pPatchSSM->patch.uOpMode             = pPatch->patch.uOpMode;
    pPatchSSM->patch.pPrivInstrGC        = pPatch->patch.pPrivInstrGC;
    pPatchSSM->patch.unusedHC            = pPatch->patch.unusedHC;
    memcpy(&pPatchSSM->patch.aPrivInstr[0], &pPatch->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatchSSM->patch.cbPrivInstr         = pPatch->patch.cbPrivInstr;
    pPatchSSM->patch.opcode              = pPatch->patch.opcode;
    pPatchSSM->patch.cbPatchJump         = pPatch->patch.cbPatchJump;
    pPatchSSM->patch.pPatchJumpDestGC    = pPatch->patch.pPatchJumpDestGC;
    pPatchSSM->patch.pPatchBlockOffset   = pPatch->patch.pPatchBlockOffset;
    pPatchSSM->patch.cbPatchBlockSize    = pPatch->patch.cbPatchBlockSize;
    pPatchSSM->patch.uCurPatchOffset     = pPatch->patch.uCurPatchOffset;
    pPatchSSM->patch.flags               = pPatch->patch.flags;
    pPatchSSM->patch.pInstrGCLowest      = pPatch->patch.pInstrGCLowest;
    pPatchSSM->patch.pInstrGCHighest     = pPatch->patch.pInstrGCHighest;
    pPatchSSM->patch.FixupTree           = pPatch->patch.FixupTree;
    pPatchSSM->patch.nrFixups            = pPatch->patch.nrFixups;
    pPatchSSM->patch.nrJumpRecs          = pPatch->patch.nrJumpRecs;
    pPatchSSM->patch.JumpTree            = pPatch->patch.JumpTree;
    pPatchSSM->patch.Patch2GuestAddrTree = pPatch->patch.Patch2GuestAddrTree;
    pPatchSSM->patch.Guest2PatchAddrTree = pPatch->patch.Guest2PatchAddrTree;
    pPatchSSM->patch.nrPatch2GuestRecs   = pPatch->patch.nrPatch2GuestRecs;
    pPatchSSM->patch.unused              = pPatch->patch.unused;
    pPatchSSM->patch.pTempInfo           = pPatch->patch.pTempInfo;
    pPatchSSM->patch.cCodeWrites         = pPatch->patch.cCodeWrites;
    pPatchSSM->patch.cTraps              = pPatch->patch.cTraps;
    pPatchSSM->patch.cInvalidWrites      = pPatch->patch.cInvalidWrites;
    pPatchSSM->patch.uPatchIdx           = pPatch->patch.uPatchIdx;
    pPatchSSM->patch.bDirtyOpcode        = pPatch->patch.bDirtyOpcode;
}
575
/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Saves the state of the patch that's being enumerated: the patch record
 * itself (in SSM layout), followed by its fixup records and its
 * patch-to-guest lookup records.
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a PATMPATCHREC).
 * @param   pVM1    VM Handle (opaque, cast back to PVM).
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
{
    PVM             pVM    = (PVM)pVM1;
    PPATMPATCHREC   pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHRECSSM patch;
    PSSMHANDLE      pSSM   = pVM->patm.s.savedstate.pSSM;
    int             rc;

    /* Global function patch records are not expected in this enumeration. */
    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    /* Convert to the saved-state layout (no pTrampolinePatchesHead member). */
    patmR3PatchConvertMem2SSM(&patch, pPatch);

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    /* Save the patch record itself */
    rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    /* Sanity: the fixup counter must match the actual tree contents. */
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg(nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);

#ifdef VBOX_STRICT
    /* Sanity: the patch-to-guest record counter must match the tree. */
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
    return VINF_SUCCESS;
}
629
/**
 * Execute state save operation.
 *
 * Writes, in order: the scrubbed PATM structure, the patch memory contents,
 * the GC state, the PATM stack page, and finally every patch record.  The
 * load code (patmR3Load) depends on exactly this order.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PATM patmInfo = pVM->patm.s;    /* Stack copy; scrubbed below without touching the live state. */
    int  rc;

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC    = NULL;
    patmInfo.pGCStateHC     = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
693
694/**
695 * Execute state load operation.
696 *
697 * @returns VBox status code.
698 * @param pVM VM Handle.
699 * @param pSSM SSM operation handle.
700 * @param uVersion Data layout version.
701 * @param uPass The data pass.
702 */
703DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
704{
705 PATM patmInfo;
706 int rc;
707
708 if ( uVersion != PATM_SSM_VERSION
709 && uVersion != PATM_SSM_VERSION_FIXUP_HACK
710 && uVersion != PATM_SSM_VERSION_VER16
711 )
712 {
713 AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
714 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
715 }
716 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
717
718 pVM->patm.s.savedstate.pSSM = pSSM;
719
720 /*
721 * Restore PATM structure
722 */
723 RT_ZERO(patmInfo);
724 rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmFields[0], NULL);
725 AssertRCReturn(rc, rc);
726
727 /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
728 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
729 if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
730 || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
731 || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
732 || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
733 {
734 AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
735 return VERR_SSM_INVALID_STATE;
736 }
737
738 if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
739 {
740 AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
741 return VERR_SSM_INVALID_STATE;
742 }
743 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
744 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
745 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
746 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
747
748 /* Lowest and highest patched instruction */
749 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
750 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
751
752 /* Sysenter handlers */
753 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
754 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
755 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
756
757 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
758
759 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
760 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
761 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
762 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
763
764
765 /** @note patch statistics are not restored. */
766
767 /*
768 * Restore patch memory contents
769 */
770 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
771 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
772 AssertRCReturn(rc, rc);
773
774 /*
775 * Restore GC state memory
776 */
777 RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
778 rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmGCStateFields[0], NULL);
779 AssertRCReturn(rc, rc);
780
781 /*
782 * Restore PATM stack page
783 */
784 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
785 AssertRCReturn(rc, rc);
786
787 /*
788 * Load all patches
789 */
790 for (unsigned i = 0; i < patmInfo.savedstate.cPatches; i++)
791 {
792 PATMPATCHRECSSM patch;
793 PATMPATCHREC *pPatchRec;
794
795 RT_ZERO(patch);
796 rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmPatchRecFields[0], NULL);
797 AssertRCReturn(rc, rc);
798
799 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
800
801 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
802 if (RT_FAILURE(rc))
803 {
804 AssertMsgFailed(("Out of memory!!!!\n"));
805 return VERR_NO_MEMORY;
806 }
807
808 /* Convert SSM version to memory. */
809 patmR3PatchConvertSSM2Mem(pPatchRec, &patch);
810
811 Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
812 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
813 Assert(ret);
814 if (pPatchRec->patch.uState != PATCH_REFUSED)
815 {
816 if (pPatchRec->patch.pPatchBlockOffset)
817 {
818 /* We actually generated code for this patch. */
819 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
820 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
821 }
822 }
823 /* Set to zero as we don't need it anymore. */
824 pPatchRec->patch.pTempInfo = 0;
825
826 PATMP2GLOOKUPREC cacheRec;
827 RT_ZERO(cacheRec);
828 cacheRec.pPatch = &pPatchRec->patch;
829
830 uint8_t *pPrivInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pPatchRec->patch.pPrivInstrGC);
831 /* Can fail due to page or page table not present. */
832
833 /*
834 * Restore fixup records and correct HC pointers in fixup records
835 */
836 pPatchRec->patch.FixupTree = 0;
837 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
838 for (unsigned j = 0; j < patch.patch.nrFixups; j++)
839 {
840 RELOCREC rec;
841 int32_t offset;
842 RTRCPTR *pFixup;
843
844 RT_ZERO(rec);
845 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRelocRec[0], NULL);
846 AssertRCReturn(rc, rc);
847
848 if (pPrivInstrHC)
849 {
850 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
851 offset = (int32_t)(intptr_t)rec.pRelocPos;
852 /* Convert to HC pointer again. */
853 PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
854 pFixup = (RTRCPTR *)rec.pRelocPos;
855
856 if (pPatchRec->patch.uState != PATCH_REFUSED)
857 {
858 if ( rec.uType == FIXUP_REL_JMPTOPATCH
859 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
860 {
861 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
862 unsigned offset2 = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
863
864 rec.pRelocPos = pPrivInstrHC + offset2;
865 pFixup = (RTRCPTR *)rec.pRelocPos;
866 }
867
868 patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
869 }
870
871 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
872 AssertRCReturn(rc, rc);
873 }
874 }
875 /* Release previous lock if any. */
876 if (cacheRec.Lock.pvMap)
877 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
878
879 /* And all patch to guest lookup records */
880 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
881
882 pPatchRec->patch.Patch2GuestAddrTree = 0;
883 pPatchRec->patch.Guest2PatchAddrTree = 0;
884 if (pPatchRec->patch.nrPatch2GuestRecs)
885 {
886 RECPATCHTOGUEST rec;
887 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
888
889 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
890 for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
891 {
892 RT_ZERO(rec);
893 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRecPatchToGuest[0], NULL);
894 AssertRCReturn(rc, rc);
895
896 patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
897 }
898 Assert(pPatchRec->patch.Patch2GuestAddrTree);
899 }
900
901 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
902 {
903 /* Insert the guest page lookup records (for detection self-modifying code) */
904 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
905 AssertRCReturn(rc, rc);
906 }
907
908#if 0 /* can fail def LOG_ENABLED */
909 if ( pPatchRec->patch.uState != PATCH_REFUSED
910 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
911 {
912 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
913 Log(("Patch code ----------------------------------------------------------\n"));
914 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
915 Log(("Patch code ends -----------------------------------------------------\n"));
916 MMR3HeapFree(pPatchRec->patch.pTempInfo);
917 pPatchRec->patch.pTempInfo = NULL;
918 }
919#endif
920 /* Remove the patch in case the gc mapping is not present. */
921 if ( !pPrivInstrHC
922 && pPatchRec->patch.uState == PATCH_ENABLED)
923 {
924 Log(("Remove patch %RGv due to failed HC address translation\n", pPatchRec->patch.pPrivInstrGC));
925 PATMR3RemovePatch(pVM, pPatchRec->patch.pPrivInstrGC);
926 }
927 }
928
929 /*
930 * Correct absolute fixups in the global patch. (helper functions)
931 * Bit of a mess. Uses the new patch record, but restored patch functions.
932 */
933 PRELOCREC pRec = 0;
934 AVLPVKEY key = 0;
935
936 Log(("Correct fixups in global helper functions\n"));
937 while (true)
938 {
939 int32_t offset;
940 RTRCPTR *pFixup;
941
942 /* Get the record that's closest from above */
943 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
944 if (pRec == 0)
945 break;
946
947 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
948
949 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
950 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
951 pFixup = (RTRCPTR *)pRec->pRelocPos;
952
953 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
954 patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
955 }
956
957#ifdef VBOX_WITH_STATISTICS
958 /*
959 * Restore relevant old statistics
960 */
961 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
962 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
963 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
964 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
965#endif
966
967 return VINF_SUCCESS;
968}
969
/**
 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
 *
 * Rebases a single relocation record after a saved-state restore: absolute
 * references into the old PATM structures (GC state, CPUMCTX, statistics,
 * stack, patch memory) are translated to the current mappings, and the two
 * relative jump fixup types have their displacements recomputed from the
 * patch-memory delta.
 *
 * @param   pVM             VM Handle.
 * @param   ulSSMVersion    SSM version of the saved state being restored.
 * @param   patmInfo        Saved PATM structure (carries the *old* GC addresses).
 * @param   pPatch          Patch record the relocation belongs to.
 * @param   pRec            Relocation record; corrected in place (pSource/pDest updated).
 * @param   offset          Offset of referenced data/code inside the old patch memory
 *                          (only used for logging).
 * @param   pFixup          Host pointer to the fixup value to rewrite.
 */
static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
{
    /* How far the patch memory moved between save and restore (GC addresses). */
    int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;

    /* NOTE(review): no default case — fixup records with an unknown uType are
     * silently left untouched; consider asserting on unexpected types. */
    switch (pRec->uType)
    {
    case FIXUP_ABSOLUTE:
    {
        /* Only absolute fixups whose source lies inside patch memory need
           rebasing; anything pointing elsewhere is left as-is. */
        if (pRec->pSource && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource))
            break;

        /* Reference into the saved PATMGCSTATE area: keep the intra-structure
           offset, swap in the new base address. */
        if (    *pFixup >= patmInfo.pGCStateGC
            &&  *pFixup <  patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
        {
            LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
            *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
        }
        else
        /* Reference into the saved guest CPU context. */
        if (    *pFixup >= patmInfo.pCPUMCtxGC
            &&  *pFixup <  patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
        {
            LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));

            /* The CPUMCTX structure has completely changed, so correct the offsets too.
             * For version-1.6 states each known field offset in the old layout
             * (CPUMCTX_VER1_6) is mapped one-by-one to its offset in the
             * current CPUMCTX layout. */
            if (ulSSMVersion == PATM_SSM_VERSION_VER16)
            {
                unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;

                /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
                 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
                 * function is not available in older gcc versions, at least not in gcc-3.3,
                 * hence the if/else chain instead of a switch. */
                if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
                {
                    LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
                {
                    LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
                {
                    LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
                {
                    LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
                {
                    LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
                {
                    LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
                {
                    LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
                {
                    LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
                {
                    LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
                {
                    LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
                {
                    LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
                {
                    LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
                {
                    LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
                {
                    LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
                {
                    LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
                {
                    LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
                {
                    LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
                {
                    LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
                }
                else
                    AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
            }
            else
                /* Newer states: same layout, just rebase onto the new CPUMCTX address. */
                *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
        }
        else
        /* Reference into the saved statistics area. */
        if (    *pFixup >= patmInfo.pStatsGC
            &&  *pFixup <  patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
        {
            LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
            *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
        }
        else
        /* Reference into the saved PATM stack page(s). */
        if (    *pFixup >= patmInfo.pGCStackGC
            &&  *pFixup <  patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
        {
            LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
            *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
        }
        else
        /* Reference into the patch memory itself. */
        if (    *pFixup >= patmInfo.pPatchMemGC
            &&  *pFixup <  patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
        {
            LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
            *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
        }
        else
        /* Boldly ASSUMES:
         * 1. That pCPUMCtxGC is in the VM structure and that its location is
         *    at the first page of the same 4 MB chunk.
         * 2. That the forced actions were in the first 32 bytes of the VM
         *    structure.
         * 3. That the CPUM leafs are less than 8KB into the structure. */
        if (    ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
            &&  *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
        {
            LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
            *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
        }
        else
        if (    ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
            &&  *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(8192))
        {
            /* Round-robin over the four CPUID leaf arrays; the counter is
             * static, so the ordering assumption spans ALL patches restored
             * in one load. */
            static int cCpuidFixup = 0;
#ifdef LOG_ENABLED
            RTRCPTR oldFixup = *pFixup;
#endif
            /* very dirty assumptions about the cpuid patch and cpuid ordering. */
            switch(cCpuidFixup & 3)
            {
            case 0:
                *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                break;
            case 1:
                *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                break;
            case 2:
                *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                break;
            case 3:
                *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                break;
            }
            LogFlow(("Changing cpuid fixup %d from %RRv to %RRv\n", cCpuidFixup, oldFixup, *pFixup));
            cCpuidFixup++;
        }
        else
        /* Modern states record the fixup kind explicitly, so no guessing. */
        if (ulSSMVersion >= PATM_SSM_VERSION)
        {
#ifdef LOG_ENABLED
            RTRCPTR oldFixup = *pFixup;
#endif
            /* Core.Key abused to store the type of fixup */
            switch ((uintptr_t)pRec->Core.Key)
            {
            case PATM_FIXUP_CPU_FF_ACTION:
                *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                LogFlow(("Changing cpu ff action fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_DEFAULT:
                *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                LogFlow(("Changing cpuid def fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_STANDARD:
                *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                LogFlow(("Changing cpuid std fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_EXTENDED:
                *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                LogFlow(("Changing cpuid ext fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_CENTAUR:
                *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                LogFlow(("Changing cpuid centaur fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            default:
                AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
                break;
            }
        }

#ifdef RT_OS_WINDOWS
        AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
#endif
        break;
    }

    case FIXUP_REL_JMPTOPATCH:
    {
        /* Guest-code jump into the patch: the jump target moves with the
         * patch memory, so recompute it and rewrite the displacement that
         * was installed in guest code. */
        RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);

        if (    pPatch->uState == PATCH_ENABLED
            &&  (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
        {
            uint8_t    oldJump[SIZEOF_NEAR_COND_JUMP32];
            uint8_t    temp[SIZEOF_NEAR_COND_JUMP32];
            RTRCPTR    pJumpOffGC;
            RTRCINTPTR displ   = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
            RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;

            Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));

            Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
            if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
            {
                Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);

                /* Reconstruct the jump we previously installed (conditional,
                 * 2-byte opcode) so it can be compared against guest memory. */
                pJumpOffGC = pPatch->pPrivInstrGC + 2;    //two byte opcode
                oldJump[0] = pPatch->aPrivInstr[0];
                oldJump[1] = pPatch->aPrivInstr[1];
                *(RTRCUINTPTR *)&oldJump[2] = displOld;
            }
            else
#endif
            if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
            {
                /* Reconstruct the unconditional near jump (E9 + rel32). */
                pJumpOffGC = pPatch->pPrivInstrGC + 1;    //one byte opcode
                oldJump[0] = 0xE9;
                *(RTRCUINTPTR *)&oldJump[1] = displOld;
            }
            else
            {
                AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
                break;
            }
            Assert(pPatch->cbPatchJump <= sizeof(temp));

            /*
             * Read old patch jump and compare it to the one we previously installed
             */
            int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
            Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);

            if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
            {
                /* Page isn't mapped right now: install a virtual page handler
                 * so the jump can be (re)checked when the guest touches it. */
                RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;

                rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
                Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
            }
            else
            if (memcmp(temp, oldJump, pPatch->cbPatchJump))
            {
                Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
                /*
                 * Disable patch; this is not a good solution
                 */
                /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
                pPatch->uState = PATCH_DISABLED;
            }
            else
            if (RT_SUCCESS(rc))
            {
                /* Jump still intact: write the corrected displacement back
                 * into guest code. */
                rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
                AssertRC(rc);
            }
            else
                /* NOTE(review): message names an old API; the read above is
                 * PGMPhysSimpleReadGCPtr. */
                AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
        }
        else
            Log(("Skip the guest jump to patch code for this disabled patch %08X\n", pRec->pRelocPos));

        /* Record the relocated target for subsequent passes. */
        pRec->pDest = pTarget;
        break;
    }

    case FIXUP_REL_JMPTOGUEST:
    {
        /* Patch-code jump back to guest code: the jump *source* moved with
         * the patch memory, so recompute the displacement in patch memory. */
        RTRCPTR    pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
        RTRCINTPTR displ   = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;

        Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
        Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
        *(RTRCUINTPTR *)pRec->pRelocPos = displ;
        pRec->pSource = pSource;
        break;

    }
}
}
1305
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette