VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMSSM.cpp@ 24760

Last change on this file since 24760 was 23817, checked in by vboxsync, 15 years ago

PATM: 64-bit host saved state fixes (addresses testbox burn).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 56.4 KB
Line 
1/* $Id: PATMSSM.cpp 23817 2009-10-16 11:48:31Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/hwaccm.h>
30#include <VBox/stam.h>
31#include <VBox/pgm.h>
32#include <VBox/cpum.h>
33#include <VBox/iom.h>
34#include <VBox/sup.h>
35#include <VBox/mm.h>
36#include <VBox/ssm.h>
37#include <VBox/pdm.h>
38#include <VBox/trpm.h>
39#include <VBox/param.h>
40#include <iprt/avl.h>
41#include "PATMInternal.h"
42#include "PATMPatch.h"
43#include "PATMA.h"
44#include <VBox/vm.h>
45#include <VBox/csam.h>
46
47#include <VBox/dbg.h>
48#include <VBox/err.h>
49#include <VBox/log.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#include <iprt/string.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Rebases pointer @a a by subtracting base @a b from it in place (used to
 *  turn host pointers into offsets before saving them). */
#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
/** Rebases pointer @a a by adding base @a b to it in place (used to turn
 *  saved offsets back into host pointers on load). */
#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
66
67/*******************************************************************************
68* Global Variables *
69*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * Fed to SSMR3GetStructEx with SSMSTRUCT_FLAGS_MEM_BAND_AID when loading a
 * state that was originally written with SSMR3PutMem, so entry order and
 * padding must match the raw in-memory layout of PATM exactly.  RCPTR/HCPTR
 * entries compensate for 32-/64-bit host pointer size differences; IGNORE
 * entries are fields that get recalculated after load.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* Statistics below are not part of the restored state (see @note in patmR3Load). */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
166
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 *
 * Must mirror the raw memory layout of PATMGCSTATE field-for-field: it is
 * used with SSMSTRUCT_FLAGS_MEM_BAND_AID to load states that were written
 * with a plain SSMR3PutMem of the whole structure.
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(                 PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(                 PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
191
/**
 * SSM descriptor table for the PATMPATCHREC structure.
 *
 * Loaded with SSMSTRUCT_FLAGS_MEM_BAND_AID, so the entries (including the
 * explicit host-padding ones) must track the raw memory layout.  AVL tree
 * linkage and other host pointers are ignored and rebuilt on load.
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHREC, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHREC, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.pPrivInstrHC),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHREC, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHREC, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.cacheRec.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.cacheRec.pPatchLocEndHC), // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHREC, patch.cacheRec.pGuestLoc), // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, patch.cacheRec.opsize), // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
245
/**
 * SSM descriptor table for the RELOCREC structure.
 *
 * Note the two HCPTR_HACK_U32 entries: Core.Key is abused on the save side
 * to carry the fixup type for absolute fixups (see patmSaveFixupRecords),
 * and pRelocPos is saved as an offset into patch memory rather than a host
 * pointer.
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key), // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos), // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
263
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 *
 * Matches the raw layout of the patch-to-guest lookup records written by
 * patmSaveP2GLookupRecords with SSMR3PutMem; AVL linkage is ignored and
 * rebuilt when the records are re-inserted on load.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 5),
    SSMFIELD_ENTRY_TERM()
};
283
284
285#ifdef VBOX_STRICT
286/**
287 * Callback function for RTAvlPVDoWithAll
288 *
289 * Counts the number of patches in the tree
290 *
291 * @returns VBox status code.
292 * @param pNode Current node
293 * @param pcPatches Pointer to patch counter (uint32_t)
294 */
295static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
296{
297 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
298 return VINF_SUCCESS;
299}
300
301/**
302 * Callback function for RTAvlU32DoWithAll
303 *
304 * Counts the number of patches in the tree
305 *
306 * @returns VBox status code.
307 * @param pNode Current node
308 * @param pcPatches Pointer to patch counter (uint32_t)
309 */
310static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
311{
312 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
313 return VINF_SUCCESS;
314}
315#endif /* VBOX_STRICT */
316
317/**
318 * Callback function for RTAvloU32DoWithAll
319 *
320 * Counts the number of patches in the tree
321 *
322 * @returns VBox status code.
323 * @param pNode Current node
324 * @param pcPatches Pointer to patch counter
325 */
326static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
327{
328 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
329 return VINF_SUCCESS;
330}
331
332/**
333 * Callback function for RTAvlU32DoWithAll
334 *
335 * Saves all patch to guest lookup records.
336 *
337 * @returns VBox status code.
338 * @param pNode Current node
339 * @param pVM1 VM Handle
340 */
341static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
342{
343 PVM pVM = (PVM)pVM1;
344 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
345 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
346
347 /* Save the lookup record. */
348 int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
349 AssertRCReturn(rc, rc);
350
351 return VINF_SUCCESS;
352}
353
/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Saves a fixup record, after converting its host pointer into a patch
 * memory offset and, for absolute fixups, tagging a known special target
 * (CPU FF action or one of the CPUID leaf arrays) in the abused Core.Key
 * so the load side can relocate it.
 *
 * @returns VBox status code.
 * @param   pNode       Current node (a RELOCREC; saved from a local copy,
 *                      the tree node itself is not modified).
 * @param   pVM1        VM Handle
 */
static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    RELOCREC rec = *(PRELOCREC)pNode;
    RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos;    /* Dereferenced below to inspect the fixup target. */

    Assert(rec.pRelocPos);
    /* Convert pointer to an offset into patch memory. */
    PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);

    if (rec.uType == FIXUP_ABSOLUTE)
    {
        /* Core.Key abused to store the fixup type. */
        if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdDefRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdStdRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdExtRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdCentaurRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
        /* else: key left as-is; presumably handled generically on load - TODO confirm. */
    }

    /* Save the lookup record. */
    int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
399
400
401/**
402 * Callback function for RTAvloU32DoWithAll
403 *
404 * Saves the state of the patch that's being enumerated
405 *
406 * @returns VBox status code.
407 * @param pNode Current node
408 * @param pVM1 VM Handle
409 */
410static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
411{
412 PVM pVM = (PVM)pVM1;
413 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
414 PATMPATCHREC patch = *pPatch;
415 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
416 int rc;
417
418 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
419
420 /*
421 * Reset HC pointers that need to be recalculated when loading the state
422 */
423 AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
424 ("State = %x pPrivInstrHC=%08x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, patch.patch.pPrivInstrHC, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
425 Assert(pPatch->patch.JumpTree == 0);
426 Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
427 Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);
428
429 memset(&patch.patch.cacheRec, 0, sizeof(patch.patch.cacheRec));
430
431 /* Save the patch record itself */
432 rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
433 AssertRCReturn(rc, rc);
434
435 /*
436 * Reset HC pointers in fixup records and save them.
437 */
438#ifdef VBOX_STRICT
439 uint32_t nrFixupRecs = 0;
440 RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
441 AssertMsg((int32_t)nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
442#endif
443 RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);
444
445#ifdef VBOX_STRICT
446 uint32_t nrLookupRecords = 0;
447 RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
448 Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
449#endif
450
451 RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
452 return VINF_SUCCESS;
453}
454
/**
 * Execute state save operation.
 *
 * Writes, in order: a copy of the PATM structure (with HC pointers reset),
 * the raw patch memory, the GC state, the PATM stack page, and finally one
 * record set per patch via patmSavePatchState.  patmR3Load must consume
 * the stream in exactly this order.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PATM patmInfo = pVM->patm.s;    /* Local copy; the live structure must stay intact. */
    int  rc;

    /* Stash the SSM handle where the save callbacks can reach it. */
    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC = NULL;
    patmInfo.pGCStateHC  = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
518
519/**
520 * Execute state load operation.
521 *
522 * @returns VBox status code.
523 * @param pVM VM Handle.
524 * @param pSSM SSM operation handle.
525 * @param uVersion Data layout version.
526 * @param uPass The data pass.
527 */
528DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
529{
530 PATM patmInfo;
531 int rc;
532
533 if ( uVersion != PATM_SSM_VERSION
534 && uVersion != PATM_SSM_VERSION_FIXUP_HACK
535 && uVersion != PATM_SSM_VERSION_VER16
536#ifdef PATM_WITH_NEW_SSM
537 && uVersion != PATM_SSM_VERSION_GETPUTMEM)
538#else
539 )
540#endif
541 {
542 AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
543 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
544 }
545 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
546
547 pVM->patm.s.savedstate.pSSM = pSSM;
548
549 /*
550 * Restore PATM structure
551 */
552#ifdef PATM_WITH_NEW_SSM
553 if (uVersion == PATM_SSM_VERSION_GETPUTMEM)
554 {
555#endif
556#if 0
557 rc = SSMR3GetMem(pSSM, &patmInfo, sizeof(patmInfo));
558#else
559 RT_ZERO(patmInfo);
560 rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmFields[0], NULL);
561#endif
562 AssertRCReturn(rc, rc);
563
564#ifdef PATM_WITH_NEW_SSM
565 }
566 else
567 {
568 memset(&patmInfo, 0, sizeof(patmInfo));
569
570 AssertCompile(sizeof(patmInfo.pGCStateGC) == sizeof(RTRCPTR));
571 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStateGC);
572 AssertRCReturn(rc, rc);
573
574 AssertCompile(sizeof(patmInfo.pCPUMCtxGC) == sizeof(RTRCPTR));
575 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pCPUMCtxGC);
576 AssertRCReturn(rc, rc);
577
578 AssertCompile(sizeof(patmInfo.pStatsGC) == sizeof(RTRCPTR));
579 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pStatsGC);
580 AssertRCReturn(rc, rc);
581
582 AssertCompile(sizeof(patmInfo.pfnHelperCallGC) == sizeof(RTRCPTR));
583 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperCallGC);
584 AssertRCReturn(rc, rc);
585
586 AssertCompile(sizeof(patmInfo.pfnHelperRetGC) == sizeof(RTRCPTR));
587 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperRetGC);
588 AssertRCReturn(rc, rc);
589
590 AssertCompile(sizeof(patmInfo.pfnHelperJumpGC) == sizeof(RTRCPTR));
591 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperJumpGC);
592 AssertRCReturn(rc, rc);
593
594 AssertCompile(sizeof(patmInfo.pfnHelperIretGC) == sizeof(RTRCPTR));
595 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperIretGC);
596 AssertRCReturn(rc, rc);
597
598 AssertCompile(sizeof(patmInfo.pPatchMemGC) == sizeof(RTRCPTR));
599 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchMemGC);
600 AssertRCReturn(rc, rc);
601
602 AssertCompile(sizeof(patmInfo.cbPatchMem) == sizeof(uint32_t));
603 rc = SSMR3GetU32(pSSM, &patmInfo.cbPatchMem);
604 AssertRCReturn(rc, rc);
605
606 AssertCompile(sizeof(patmInfo.offPatchMem) == sizeof(uint32_t));
607 rc = SSMR3GetU32(pSSM, &patmInfo.offPatchMem);
608 AssertRCReturn(rc, rc);
609
610 AssertCompile(sizeof(patmInfo.deltaReloc) == sizeof(int32_t));
611 rc = SSMR3GetS32(pSSM, &patmInfo.deltaReloc);
612 AssertRCReturn(rc, rc);
613
614 AssertCompile(sizeof(patmInfo.uCurrentPatchIdx) == sizeof(uint32_t));
615 rc = SSMR3GetS32(pSSM, &patmInfo.uCurrentPatchIdx);
616 AssertRCReturn(rc, rc);
617
618 AssertCompile(sizeof(patmInfo.pPatchedInstrGCLowest) == sizeof(RTRCPTR));
619 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCLowest);
620 AssertRCReturn(rc, rc);
621
622 AssertCompile(sizeof(patmInfo.pPatchedInstrGCHighest) == sizeof(RTRCPTR));
623 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCHighest);
624 AssertRCReturn(rc, rc);
625
626 AssertCompile(sizeof(patmInfo.pfnSysEnterGC) == sizeof(RTRCPTR));
627 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterGC);
628 AssertRCReturn(rc, rc);
629
630 AssertCompile(sizeof(patmInfo.pfnSysEnterPatchGC) == sizeof(RTRCPTR));
631 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterPatchGC);
632 AssertRCReturn(rc, rc);
633
634 AssertCompile(sizeof(patmInfo.uSysEnterPatchIdx) == sizeof(uint32_t));
635 rc = SSMR3GetU32(pSSM, &patmInfo.uSysEnterPatchIdx);
636 AssertRCReturn(rc, rc);
637
638 AssertCompile(sizeof(patmInfo.ulCallDepth) == sizeof(uint32_t));
639 rc = SSMR3GetU32(pSSM, &patmInfo.ulCallDepth);
640 AssertRCReturn(rc, rc);
641
642 AssertCompile(sizeof(patmInfo.pGCStackGC) == sizeof(RTRCPTR));
643 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStackGC);
644 AssertRCReturn(rc, rc);
645
646 AssertCompile(sizeof(patmInfo.cPageRecords) == sizeof(uint32_t));
647 rc = SSMR3GetU32(pSSM, &patmInfo.cPageRecords);
648 AssertRCReturn(rc, rc);
649
650 AssertCompile(sizeof(patmInfo.fOutOfMemory) == sizeof(bool));
651 rc = SSMR3GetBool(pSSM, &patmInfo.fOutOfMemory);
652 AssertRCReturn(rc, rc);
653
654 AssertCompile(sizeof(patmInfo.savedstate.cPatches) == sizeof(uint32_t));
655 rc = SSMR3GetU32(pSSM, &patmInfo.savedstate.cPatches);
656 AssertRCReturn(rc, rc);
657
658 }
659#endif
660
661 /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
662 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
663 if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
664 || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
665 || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
666 || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
667 {
668 AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
669 return VERR_SSM_INVALID_STATE;
670 }
671
672 if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
673 {
674 AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
675 return VERR_SSM_INVALID_STATE;
676 }
677 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
678 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
679 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
680 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
681
682 /* Lowest and highest patched instruction */
683 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
684 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
685
686 /* Sysenter handlers */
687 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
688 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
689 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
690
691 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
692
693 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
694 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
695 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
696 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
697
698
699 /** @note patch statistics are not restored. */
700
701 /*
702 * Restore patch memory contents
703 */
704 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
705 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
706 AssertRCReturn(rc, rc);
707
708 /*
709 * Restore GC state memory
710 */
711#ifdef PATM_WITH_NEW_SSM
712 if (uVersion == PATM_SSM_VERSION_GETPUTMEM)
713 {
714#endif
715#if 0
716 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
717#else
718 RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
719 rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmGCStateFields[0], NULL);
720#endif
721 AssertRCReturn(rc, rc);
722#ifdef PATM_WITH_NEW_SSM
723 }
724 else
725 {
726 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uVMFlags) == sizeof(uint32_t));
727 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uVMFlags);
728 AssertRCReturn(rc, rc);
729
730 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPendingAction) == sizeof(uint32_t));
731 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPendingAction);
732 AssertRCReturn(rc, rc);
733
734 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPatchCalls) == sizeof(uint32_t));
735 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPatchCalls);
736 AssertRCReturn(rc, rc);
737
738 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uScratch) == sizeof(uint32_t));
739 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uScratch);
740 AssertRCReturn(rc, rc);
741
742 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEFlags) == sizeof(uint32_t));
743 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEFlags);
744 AssertRCReturn(rc, rc);
745
746 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretCS) == sizeof(uint32_t));
747 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretCS);
748 AssertRCReturn(rc, rc);
749
750 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEIP) == sizeof(uint32_t));
751 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEIP);
752 AssertRCReturn(rc, rc);
753
754 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Psp) == sizeof(uint32_t));
755 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Psp);
756 AssertRCReturn(rc, rc);
757
758 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->fPIF) == sizeof(uint32_t));
759 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->fPIF);
760 AssertRCReturn(rc, rc);
761
762 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts) == sizeof(RTRCPTR));
763 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts);
764 AssertRCReturn(rc, rc);
765
766 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr) == sizeof(RTRCPTR));
767 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr);
768 AssertRCReturn(rc, rc);
769
770 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallReturnAddr) == sizeof(RTRCPTR));
771 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallReturnAddr);
772 AssertRCReturn(rc, rc);
773
774 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEAX) == sizeof(uint32_t));
775 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEAX);
776 AssertRCReturn(rc, rc);
777
778 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uECX) == sizeof(uint32_t));
779 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uECX);
780 AssertRCReturn(rc, rc);
781
782 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEDI) == sizeof(uint32_t));
783 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEDI);
784 AssertRCReturn(rc, rc);
785
786 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.eFlags) == sizeof(uint32_t));
787 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.eFlags);
788 AssertRCReturn(rc, rc);
789
790 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uFlags) == sizeof(uint32_t));
791 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uFlags);
792 AssertRCReturn(rc, rc);
793 }
794#endif
795
796 /*
797 * Restore PATM stack page
798 */
799 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
800 AssertRCReturn(rc, rc);
801
802 /*
803 * Load all patches
804 */
805 for (uint32_t i=0;i<patmInfo.savedstate.cPatches;i++)
806 {
807 PATMPATCHREC patch, *pPatchRec;
808
809#if 0
810 rc = SSMR3GetMem(pSSM, &patch, sizeof(patch));
811#else
812 RT_ZERO(patch);
813 rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmPatchRecFields[0], NULL);
814#endif
815 AssertRCReturn(rc, rc);
816
817 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
818
819 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
820 if (RT_FAILURE(rc))
821 {
822 AssertMsgFailed(("Out of memory!!!!\n"));
823 return VERR_NO_MEMORY;
824 }
825 /*
826 * Only restore the patch part of the tree record; not the internal data (except the key of course)
827 */
828 pPatchRec->patch = patch.patch;
829 pPatchRec->Core.Key = patch.Core.Key;
830 pPatchRec->CoreOffset.Key = patch.CoreOffset.Key;
831
832 Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
833 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
834 Assert(ret);
835 if (pPatchRec->patch.uState != PATCH_REFUSED)
836 {
837 if (pPatchRec->patch.pPatchBlockOffset)
838 {
839 /* We actually generated code for this patch. */
840 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
841 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
842 }
843 }
844 /* Set to zero as we don't need it anymore. */
845 pPatchRec->patch.pTempInfo = 0;
846
847 pPatchRec->patch.pPrivInstrHC = 0;
848 /* The GC virtual ptr is fixed, but we must convert it manually again to HC. */
849 int rc2 = rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
850 /* Can fail due to page or page table not present. */
851
852 /*
853 * Restore fixup records and correct HC pointers in fixup records
854 */
855 pPatchRec->patch.FixupTree = 0;
856 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
857 for (int i=0;i<patch.patch.nrFixups;i++)
858 {
859 RELOCREC rec;
860 int32_t offset;
861 RTRCPTR *pFixup;
862
863#if 0
864 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
865#else
866 RT_ZERO(rec);
867 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRelocRec[0], NULL);
868#endif
869 AssertRCReturn(rc, rc);
870
871 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
872 offset = (int32_t)(int64_t)rec.pRelocPos;
873 /* Convert to HC pointer again. */
874 PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
875 pFixup = (RTRCPTR *)rec.pRelocPos;
876
877 if (pPatchRec->patch.uState != PATCH_REFUSED)
878 {
879 if ( rec.uType == FIXUP_REL_JMPTOPATCH
880 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
881 {
882 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
883 unsigned offset = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
884
885 /** @todo This will fail & crash in patmCorrectFixup if the page isn't present
886 * when we restore. Happens with my XP image here
887 * (pPrivInstrGC=0x8069e051). */
888 AssertLogRelMsg(pPatchRec->patch.pPrivInstrHC, ("%RRv rc=%Rrc uState=%u\n", pPatchRec->patch.pPrivInstrGC, rc2, pPatchRec->patch.uState));
889 rec.pRelocPos = pPatchRec->patch.pPrivInstrHC + offset;
890 pFixup = (RTRCPTR *)rec.pRelocPos;
891 }
892
893 patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
894 }
895
896 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
897 AssertRCReturn(rc, rc);
898 }
899
900 /* And all patch to guest lookup records */
901 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
902
903 pPatchRec->patch.Patch2GuestAddrTree = 0;
904 pPatchRec->patch.Guest2PatchAddrTree = 0;
905 if (pPatchRec->patch.nrPatch2GuestRecs)
906 {
907 RECPATCHTOGUEST rec;
908 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
909
910 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
911 for (uint32_t i=0;i<nrPatch2GuestRecs;i++)
912 {
913#if 0
914 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
915#else
916 RT_ZERO(rec);
917 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRecPatchToGuest[0], NULL);
918#endif
919
920 AssertRCReturn(rc, rc);
921
922 patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
923 }
924 Assert(pPatchRec->patch.Patch2GuestAddrTree);
925 }
926
927 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
928 {
929 /* Insert the guest page lookup records (for detection self-modifying code) */
930 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
931 AssertRCReturn(rc, rc);
932 }
933
934#if 0 /* can fail def LOG_ENABLED */
935 if ( pPatchRec->patch.uState != PATCH_REFUSED
936 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
937 {
938 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
939 Log(("Patch code ----------------------------------------------------------\n"));
940 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
941 Log(("Patch code ends -----------------------------------------------------\n"));
942 MMR3HeapFree(pPatchRec->patch.pTempInfo);
943 pPatchRec->patch.pTempInfo = NULL;
944 }
945#endif
946
947 }
948
949 /*
950 * Correct absolute fixups in the global patch. (helper functions)
951 * Bit of a mess. Uses the new patch record, but restored patch functions.
952 */
953 PRELOCREC pRec = 0;
954 AVLPVKEY key = 0;
955
956 Log(("Correct fixups in global helper functions\n"));
957 while (true)
958 {
959 int32_t offset;
960 RTRCPTR *pFixup;
961
962 /* Get the record that's closest from above */
963 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
964 if (pRec == 0)
965 break;
966
967 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
968
969 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
970 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
971 pFixup = (RTRCPTR *)pRec->pRelocPos;
972
973 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
974 patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
975 }
976
977#ifdef VBOX_WITH_STATISTICS
978 /*
979 * Restore relevant old statistics
980 */
981 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
982 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
983 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
984 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
985#endif
986
987 return VINF_SUCCESS;
988}
989
/**
 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
 *
 * The hypervisor areas a patch references (PATM GC state, CPUM guest context,
 * statistics block, patch stack and the patch memory itself) may be mapped at
 * different raw-mode context (RC) addresses in this VM than in the VM the state
 * was saved from.  This function rebases one relocation record accordingly and,
 * for relative jump fixups, re-applies the jump displacement in the relocated
 * guest/patch code.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   ulSSMVersion    SSM version the state was saved with; drives several
 *                          backward-compatibility hacks below.
 * @param   patmInfo        Saved PATM structure (carries the *old* RC addresses).
 * @param   pPatch          Patch record the relocation belongs to.
 * @param   pRec            Relocation record; pRelocPos/pSource/pDest may be updated.
 * @param   offset          Offset of referenced data/code (used for logging only
 *                          in the absolute-fixup path).
 * @param   pFixup          Host address of the RC fixup value to correct.
 */
static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
{
    /* How far the patch memory moved between save and restore (RC address delta). */
    int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;

    switch (pRec->uType)
    {
    case FIXUP_ABSOLUTE:
    {
        /* Only absolute fixups located inside the patch memory need correcting;
           a source address in guest code is left alone. */
        if (pRec->pSource && !PATMIsPatchGCAddr(pVM, pRec->pSource))
            break;

        /* Determine which saved hypervisor region the old fixup value pointed
           into and rebase it onto that region's current address.  NOTE(review):
           a value matching none of the ranges below is silently left unchanged
           (no final else) — presumably intentional, but worth confirming. */
        if (    *pFixup >= patmInfo.pGCStateGC
            &&  *pFixup <  patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
        {
            LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
            *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
        }
        else
        if (    *pFixup >= patmInfo.pCPUMCtxGC
            &&  *pFixup <  patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
        {
            LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));

            /* The CPUMCTX structure has completely changed, so correct the offsets too. */
            if (ulSSMVersion == PATM_SSM_VERSION_VER16)
            {
                unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;

                /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
                 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
                 * function is not available in older gcc versions, at least not in gcc-3.3 */
                /* Map each 1.6-era CPUMCTX member offset to the member's current
                   location; only the members patch code can reference are handled. */
                if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
                {
                    LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
                {
                    LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
                {
                    LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
                {
                    LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
                {
                    LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
                {
                    LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
                {
                    LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
                {
                    LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
                {
                    LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
                {
                    LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
                {
                    LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
                {
                    LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
                {
                    LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
                {
                    LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
                {
                    LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
                {
                    LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
                {
                    LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
                {
                    LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
                }
                else
                    AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
            }
            else
                /* Layout unchanged for newer saved states; a plain rebase suffices. */
                *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
        }
        else
        if (    *pFixup >= patmInfo.pStatsGC
            &&  *pFixup <  patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
        {
            LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
            *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
        }
        else
        if (    *pFixup >= patmInfo.pGCStackGC
            &&  *pFixup <  patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
        {
            LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
            *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
        }
        else
        if (    *pFixup >= patmInfo.pPatchMemGC
            &&  *pFixup <  patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
        {
            LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
            *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
        }
        else
        /* Legacy states (<= FIXUP_HACK) did not record what an out-of-region
           absolute fixup referenced, so guess from the old value's distance to
           the VM structure: within the first 32 bytes it is assumed to be the
           force-action flags of VCPU 0. */
        if (    ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
            &&  *pFixup >= pVM->pVMRC
            &&  *pFixup <  pVM->pVMRC + 32)
        {
            LogFlow(("Changing fLocalForcedActions fixup from %x to %x\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
            *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
        }
        else
        /* Legacy hack #2: within the first 8KB of the VM structure the fixup is
           assumed to reference one of the CPUID leaf arrays. */
        if (    ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
            &&  *pFixup >= pVM->pVMRC
            &&  *pFixup <  pVM->pVMRC + 8192)
        {
            /* NOTE(review): function-static counter — relies on the patches/fixups
               being restored in the exact order they were saved and on a single
               VM being restored; see the assumption comment below. */
            static int cCpuidFixup = 0;
#ifdef LOG_ENABLED
            RTRCPTR oldFixup = *pFixup;
#endif
            /* very dirty assumptions about the cpuid patch and cpuid ordering. */
            switch(cCpuidFixup & 3)
            {
            case 0:
                *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                break;
            case 1:
                *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                break;
            case 2:
                *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                break;
            case 3:
                *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                break;
            }
            LogFlow(("Changing cpuid fixup %d from %x to %x\n", cCpuidFixup, oldFixup, *pFixup));
            cCpuidFixup++;
        }
        else
        /* Current format: the fixup target kind was saved explicitly, so no
           guessing is needed. */
        if (ulSSMVersion >= PATM_SSM_VERSION)
        {
#ifdef LOG_ENABLED
            RTRCPTR oldFixup = *pFixup;
#endif
            /* Core.Key abused to store the type of fixup */
            switch ((uintptr_t)pRec->Core.Key)
            {
            case PATM_FIXUP_CPU_FF_ACTION:
                *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                LogFlow(("Changing cpu ff action fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_DEFAULT:
                *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                LogFlow(("Changing cpuid def fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_STANDARD:
                *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                LogFlow(("Changing cpuid std fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_EXTENDED:
                *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                LogFlow(("Changing cpuid ext fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_CENTAUR:
                *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                LogFlow(("Changing cpuid centaur fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            default:
                AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
                break;
            }
        }

#ifdef RT_OS_WINDOWS
        /* Compile-time sanity check for the 32-byte window heuristic above. */
        AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
#endif
        break;
    }

    case FIXUP_REL_JMPTOPATCH:
    {
        /* Relative jump in *guest* code targeting patch code: the patch memory
           moved by 'delta', so recompute the new jump target. */
        RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);

        if (    pPatch->uState == PATCH_ENABLED
            &&  (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
        {
            uint8_t    oldJump[SIZEOF_NEAR_COND_JUMP32];
            uint8_t    temp[SIZEOF_NEAR_COND_JUMP32];
            RTRCPTR    pJumpOffGC;
            /* New and old displacements of the jump we installed in guest code. */
            RTRCINTPTR displ   = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
            RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;

            Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));

            Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
            if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
            {
                Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);

                /* Reconstruct the conditional jump we installed: 2 opcode bytes
                   from the saved private instruction plus the old displacement. */
                pJumpOffGC = pPatch->pPrivInstrGC + 2;    //two byte opcode
                oldJump[0] = pPatch->aPrivInstr[0];
                oldJump[1] = pPatch->aPrivInstr[1];
                *(RTRCUINTPTR *)&oldJump[2] = displOld;
            }
            else
#endif
            if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
            {
                /* Reconstruct the near jmp (0xE9) we installed with the old displacement. */
                pJumpOffGC = pPatch->pPrivInstrGC + 1;    //one byte opcode
                oldJump[0] = 0xE9;
                *(RTRCUINTPTR *)&oldJump[1] = displOld;
            }
            else
            {
                AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
                break;
            }
            Assert(pPatch->cbPatchJump <= sizeof(temp));

            /*
             * Read old patch jump and compare it to the one we previously installed
             */
            int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
            Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);

            if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
            {
                /* Page paged out: monitor it so the jump can be fixed up once it
                   is brought back in. */
                RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;

                rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
                Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
            }
            else
            if (memcmp(temp, oldJump, pPatch->cbPatchJump))
            {
                Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
                /*
                 * Disable patch; this is not a good solution
                 */
                /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
                pPatch->uState = PATCH_DISABLED;
            }
            else
            if (RT_SUCCESS(rc))
            {
                /* Guest code still holds our jump: write the updated displacement. */
                rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
                AssertRC(rc);
            }
            else
            {
                AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
            }
        }
        else
        {
            Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->pPrivInstrHC, pRec->pRelocPos));
        }

        /* Remember the new target so the record stays consistent. */
        pRec->pDest = pTarget;
        break;
    }

    case FIXUP_REL_JMPTOGUEST:
    {
        /* Relative jump in *patch* code back to guest code: the jump source
           moved with the patch memory; recompute and write the displacement
           directly into the patch code (host mapping). */
        RTRCPTR    pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
        RTRCINTPTR displ   = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;

        Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
        Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
        *(RTRCUINTPTR *)pRec->pRelocPos = displ;
        pRec->pSource = pSource;
        break;

    }
}
}
1325
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette