VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMSSM.cpp@ 29689

Last change on this file since 29689 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 56.4 KB
Line 
1/* $Id: PATMSSM.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/patm.h>
25#include <VBox/cpum.h>
26#include <VBox/mm.h>
27#include <VBox/ssm.h>
28#include <VBox/param.h>
29#include <iprt/avl.h>
30#include "PATMInternal.h"
31#include "PATMPatch.h"
32#include "PATMA.h"
33#include <VBox/vm.h>
34#include <VBox/csam.h>
35
36#include <VBox/dbg.h>
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/string.h>
42#include <VBox/dis.h>
43#include <VBox/disopcode.h>
44
45/*******************************************************************************
46* Defined Constants And Macros *
47*******************************************************************************/
/** Convert the pointer (a) into an offset by subtracting the base pointer (b), storing the result back into (a) in place. */
#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
/** Reverse of PATM_SUBTRACT_PTR: turn the offset (a) back into a pointer by adding the base pointer (b) in place. */
#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)
50
51/*******************************************************************************
52* Internal Functions *
53*******************************************************************************/
54static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
55
56/*******************************************************************************
57* Global Variables *
58*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * Used with SSMR3GetStructEx (SSMSTRUCT_FLAGS_MEM_BAND_AID) when loading the
 * saved state.  IGNORE/IGN_* entries are fields present in the raw structure
 * image but not restored from the stream (host-context pointers and
 * statistics are recalculated after load).  Entry order must match the
 * in-memory layout of PATM exactly.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* Statistics below are not restored; see the note in patmR3Load. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
155
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 *
 * Used when restoring the guest-context PATM state block; entry order must
 * match the in-memory layout of PATMGCSTATE exactly.
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(                 PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(                 PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
180
/**
 * SSM descriptor table for the PATMPATCHREC structure.
 *
 * Only the AVL keys and the patch payload are restored; tree linkage and
 * host-context pointers (IGN_* entries) are rebuilt after load.  Entry order
 * must match the in-memory layout of PATMPATCHREC exactly.
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHREC, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHREC, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.pPrivInstrHC),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHREC, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHREC, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.cacheRec.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.cacheRec.pPatchLocEndHC), // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHREC, patch.cacheRec.pGuestLoc), // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, patch.cacheRec.opsize), // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
234
/**
 * SSM descriptor table for the RELOCREC structure.
 *
 * Note the two HCPTR_HACK_U32 entries: Core.Key carries the fixup type
 * (see patmSaveFixupRecords) and pRelocPos is stored as an offset into
 * patch memory rather than a raw host pointer.
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key),        // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos),       // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
252
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 *
 * Tree linkage pointers are ignored on load; only the key, the original
 * guest instruction address and the dirty/jump-target flags are restored.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 5),
    SSMFIELD_ENTRY_TERM()
};
272
273
274#ifdef VBOX_STRICT
275/**
276 * Callback function for RTAvlPVDoWithAll
277 *
278 * Counts the number of patches in the tree
279 *
280 * @returns VBox status code.
281 * @param pNode Current node
282 * @param pcPatches Pointer to patch counter (uint32_t)
283 */
284static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
285{
286 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
287 return VINF_SUCCESS;
288}
289
290/**
291 * Callback function for RTAvlU32DoWithAll
292 *
293 * Counts the number of patches in the tree
294 *
295 * @returns VBox status code.
296 * @param pNode Current node
297 * @param pcPatches Pointer to patch counter (uint32_t)
298 */
299static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
300{
301 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
302 return VINF_SUCCESS;
303}
304#endif /* VBOX_STRICT */
305
306/**
307 * Callback function for RTAvloU32DoWithAll
308 *
309 * Counts the number of patches in the tree
310 *
311 * @returns VBox status code.
312 * @param pNode Current node
313 * @param pcPatches Pointer to patch counter
314 */
315static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
316{
317 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
318 return VINF_SUCCESS;
319}
320
321/**
322 * Callback function for RTAvlU32DoWithAll
323 *
324 * Saves all patch to guest lookup records.
325 *
326 * @returns VBox status code.
327 * @param pNode Current node
328 * @param pVM1 VM Handle
329 */
330static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
331{
332 PVM pVM = (PVM)pVM1;
333 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
334 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
335
336 /* Save the lookup record. */
337 int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
338 AssertRCReturn(rc, rc);
339
340 return VINF_SUCCESS;
341}
342
/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Saves all fixup (relocation) records of a patch.  (The old summary said
 * "patch to guest lookup records"; that is patmSaveP2GLookupRecords.)
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a RELOCREC).
 * @param   pVM1    VM Handle
 */
static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    RELOCREC rec = *(PRELOCREC)pNode;   /* local copy; the tree node itself stays untouched */
    RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos;   /* still the HC address, so the fixup value can be inspected below */

    Assert(rec.pRelocPos);
    /* Convert pointer to an offset into patch memory. */
    PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);

    if (rec.uType == FIXUP_ABSOLUTE)
    {
        /* Core.Key abused to store the fixup type, so the load code can
           re-resolve these well-known absolute targets on the new host. */
        if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdDefRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdStdRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdExtRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdCentaurRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
    }

    /* Save the lookup record. */
    int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
388
389
/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Saves the state of the patch that's being enumerated: the PATMPATCHREC
 * itself, then its fixup records, then its patch-to-guest lookup records
 * (the load code reads them back in exactly this order).
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a PATMPATCHREC).
 * @param   pVM1    VM Handle
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHREC patch = *pPatch;   /* local copy so HC cache data can be wiped before writing */
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    int rc;

    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPrivInstrHC=%08x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, patch.patch.pPrivInstrHC, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    /* Clear the HC-only cache record in the copy (the descriptor table marks it "saved as zero"). */
    memset(&patch.patch.cacheRec, 0, sizeof(patch.patch.cacheRec));

    /* Save the patch record itself */
    rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    /* Cross-check the recorded fixup count against an actual tree walk. */
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg((int32_t)nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);

#ifdef VBOX_STRICT
    /* Same cross-check for the patch-to-guest lookup records. */
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
    return VINF_SUCCESS;
}
443
/**
 * Execute state save operation.
 *
 * Writes, in order: the PATM structure, the raw patch memory image, the
 * PATMGCSTATE block, the PATM stack page, and finally every patch record
 * with its fixup and patch-to-guest lookup records.  patmR3Load must read
 * the stream back in the same order.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PATM patmInfo = pVM->patm.s;   /* local copy so HC pointers can be cleared without touching the live state */
    int  rc;

    /* Stash the SSM handle for the tree-walk callbacks below. */
    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC = NULL;
    patmInfo.pGCStateHC = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
507
508/**
509 * Execute state load operation.
510 *
511 * @returns VBox status code.
512 * @param pVM VM Handle.
513 * @param pSSM SSM operation handle.
514 * @param uVersion Data layout version.
515 * @param uPass The data pass.
516 */
517DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
518{
519 PATM patmInfo;
520 int rc;
521
522 if ( uVersion != PATM_SSM_VERSION
523 && uVersion != PATM_SSM_VERSION_FIXUP_HACK
524 && uVersion != PATM_SSM_VERSION_VER16
525#ifdef PATM_WITH_NEW_SSM
526 && uVersion != PATM_SSM_VERSION_GETPUTMEM)
527#else
528 )
529#endif
530 {
531 AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
532 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
533 }
534 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
535
536 pVM->patm.s.savedstate.pSSM = pSSM;
537
538 /*
539 * Restore PATM structure
540 */
541#ifdef PATM_WITH_NEW_SSM
542 if (uVersion == PATM_SSM_VERSION_GETPUTMEM)
543 {
544#endif
545#if 0
546 rc = SSMR3GetMem(pSSM, &patmInfo, sizeof(patmInfo));
547#else
548 RT_ZERO(patmInfo);
549 rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmFields[0], NULL);
550#endif
551 AssertRCReturn(rc, rc);
552
553#ifdef PATM_WITH_NEW_SSM
554 }
555 else
556 {
557 memset(&patmInfo, 0, sizeof(patmInfo));
558
559 AssertCompile(sizeof(patmInfo.pGCStateGC) == sizeof(RTRCPTR));
560 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStateGC);
561 AssertRCReturn(rc, rc);
562
563 AssertCompile(sizeof(patmInfo.pCPUMCtxGC) == sizeof(RTRCPTR));
564 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pCPUMCtxGC);
565 AssertRCReturn(rc, rc);
566
567 AssertCompile(sizeof(patmInfo.pStatsGC) == sizeof(RTRCPTR));
568 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pStatsGC);
569 AssertRCReturn(rc, rc);
570
571 AssertCompile(sizeof(patmInfo.pfnHelperCallGC) == sizeof(RTRCPTR));
572 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperCallGC);
573 AssertRCReturn(rc, rc);
574
575 AssertCompile(sizeof(patmInfo.pfnHelperRetGC) == sizeof(RTRCPTR));
576 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperRetGC);
577 AssertRCReturn(rc, rc);
578
579 AssertCompile(sizeof(patmInfo.pfnHelperJumpGC) == sizeof(RTRCPTR));
580 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperJumpGC);
581 AssertRCReturn(rc, rc);
582
583 AssertCompile(sizeof(patmInfo.pfnHelperIretGC) == sizeof(RTRCPTR));
584 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperIretGC);
585 AssertRCReturn(rc, rc);
586
587 AssertCompile(sizeof(patmInfo.pPatchMemGC) == sizeof(RTRCPTR));
588 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchMemGC);
589 AssertRCReturn(rc, rc);
590
591 AssertCompile(sizeof(patmInfo.cbPatchMem) == sizeof(uint32_t));
592 rc = SSMR3GetU32(pSSM, &patmInfo.cbPatchMem);
593 AssertRCReturn(rc, rc);
594
595 AssertCompile(sizeof(patmInfo.offPatchMem) == sizeof(uint32_t));
596 rc = SSMR3GetU32(pSSM, &patmInfo.offPatchMem);
597 AssertRCReturn(rc, rc);
598
599 AssertCompile(sizeof(patmInfo.deltaReloc) == sizeof(int32_t));
600 rc = SSMR3GetS32(pSSM, &patmInfo.deltaReloc);
601 AssertRCReturn(rc, rc);
602
603 AssertCompile(sizeof(patmInfo.uCurrentPatchIdx) == sizeof(uint32_t));
604 rc = SSMR3GetS32(pSSM, &patmInfo.uCurrentPatchIdx);
605 AssertRCReturn(rc, rc);
606
607 AssertCompile(sizeof(patmInfo.pPatchedInstrGCLowest) == sizeof(RTRCPTR));
608 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCLowest);
609 AssertRCReturn(rc, rc);
610
611 AssertCompile(sizeof(patmInfo.pPatchedInstrGCHighest) == sizeof(RTRCPTR));
612 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCHighest);
613 AssertRCReturn(rc, rc);
614
615 AssertCompile(sizeof(patmInfo.pfnSysEnterGC) == sizeof(RTRCPTR));
616 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterGC);
617 AssertRCReturn(rc, rc);
618
619 AssertCompile(sizeof(patmInfo.pfnSysEnterPatchGC) == sizeof(RTRCPTR));
620 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterPatchGC);
621 AssertRCReturn(rc, rc);
622
623 AssertCompile(sizeof(patmInfo.uSysEnterPatchIdx) == sizeof(uint32_t));
624 rc = SSMR3GetU32(pSSM, &patmInfo.uSysEnterPatchIdx);
625 AssertRCReturn(rc, rc);
626
627 AssertCompile(sizeof(patmInfo.ulCallDepth) == sizeof(uint32_t));
628 rc = SSMR3GetU32(pSSM, &patmInfo.ulCallDepth);
629 AssertRCReturn(rc, rc);
630
631 AssertCompile(sizeof(patmInfo.pGCStackGC) == sizeof(RTRCPTR));
632 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStackGC);
633 AssertRCReturn(rc, rc);
634
635 AssertCompile(sizeof(patmInfo.cPageRecords) == sizeof(uint32_t));
636 rc = SSMR3GetU32(pSSM, &patmInfo.cPageRecords);
637 AssertRCReturn(rc, rc);
638
639 AssertCompile(sizeof(patmInfo.fOutOfMemory) == sizeof(bool));
640 rc = SSMR3GetBool(pSSM, &patmInfo.fOutOfMemory);
641 AssertRCReturn(rc, rc);
642
643 AssertCompile(sizeof(patmInfo.savedstate.cPatches) == sizeof(uint32_t));
644 rc = SSMR3GetU32(pSSM, &patmInfo.savedstate.cPatches);
645 AssertRCReturn(rc, rc);
646
647 }
648#endif
649
650 /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
651 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
652 if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
653 || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
654 || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
655 || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
656 {
657 AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
658 return VERR_SSM_INVALID_STATE;
659 }
660
661 if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
662 {
663 AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
664 return VERR_SSM_INVALID_STATE;
665 }
666 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
667 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
668 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
669 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
670
671 /* Lowest and highest patched instruction */
672 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
673 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
674
675 /* Sysenter handlers */
676 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
677 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
678 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
679
680 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
681
682 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
683 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
684 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
685 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
686
687
688 /** @note patch statistics are not restored. */
689
690 /*
691 * Restore patch memory contents
692 */
693 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
694 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
695 AssertRCReturn(rc, rc);
696
697 /*
698 * Restore GC state memory
699 */
700#ifdef PATM_WITH_NEW_SSM
701 if (uVersion == PATM_SSM_VERSION_GETPUTMEM)
702 {
703#endif
704#if 0
705 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
706#else
707 RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
708 rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmGCStateFields[0], NULL);
709#endif
710 AssertRCReturn(rc, rc);
711#ifdef PATM_WITH_NEW_SSM
712 }
713 else
714 {
715 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uVMFlags) == sizeof(uint32_t));
716 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uVMFlags);
717 AssertRCReturn(rc, rc);
718
719 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPendingAction) == sizeof(uint32_t));
720 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPendingAction);
721 AssertRCReturn(rc, rc);
722
723 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPatchCalls) == sizeof(uint32_t));
724 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPatchCalls);
725 AssertRCReturn(rc, rc);
726
727 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uScratch) == sizeof(uint32_t));
728 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uScratch);
729 AssertRCReturn(rc, rc);
730
731 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEFlags) == sizeof(uint32_t));
732 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEFlags);
733 AssertRCReturn(rc, rc);
734
735 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretCS) == sizeof(uint32_t));
736 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretCS);
737 AssertRCReturn(rc, rc);
738
739 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEIP) == sizeof(uint32_t));
740 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEIP);
741 AssertRCReturn(rc, rc);
742
743 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Psp) == sizeof(uint32_t));
744 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Psp);
745 AssertRCReturn(rc, rc);
746
747 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->fPIF) == sizeof(uint32_t));
748 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->fPIF);
749 AssertRCReturn(rc, rc);
750
751 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts) == sizeof(RTRCPTR));
752 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts);
753 AssertRCReturn(rc, rc);
754
755 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr) == sizeof(RTRCPTR));
756 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr);
757 AssertRCReturn(rc, rc);
758
759 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallReturnAddr) == sizeof(RTRCPTR));
760 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallReturnAddr);
761 AssertRCReturn(rc, rc);
762
763 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEAX) == sizeof(uint32_t));
764 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEAX);
765 AssertRCReturn(rc, rc);
766
767 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uECX) == sizeof(uint32_t));
768 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uECX);
769 AssertRCReturn(rc, rc);
770
771 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEDI) == sizeof(uint32_t));
772 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEDI);
773 AssertRCReturn(rc, rc);
774
775 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.eFlags) == sizeof(uint32_t));
776 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.eFlags);
777 AssertRCReturn(rc, rc);
778
779 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uFlags) == sizeof(uint32_t));
780 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uFlags);
781 AssertRCReturn(rc, rc);
782 }
783#endif
784
785 /*
786 * Restore PATM stack page
787 */
788 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
789 AssertRCReturn(rc, rc);
790
791 /*
792 * Load all patches
793 */
794 for (uint32_t i=0;i<patmInfo.savedstate.cPatches;i++)
795 {
796 PATMPATCHREC patch, *pPatchRec;
797
798#if 0
799 rc = SSMR3GetMem(pSSM, &patch, sizeof(patch));
800#else
801 RT_ZERO(patch);
802 rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmPatchRecFields[0], NULL);
803#endif
804 AssertRCReturn(rc, rc);
805
806 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
807
808 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
809 if (RT_FAILURE(rc))
810 {
811 AssertMsgFailed(("Out of memory!!!!\n"));
812 return VERR_NO_MEMORY;
813 }
814 /*
815 * Only restore the patch part of the tree record; not the internal data (except the key of course)
816 */
817 pPatchRec->patch = patch.patch;
818 pPatchRec->Core.Key = patch.Core.Key;
819 pPatchRec->CoreOffset.Key = patch.CoreOffset.Key;
820
821 Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
822 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
823 Assert(ret);
824 if (pPatchRec->patch.uState != PATCH_REFUSED)
825 {
826 if (pPatchRec->patch.pPatchBlockOffset)
827 {
828 /* We actually generated code for this patch. */
829 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
830 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
831 }
832 }
833 /* Set to zero as we don't need it anymore. */
834 pPatchRec->patch.pTempInfo = 0;
835
836 pPatchRec->patch.pPrivInstrHC = 0;
837 /* The GC virtual ptr is fixed, but we must convert it manually again to HC. */
838 int rc2 = rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
839 /* Can fail due to page or page table not present. */
840
841 /*
842 * Restore fixup records and correct HC pointers in fixup records
843 */
844 pPatchRec->patch.FixupTree = 0;
845 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
846 for (int j=0;j<patch.patch.nrFixups;j++)
847 {
848 RELOCREC rec;
849 int32_t offset;
850 RTRCPTR *pFixup;
851
852#if 0
853 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
854#else
855 RT_ZERO(rec);
856 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRelocRec[0], NULL);
857#endif
858 AssertRCReturn(rc, rc);
859
860 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
861 offset = (int32_t)(intptr_t)rec.pRelocPos;
862 /* Convert to HC pointer again. */
863 PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
864 pFixup = (RTRCPTR *)rec.pRelocPos;
865
866 if (pPatchRec->patch.uState != PATCH_REFUSED)
867 {
868 if ( rec.uType == FIXUP_REL_JMPTOPATCH
869 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
870 {
871 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
872 unsigned offset2 = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
873
874 /** @todo This will fail & crash in patmCorrectFixup if the page isn't present
875 * when we restore. Happens with my XP image here
876 * (pPrivInstrGC=0x8069e051). */
877 AssertLogRelMsg(pPatchRec->patch.pPrivInstrHC, ("%RRv rc=%Rrc uState=%u\n", pPatchRec->patch.pPrivInstrGC, rc2, pPatchRec->patch.uState));
878 rec.pRelocPos = pPatchRec->patch.pPrivInstrHC + offset2;
879 pFixup = (RTRCPTR *)rec.pRelocPos;
880 }
881
882 patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
883 }
884
885 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
886 AssertRCReturn(rc, rc);
887 }
888
889 /* And all patch to guest lookup records */
890 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
891
892 pPatchRec->patch.Patch2GuestAddrTree = 0;
893 pPatchRec->patch.Guest2PatchAddrTree = 0;
894 if (pPatchRec->patch.nrPatch2GuestRecs)
895 {
896 RECPATCHTOGUEST rec;
897 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
898
899 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
900 for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
901 {
902#if 0
903 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
904#else
905 RT_ZERO(rec);
906 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRecPatchToGuest[0], NULL);
907#endif
908
909 AssertRCReturn(rc, rc);
910
911 patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
912 }
913 Assert(pPatchRec->patch.Patch2GuestAddrTree);
914 }
915
916 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
917 {
918 /* Insert the guest page lookup records (for detection self-modifying code) */
919 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
920 AssertRCReturn(rc, rc);
921 }
922
923#if 0 /* can fail def LOG_ENABLED */
924 if ( pPatchRec->patch.uState != PATCH_REFUSED
925 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
926 {
927 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
928 Log(("Patch code ----------------------------------------------------------\n"));
929 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
930 Log(("Patch code ends -----------------------------------------------------\n"));
931 MMR3HeapFree(pPatchRec->patch.pTempInfo);
932 pPatchRec->patch.pTempInfo = NULL;
933 }
934#endif
935
936 }
937
938 /*
939 * Correct absolute fixups in the global patch. (helper functions)
940 * Bit of a mess. Uses the new patch record, but restored patch functions.
941 */
942 PRELOCREC pRec = 0;
943 AVLPVKEY key = 0;
944
945 Log(("Correct fixups in global helper functions\n"));
946 while (true)
947 {
948 int32_t offset;
949 RTRCPTR *pFixup;
950
951 /* Get the record that's closest from above */
952 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
953 if (pRec == 0)
954 break;
955
956 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
957
958 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
959 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
960 pFixup = (RTRCPTR *)pRec->pRelocPos;
961
962 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
963 patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
964 }
965
966#ifdef VBOX_WITH_STATISTICS
967 /*
968 * Restore relevant old statistics
969 */
970 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
971 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
972 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
973 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
974#endif
975
976 return VINF_SUCCESS;
977}
978
979/**
980 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
981 *
982 * @returns VBox status code.
983 * @param pVM VM Handle.
984 * @param ulSSMVersion SSM version
985 * @param patmInfo Saved PATM structure
986 * @param pPatch Patch record
987 * @param pRec Relocation record
988 * @param offset Offset of referenced data/code
989 * @param pFixup Fixup address
990 */
991static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
992{
993 int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;
994
995 switch (pRec->uType)
996 {
997 case FIXUP_ABSOLUTE:
998 {
999 if (pRec->pSource && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource))
1000 break;
1001
1002 if ( *pFixup >= patmInfo.pGCStateGC
1003 && *pFixup < patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
1004 {
1005 LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
1006 *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
1007 }
1008 else
1009 if ( *pFixup >= patmInfo.pCPUMCtxGC
1010 && *pFixup < patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
1011 {
1012 LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));
1013
1014 /* The CPUMCTX structure has completely changed, so correct the offsets too. */
1015 if (ulSSMVersion == PATM_SSM_VERSION_VER16)
1016 {
1017 unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;
1018
1019 /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
1020 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
1021 * function is not available in older gcc versions, at least not in gcc-3.3 */
1022 if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
1023 {
1024 LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
1025 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
1026 }
1027 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
1028 {
1029 LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
1030 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
1031 }
1032 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
1033 {
1034 LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
1035 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
1036 }
1037 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
1038 {
1039 LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
1040 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
1041 }
1042 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
1043 {
1044 LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
1045 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
1046 }
1047 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
1048 {
1049 LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
1050 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
1051 }
1052 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
1053 {
1054 LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
1055 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
1056 }
1057 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
1058 {
1059 LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
1060 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
1061 }
1062 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
1063 {
1064 LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
1065 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
1066 }
1067 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
1068 {
1069 LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
1070 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
1071 }
1072 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
1073 {
1074 LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
1075 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
1076 }
1077 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
1078 {
1079 LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
1080 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
1081 }
1082 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
1083 {
1084 LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
1085 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1086 }
1087 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
1088 {
1089 LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
1090 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1091 }
1092 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
1093 {
1094 LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
1095 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
1096 }
1097 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
1098 {
1099 LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
1100 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
1101 }
1102 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
1103 {
1104 LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
1105 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
1106 }
1107 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
1108 {
1109 LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
1110 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
1111 }
1112 else
1113 AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
1114 }
1115 else
1116 *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
1117 }
1118 else
1119 if ( *pFixup >= patmInfo.pStatsGC
1120 && *pFixup < patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
1121 {
1122 LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
1123 *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
1124 }
1125 else
1126 if ( *pFixup >= patmInfo.pGCStackGC
1127 && *pFixup < patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
1128 {
1129 LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
1130 *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
1131 }
1132 else
1133 if ( *pFixup >= patmInfo.pPatchMemGC
1134 && *pFixup < patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
1135 {
1136 LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
1137 *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
1138 }
1139 else
1140 /* Boldly ASSUMES:
1141 * 1. That pCPUMCtxGC is in the VM structure and that its location is
1142 * at the first page of the same 4 MB chunk.
1143 * 2. That the forced actions were in the first 32 bytes of the VM
1144 * structure.
1145 * 3. That the CPUM leafs are less than 8KB into the structure. */
1146 if ( ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
1147 && *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
1148 {
1149 LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
1150 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1151 }
1152 else
1153 if ( ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
1154 && *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(8192))
1155 {
1156 static int cCpuidFixup = 0;
1157#ifdef LOG_ENABLED
1158 RTRCPTR oldFixup = *pFixup;
1159#endif
1160 /* very dirty assumptions about the cpuid patch and cpuid ordering. */
1161 switch(cCpuidFixup & 3)
1162 {
1163 case 0:
1164 *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
1165 break;
1166 case 1:
1167 *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
1168 break;
1169 case 2:
1170 *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
1171 break;
1172 case 3:
1173 *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
1174 break;
1175 }
1176 LogFlow(("Changing cpuid fixup %d from %RRv to %RRv\n", cCpuidFixup, oldFixup, *pFixup));
1177 cCpuidFixup++;
1178 }
1179 else
1180 if (ulSSMVersion >= PATM_SSM_VERSION)
1181 {
1182#ifdef LOG_ENABLED
1183 RTRCPTR oldFixup = *pFixup;
1184#endif
1185 /* Core.Key abused to store the type of fixup */
1186 switch ((uintptr_t)pRec->Core.Key)
1187 {
1188 case PATM_FIXUP_CPU_FF_ACTION:
1189 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1190 LogFlow(("Changing cpu ff action fixup from %x to %x\n", oldFixup, *pFixup));
1191 break;
1192 case PATM_FIXUP_CPUID_DEFAULT:
1193 *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
1194 LogFlow(("Changing cpuid def fixup from %x to %x\n", oldFixup, *pFixup));
1195 break;
1196 case PATM_FIXUP_CPUID_STANDARD:
1197 *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
1198 LogFlow(("Changing cpuid std fixup from %x to %x\n", oldFixup, *pFixup));
1199 break;
1200 case PATM_FIXUP_CPUID_EXTENDED:
1201 *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
1202 LogFlow(("Changing cpuid ext fixup from %x to %x\n", oldFixup, *pFixup));
1203 break;
1204 case PATM_FIXUP_CPUID_CENTAUR:
1205 *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
1206 LogFlow(("Changing cpuid centaur fixup from %x to %x\n", oldFixup, *pFixup));
1207 break;
1208 default:
1209 AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
1210 break;
1211 }
1212 }
1213
1214#ifdef RT_OS_WINDOWS
1215 AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
1216#endif
1217 break;
1218 }
1219
1220 case FIXUP_REL_JMPTOPATCH:
1221 {
1222 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
1223
1224 if ( pPatch->uState == PATCH_ENABLED
1225 && (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
1226 {
1227 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
1228 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
1229 RTRCPTR pJumpOffGC;
1230 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
1231 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
1232
1233 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1234
1235 Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
1236#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
1237 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
1238 {
1239 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
1240
1241 pJumpOffGC = pPatch->pPrivInstrGC + 2; //two byte opcode
1242 oldJump[0] = pPatch->aPrivInstr[0];
1243 oldJump[1] = pPatch->aPrivInstr[1];
1244 *(RTRCUINTPTR *)&oldJump[2] = displOld;
1245 }
1246 else
1247#endif
1248 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
1249 {
1250 pJumpOffGC = pPatch->pPrivInstrGC + 1; //one byte opcode
1251 oldJump[0] = 0xE9;
1252 *(RTRCUINTPTR *)&oldJump[1] = displOld;
1253 }
1254 else
1255 {
1256 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
1257 break;
1258 }
1259 Assert(pPatch->cbPatchJump <= sizeof(temp));
1260
1261 /*
1262 * Read old patch jump and compare it to the one we previously installed
1263 */
1264 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
1265 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1266
1267 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1268 {
1269 RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;
1270
1271 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
1272 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
1273 }
1274 else
1275 if (memcmp(temp, oldJump, pPatch->cbPatchJump))
1276 {
1277 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
1278 /*
1279 * Disable patch; this is not a good solution
1280 */
1281 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
1282 pPatch->uState = PATCH_DISABLED;
1283 }
1284 else
1285 if (RT_SUCCESS(rc))
1286 {
1287 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
1288 AssertRC(rc);
1289 }
1290 else
1291 {
1292 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
1293 }
1294 }
1295 else
1296 {
1297 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->pPrivInstrHC, pRec->pRelocPos));
1298 }
1299
1300 pRec->pDest = pTarget;
1301 break;
1302 }
1303
1304 case FIXUP_REL_JMPTOGUEST:
1305 {
1306 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
1307 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
1308
1309 Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
1310 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1311 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
1312 pRec->pSource = pSource;
1313 break;
1314
1315 }
1316}
1317}
1318
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette