VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMSSM.cpp@ 23792

Last change on this file since 23792 was 23789, checked in by vboxsync, 16 years ago

PATM: build fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 56.3 KB
Line 
1/* $Id: PATMSSM.cpp 23789 2009-10-15 00:46:29Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/hwaccm.h>
30#include <VBox/stam.h>
31#include <VBox/pgm.h>
32#include <VBox/cpum.h>
33#include <VBox/iom.h>
34#include <VBox/sup.h>
35#include <VBox/mm.h>
36#include <VBox/ssm.h>
37#include <VBox/pdm.h>
38#include <VBox/trpm.h>
39#include <VBox/param.h>
40#include <iprt/avl.h>
41#include "PATMInternal.h"
42#include "PATMPatch.h"
43#include "PATMA.h"
44#include <VBox/vm.h>
45#include <VBox/csam.h>
46
47#include <VBox/dbg.h>
48#include <VBox/err.h>
49#include <VBox/log.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#include <iprt/string.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55
56/*******************************************************************************
57* Defined Constants And Macros *
58*******************************************************************************/
59#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
60#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
66
67/*******************************************************************************
68* Global Variables *
69*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * Entries marked IGNORE are skipped by the load code (host-context pointers and
 * statistics that must be recalculated or re-registered after a restore);
 * RCPTR/GCPHYS entries are raw-context / guest-physical addresses saved as-is.
 * The PAD entries compensate for host-bitness dependent padding so that 32-bit
 * and 64-bit hosts produce compatible streams.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* The statistics below are ignored on load; they are re-registered with STAM. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
166
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 *
 * All fields are saved/restored; the three GC*Addr members are raw-context
 * pointers and use the RCPTR entry type so the stream is host-bitness neutral.
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(                 PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(                 PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
191
/**
 * SSM descriptor table for the PATMPATCHREC structure.
 *
 * The AVL tree linkage (pLeft/pRight/uchHeight) of both embedded cores is
 * ignored; only the keys are saved and the trees are rebuilt on load.
 * Host-context pointers (HCPTR entries) are likewise ignored and
 * recalculated after restore.
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHREC, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHREC, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.pPrivInstrHC),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHREC, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHREC, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHREC, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.cacheRec.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.cacheRec.pPatchLocEndHC),   // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHREC, patch.cacheRec.pGuestLoc),        // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, patch.cacheRec.opsize),           // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHREC, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHREC, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHREC, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
245
/**
 * SSM descriptor table for the RELOCREC structure.
 *
 * Core.Key and pRelocPos are host pointers squeezed into 32 bits via the
 * HCPTR_HACK_U32 entry type: the save code repurposes Core.Key for the
 * fixup type and rewrites pRelocPos as an offset into patch memory
 * (see patmSaveFixupRecords).
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key),        // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos),       // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
263
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 *
 * Only the key and the payload fields are saved; the AVL tree linkage is
 * ignored and the tree is reconstructed on load.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 1),
    SSMFIELD_ENTRY_TERM()
};
282
283
284#ifdef VBOX_STRICT
285/**
286 * Callback function for RTAvlPVDoWithAll
287 *
288 * Counts the number of patches in the tree
289 *
290 * @returns VBox status code.
291 * @param pNode Current node
292 * @param pcPatches Pointer to patch counter (uint32_t)
293 */
294static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
295{
296 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
297 return VINF_SUCCESS;
298}
299
300/**
301 * Callback function for RTAvlU32DoWithAll
302 *
303 * Counts the number of patches in the tree
304 *
305 * @returns VBox status code.
306 * @param pNode Current node
307 * @param pcPatches Pointer to patch counter (uint32_t)
308 */
309static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
310{
311 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
312 return VINF_SUCCESS;
313}
314#endif /* VBOX_STRICT */
315
316/**
317 * Callback function for RTAvloU32DoWithAll
318 *
319 * Counts the number of patches in the tree
320 *
321 * @returns VBox status code.
322 * @param pNode Current node
323 * @param pcPatches Pointer to patch counter
324 */
325static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
326{
327 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
328 return VINF_SUCCESS;
329}
330
331/**
332 * Callback function for RTAvlU32DoWithAll
333 *
334 * Saves all patch to guest lookup records.
335 *
336 * @returns VBox status code.
337 * @param pNode Current node
338 * @param pVM1 VM Handle
339 */
340static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
341{
342 PVM pVM = (PVM)pVM1;
343 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
344 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
345
346 /* Save the lookup record. */
347 int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
348 AssertRCReturn(rc, rc);
349
350 return VINF_SUCCESS;
351}
352
353/**
354 * Callback function for RTAvlPVDoWithAll
355 *
356 * Saves all patch to guest lookup records.
357 *
358 * @returns VBox status code.
359 * @param pNode Current node
360 * @param pVM1 VM Handle
361 */
362static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
363{
364 PVM pVM = (PVM)pVM1;
365 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
366 RELOCREC rec = *(PRELOCREC)pNode;
367 RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos;
368
369 Assert(rec.pRelocPos);
370 /* Convert pointer to an offset into patch memory. */
371 PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
372
373 if (rec.uType == FIXUP_ABSOLUTE)
374 {
375 /* Core.Key abused to store the fixup type. */
376 if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
377 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
378 else
379 if (*pFixup == CPUMR3GetGuestCpuIdDefRCPtr(pVM))
380 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
381 else
382 if (*pFixup == CPUMR3GetGuestCpuIdStdRCPtr(pVM))
383 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
384 else
385 if (*pFixup == CPUMR3GetGuestCpuIdExtRCPtr(pVM))
386 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
387 else
388 if (*pFixup == CPUMR3GetGuestCpuIdCentaurRCPtr(pVM))
389 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
390 }
391
392 /* Save the lookup record. */
393 int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
394 AssertRCReturn(rc, rc);
395
396 return VINF_SUCCESS;
397}
398
399
400/**
401 * Callback function for RTAvloU32DoWithAll
402 *
403 * Saves the state of the patch that's being enumerated
404 *
405 * @returns VBox status code.
406 * @param pNode Current node
407 * @param pVM1 VM Handle
408 */
409static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
410{
411 PVM pVM = (PVM)pVM1;
412 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
413 PATMPATCHREC patch = *pPatch;
414 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
415 int rc;
416
417 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
418
419 /*
420 * Reset HC pointers that need to be recalculated when loading the state
421 */
422 AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
423 ("State = %x pPrivInstrHC=%08x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, patch.patch.pPrivInstrHC, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
424 Assert(pPatch->patch.JumpTree == 0);
425 Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
426 Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);
427
428 memset(&patch.patch.cacheRec, 0, sizeof(patch.patch.cacheRec));
429
430 /* Save the patch record itself */
431 rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
432 AssertRCReturn(rc, rc);
433
434 /*
435 * Reset HC pointers in fixup records and save them.
436 */
437#ifdef VBOX_STRICT
438 uint32_t nrFixupRecs = 0;
439 RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
440 AssertMsg((int32_t)nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
441#endif
442 RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);
443
444#ifdef VBOX_STRICT
445 uint32_t nrLookupRecords = 0;
446 RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
447 Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
448#endif
449
450 RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
451 return VINF_SUCCESS;
452}
453
/**
 * Execute state save operation.
 *
 * Stream layout: PATM structure, raw patch memory, PATMGCSTATE, the PATM
 * stack page, then one record per patch (see patmSavePatchState).
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /* Work on a copy so the live PATM instance data is not modified. */
    PATM patmInfo = pVM->patm.s;
    int  rc;

    /* Stash the SSM handle for the tree-enumeration callbacks. */
    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC   = NULL;
    patmInfo.pGCStateHC    = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
517
518/**
519 * Execute state load operation.
520 *
521 * @returns VBox status code.
522 * @param pVM VM Handle.
523 * @param pSSM SSM operation handle.
524 * @param uVersion Data layout version.
525 * @param uPass The data pass.
526 */
527DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
528{
529 PATM patmInfo;
530 int rc;
531
532 if ( uVersion != PATM_SSM_VERSION
533 && uVersion != PATM_SSM_VERSION_FIXUP_HACK
534 && uVersion != PATM_SSM_VERSION_VER16
535#ifdef PATM_WITH_NEW_SSM
536 && uVersion != PATM_SSM_VERSION_GETPUTMEM)
537#else
538 )
539#endif
540 {
541 AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
542 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
543 }
544 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
545
546 pVM->patm.s.savedstate.pSSM = pSSM;
547
548 /*
549 * Restore PATM structure
550 */
551#ifdef PATM_WITH_NEW_SSM
552 if (uVersion == PATM_SSM_VERSION_GETPUTMEM)
553 {
554#endif
555#if 0
556 rc = SSMR3GetMem(pSSM, &patmInfo, sizeof(patmInfo));
557#else
558 RT_ZERO(patmInfo);
559 rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmFields[0], NULL);
560#endif
561 AssertRCReturn(rc, rc);
562
563#ifdef PATM_WITH_NEW_SSM
564 }
565 else
566 {
567 memset(&patmInfo, 0, sizeof(patmInfo));
568
569 AssertCompile(sizeof(patmInfo.pGCStateGC) == sizeof(RTRCPTR));
570 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStateGC);
571 AssertRCReturn(rc, rc);
572
573 AssertCompile(sizeof(patmInfo.pCPUMCtxGC) == sizeof(RTRCPTR));
574 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pCPUMCtxGC);
575 AssertRCReturn(rc, rc);
576
577 AssertCompile(sizeof(patmInfo.pStatsGC) == sizeof(RTRCPTR));
578 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pStatsGC);
579 AssertRCReturn(rc, rc);
580
581 AssertCompile(sizeof(patmInfo.pfnHelperCallGC) == sizeof(RTRCPTR));
582 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperCallGC);
583 AssertRCReturn(rc, rc);
584
585 AssertCompile(sizeof(patmInfo.pfnHelperRetGC) == sizeof(RTRCPTR));
586 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperRetGC);
587 AssertRCReturn(rc, rc);
588
589 AssertCompile(sizeof(patmInfo.pfnHelperJumpGC) == sizeof(RTRCPTR));
590 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperJumpGC);
591 AssertRCReturn(rc, rc);
592
593 AssertCompile(sizeof(patmInfo.pfnHelperIretGC) == sizeof(RTRCPTR));
594 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperIretGC);
595 AssertRCReturn(rc, rc);
596
597 AssertCompile(sizeof(patmInfo.pPatchMemGC) == sizeof(RTRCPTR));
598 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchMemGC);
599 AssertRCReturn(rc, rc);
600
601 AssertCompile(sizeof(patmInfo.cbPatchMem) == sizeof(uint32_t));
602 rc = SSMR3GetU32(pSSM, &patmInfo.cbPatchMem);
603 AssertRCReturn(rc, rc);
604
605 AssertCompile(sizeof(patmInfo.offPatchMem) == sizeof(uint32_t));
606 rc = SSMR3GetU32(pSSM, &patmInfo.offPatchMem);
607 AssertRCReturn(rc, rc);
608
609 AssertCompile(sizeof(patmInfo.deltaReloc) == sizeof(int32_t));
610 rc = SSMR3GetS32(pSSM, &patmInfo.deltaReloc);
611 AssertRCReturn(rc, rc);
612
613 AssertCompile(sizeof(patmInfo.uCurrentPatchIdx) == sizeof(uint32_t));
614 rc = SSMR3GetS32(pSSM, &patmInfo.uCurrentPatchIdx);
615 AssertRCReturn(rc, rc);
616
617 AssertCompile(sizeof(patmInfo.pPatchedInstrGCLowest) == sizeof(RTRCPTR));
618 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCLowest);
619 AssertRCReturn(rc, rc);
620
621 AssertCompile(sizeof(patmInfo.pPatchedInstrGCHighest) == sizeof(RTRCPTR));
622 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCHighest);
623 AssertRCReturn(rc, rc);
624
625 AssertCompile(sizeof(patmInfo.pfnSysEnterGC) == sizeof(RTRCPTR));
626 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterGC);
627 AssertRCReturn(rc, rc);
628
629 AssertCompile(sizeof(patmInfo.pfnSysEnterPatchGC) == sizeof(RTRCPTR));
630 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterPatchGC);
631 AssertRCReturn(rc, rc);
632
633 AssertCompile(sizeof(patmInfo.uSysEnterPatchIdx) == sizeof(uint32_t));
634 rc = SSMR3GetU32(pSSM, &patmInfo.uSysEnterPatchIdx);
635 AssertRCReturn(rc, rc);
636
637 AssertCompile(sizeof(patmInfo.ulCallDepth) == sizeof(uint32_t));
638 rc = SSMR3GetU32(pSSM, &patmInfo.ulCallDepth);
639 AssertRCReturn(rc, rc);
640
641 AssertCompile(sizeof(patmInfo.pGCStackGC) == sizeof(RTRCPTR));
642 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStackGC);
643 AssertRCReturn(rc, rc);
644
645 AssertCompile(sizeof(patmInfo.cPageRecords) == sizeof(uint32_t));
646 rc = SSMR3GetU32(pSSM, &patmInfo.cPageRecords);
647 AssertRCReturn(rc, rc);
648
649 AssertCompile(sizeof(patmInfo.fOutOfMemory) == sizeof(bool));
650 rc = SSMR3GetBool(pSSM, &patmInfo.fOutOfMemory);
651 AssertRCReturn(rc, rc);
652
653 AssertCompile(sizeof(patmInfo.savedstate.cPatches) == sizeof(uint32_t));
654 rc = SSMR3GetU32(pSSM, &patmInfo.savedstate.cPatches);
655 AssertRCReturn(rc, rc);
656
657 }
658#endif
659
660 /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
661 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
662 if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
663 || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
664 || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
665 || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
666 {
667 AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
668 return VERR_SSM_INVALID_STATE;
669 }
670
671 if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
672 {
673 AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
674 return VERR_SSM_INVALID_STATE;
675 }
676 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
677 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
678 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
679 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
680
681 /* Lowest and highest patched instruction */
682 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
683 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
684
685 /* Sysenter handlers */
686 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
687 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
688 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
689
690 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
691
692 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
693 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
694 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
695 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
696
697
698 /** @note patch statistics are not restored. */
699
700 /*
701 * Restore patch memory contents
702 */
703 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
704 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
705 AssertRCReturn(rc, rc);
706
707 /*
708 * Restore GC state memory
709 */
710#ifdef PATM_WITH_NEW_SSM
711 if (uVersion == PATM_SSM_VERSION_GETPUTMEM)
712 {
713#endif
714#if 0
715 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
716#else
717 RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
718 rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmGCStateFields[0], NULL);
719#endif
720 AssertRCReturn(rc, rc);
721#ifdef PATM_WITH_NEW_SSM
722 }
723 else
724 {
725 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uVMFlags) == sizeof(uint32_t));
726 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uVMFlags);
727 AssertRCReturn(rc, rc);
728
729 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPendingAction) == sizeof(uint32_t));
730 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPendingAction);
731 AssertRCReturn(rc, rc);
732
733 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPatchCalls) == sizeof(uint32_t));
734 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPatchCalls);
735 AssertRCReturn(rc, rc);
736
737 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uScratch) == sizeof(uint32_t));
738 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uScratch);
739 AssertRCReturn(rc, rc);
740
741 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEFlags) == sizeof(uint32_t));
742 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEFlags);
743 AssertRCReturn(rc, rc);
744
745 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretCS) == sizeof(uint32_t));
746 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretCS);
747 AssertRCReturn(rc, rc);
748
749 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEIP) == sizeof(uint32_t));
750 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEIP);
751 AssertRCReturn(rc, rc);
752
753 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Psp) == sizeof(uint32_t));
754 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Psp);
755 AssertRCReturn(rc, rc);
756
757 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->fPIF) == sizeof(uint32_t));
758 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->fPIF);
759 AssertRCReturn(rc, rc);
760
761 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts) == sizeof(RTRCPTR));
762 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts);
763 AssertRCReturn(rc, rc);
764
765 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr) == sizeof(RTRCPTR));
766 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr);
767 AssertRCReturn(rc, rc);
768
769 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallReturnAddr) == sizeof(RTRCPTR));
770 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallReturnAddr);
771 AssertRCReturn(rc, rc);
772
773 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEAX) == sizeof(uint32_t));
774 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEAX);
775 AssertRCReturn(rc, rc);
776
777 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uECX) == sizeof(uint32_t));
778 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uECX);
779 AssertRCReturn(rc, rc);
780
781 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEDI) == sizeof(uint32_t));
782 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEDI);
783 AssertRCReturn(rc, rc);
784
785 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.eFlags) == sizeof(uint32_t));
786 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.eFlags);
787 AssertRCReturn(rc, rc);
788
789 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uFlags) == sizeof(uint32_t));
790 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uFlags);
791 AssertRCReturn(rc, rc);
792 }
793#endif
794
795 /*
796 * Restore PATM stack page
797 */
798 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
799 AssertRCReturn(rc, rc);
800
801 /*
802 * Load all patches
803 */
804 for (uint32_t i=0;i<patmInfo.savedstate.cPatches;i++)
805 {
806 PATMPATCHREC patch, *pPatchRec;
807
808#if 0
809 rc = SSMR3GetMem(pSSM, &patch, sizeof(patch));
810#else
811 RT_ZERO(patch);
812 rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmPatchRecFields[0], NULL);
813#endif
814 AssertRCReturn(rc, rc);
815
816 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
817
818 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
819 if (RT_FAILURE(rc))
820 {
821 AssertMsgFailed(("Out of memory!!!!\n"));
822 return VERR_NO_MEMORY;
823 }
824 /*
825 * Only restore the patch part of the tree record; not the internal data (except the key of course)
826 */
827 pPatchRec->patch = patch.patch;
828 pPatchRec->Core.Key = patch.Core.Key;
829 pPatchRec->CoreOffset.Key = patch.CoreOffset.Key;
830
831 Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
832 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
833 Assert(ret);
834 if (pPatchRec->patch.uState != PATCH_REFUSED)
835 {
836 if (pPatchRec->patch.pPatchBlockOffset)
837 {
838 /* We actually generated code for this patch. */
839 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
840 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
841 }
842 }
843 /* Set to zero as we don't need it anymore. */
844 pPatchRec->patch.pTempInfo = 0;
845
846 pPatchRec->patch.pPrivInstrHC = 0;
847 /* The GC virtual ptr is fixed, but we must convert it manually again to HC. */
848 int rc2 = rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
849 /* Can fail due to page or page table not present. */
850
851 /*
852 * Restore fixup records and correct HC pointers in fixup records
853 */
854 pPatchRec->patch.FixupTree = 0;
855 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
856 for (int i=0;i<patch.patch.nrFixups;i++)
857 {
858 RELOCREC rec;
859 int32_t offset;
860 RTRCPTR *pFixup;
861
862#if 0
863 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
864#else
865 RT_ZERO(rec);
866 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRelocRec[0], NULL);
867#endif
868 AssertRCReturn(rc, rc);
869
870 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
871 offset = (int32_t)(int64_t)rec.pRelocPos;
872 /* Convert to HC pointer again. */
873 PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
874 pFixup = (RTRCPTR *)rec.pRelocPos;
875
876 if (pPatchRec->patch.uState != PATCH_REFUSED)
877 {
878 if ( rec.uType == FIXUP_REL_JMPTOPATCH
879 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
880 {
881 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
882 unsigned offset = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
883
884 /** @todo This will fail & crash in patmCorrectFixup if the page isn't present
885 * when we restore. Happens with my XP image here
886 * (pPrivInstrGC=0x8069e051). */
887 AssertLogRelMsg(pPatchRec->patch.pPrivInstrHC, ("%RRv rc=%Rrc uState=%u\n", pPatchRec->patch.pPrivInstrGC, rc2, pPatchRec->patch.uState));
888 rec.pRelocPos = pPatchRec->patch.pPrivInstrHC + offset;
889 pFixup = (RTRCPTR *)rec.pRelocPos;
890 }
891
892 patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
893 }
894
895 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
896 AssertRCReturn(rc, rc);
897 }
898
899 /* And all patch to guest lookup records */
900 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
901
902 pPatchRec->patch.Patch2GuestAddrTree = 0;
903 pPatchRec->patch.Guest2PatchAddrTree = 0;
904 if (pPatchRec->patch.nrPatch2GuestRecs)
905 {
906 RECPATCHTOGUEST rec;
907 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
908
909 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
910 for (uint32_t i=0;i<nrPatch2GuestRecs;i++)
911 {
912#if 0
913 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
914#else
915 RT_ZERO(rec);
916 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRecPatchToGuest[0], NULL);
917#endif
918
919 AssertRCReturn(rc, rc);
920
921 patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
922 }
923 Assert(pPatchRec->patch.Patch2GuestAddrTree);
924 }
925
926 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
927 {
928 /* Insert the guest page lookup records (for detection self-modifying code) */
929 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
930 AssertRCReturn(rc, rc);
931 }
932
933#if 0 /* can fail def LOG_ENABLED */
934 if ( pPatchRec->patch.uState != PATCH_REFUSED
935 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
936 {
937 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
938 Log(("Patch code ----------------------------------------------------------\n"));
939 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
940 Log(("Patch code ends -----------------------------------------------------\n"));
941 MMR3HeapFree(pPatchRec->patch.pTempInfo);
942 pPatchRec->patch.pTempInfo = NULL;
943 }
944#endif
945
946 }
947
948 /*
949 * Correct absolute fixups in the global patch. (helper functions)
950 * Bit of a mess. Uses the new patch record, but restored patch functions.
951 */
952 PRELOCREC pRec = 0;
953 AVLPVKEY key = 0;
954
955 Log(("Correct fixups in global helper functions\n"));
956 while (true)
957 {
958 int32_t offset;
959 RTRCPTR *pFixup;
960
961 /* Get the record that's closest from above */
962 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
963 if (pRec == 0)
964 break;
965
966 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
967
968 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
969 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
970 pFixup = (RTRCPTR *)pRec->pRelocPos;
971
972 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
973 patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
974 }
975
976#ifdef VBOX_WITH_STATISTICS
977 /*
978 * Restore relevant old statistics
979 */
980 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
981 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
982 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
983 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
984#endif
985
986 return VINF_SUCCESS;
987}
988
989/**
990 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
991 *
992 * @returns VBox status code.
993 * @param pVM VM Handle.
994 * @param ulSSMVersion SSM version
995 * @param patmInfo Saved PATM structure
996 * @param pPatch Patch record
997 * @param pRec Relocation record
998 * @param offset Offset of referenced data/code
999 * @param pFixup Fixup address
1000 */
1001static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
1002{
1003 int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;
1004
1005 switch (pRec->uType)
1006 {
1007 case FIXUP_ABSOLUTE:
1008 {
1009 if (pRec->pSource && !PATMIsPatchGCAddr(pVM, pRec->pSource))
1010 break;
1011
1012 if ( *pFixup >= patmInfo.pGCStateGC
1013 && *pFixup < patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
1014 {
1015 LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
1016 *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
1017 }
1018 else
1019 if ( *pFixup >= patmInfo.pCPUMCtxGC
1020 && *pFixup < patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
1021 {
1022 LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));
1023
1024 /* The CPUMCTX structure has completely changed, so correct the offsets too. */
1025 if (ulSSMVersion == PATM_SSM_VERSION_VER16)
1026 {
1027 unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;
1028
1029 /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
1030 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
1031 * function is not available in older gcc versions, at least not in gcc-3.3 */
1032 if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
1033 {
1034 LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
1035 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
1036 }
1037 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
1038 {
1039 LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
1040 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
1041 }
1042 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
1043 {
1044 LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
1045 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
1046 }
1047 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
1048 {
1049 LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
1050 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
1051 }
1052 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
1053 {
1054 LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
1055 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
1056 }
1057 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
1058 {
1059 LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
1060 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
1061 }
1062 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
1063 {
1064 LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
1065 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
1066 }
1067 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
1068 {
1069 LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
1070 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
1071 }
1072 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
1073 {
1074 LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
1075 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
1076 }
1077 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
1078 {
1079 LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
1080 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
1081 }
1082 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
1083 {
1084 LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
1085 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
1086 }
1087 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
1088 {
1089 LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
1090 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
1091 }
1092 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
1093 {
1094 LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
1095 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1096 }
1097 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
1098 {
1099 LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
1100 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1101 }
1102 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
1103 {
1104 LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
1105 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
1106 }
1107 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
1108 {
1109 LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
1110 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
1111 }
1112 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
1113 {
1114 LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
1115 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
1116 }
1117 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
1118 {
1119 LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
1120 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
1121 }
1122 else
1123 AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
1124 }
1125 else
1126 *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
1127 }
1128 else
1129 if ( *pFixup >= patmInfo.pStatsGC
1130 && *pFixup < patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
1131 {
1132 LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
1133 *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
1134 }
1135 else
1136 if ( *pFixup >= patmInfo.pGCStackGC
1137 && *pFixup < patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
1138 {
1139 LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
1140 *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
1141 }
1142 else
1143 if ( *pFixup >= patmInfo.pPatchMemGC
1144 && *pFixup < patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
1145 {
1146 LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
1147 *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
1148 }
1149 else
1150 if ( ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
1151 && *pFixup >= pVM->pVMRC
1152 && *pFixup < pVM->pVMRC + 32)
1153 {
1154 LogFlow(("Changing fLocalForcedActions fixup from %x to %x\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
1155 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1156 }
1157 else
1158 if ( ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
1159 && *pFixup >= pVM->pVMRC
1160 && *pFixup < pVM->pVMRC + 8192)
1161 {
1162 static int cCpuidFixup = 0;
1163#ifdef LOG_ENABLED
1164 RTRCPTR oldFixup = *pFixup;
1165#endif
1166 /* very dirty assumptions about the cpuid patch and cpuid ordering. */
1167 switch(cCpuidFixup & 3)
1168 {
1169 case 0:
1170 *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
1171 break;
1172 case 1:
1173 *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
1174 break;
1175 case 2:
1176 *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
1177 break;
1178 case 3:
1179 *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
1180 break;
1181 }
1182 LogFlow(("Changing cpuid fixup %d from %x to %x\n", cCpuidFixup, oldFixup, *pFixup));
1183 cCpuidFixup++;
1184 }
1185 else
1186 if (ulSSMVersion >= PATM_SSM_VERSION)
1187 {
1188#ifdef LOG_ENABLED
1189 RTRCPTR oldFixup = *pFixup;
1190#endif
1191 /* Core.Key abused to store the type of fixup */
1192 switch ((uintptr_t)pRec->Core.Key)
1193 {
1194 case PATM_FIXUP_CPU_FF_ACTION:
1195 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1196 LogFlow(("Changing cpu ff action fixup from %x to %x\n", oldFixup, *pFixup));
1197 break;
1198 case PATM_FIXUP_CPUID_DEFAULT:
1199 *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
1200 LogFlow(("Changing cpuid def fixup from %x to %x\n", oldFixup, *pFixup));
1201 break;
1202 case PATM_FIXUP_CPUID_STANDARD:
1203 *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
1204 LogFlow(("Changing cpuid std fixup from %x to %x\n", oldFixup, *pFixup));
1205 break;
1206 case PATM_FIXUP_CPUID_EXTENDED:
1207 *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
1208 LogFlow(("Changing cpuid ext fixup from %x to %x\n", oldFixup, *pFixup));
1209 break;
1210 case PATM_FIXUP_CPUID_CENTAUR:
1211 *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
1212 LogFlow(("Changing cpuid centaur fixup from %x to %x\n", oldFixup, *pFixup));
1213 break;
1214 default:
1215 AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
1216 break;
1217 }
1218 }
1219
1220#ifdef RT_OS_WINDOWS
1221 AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
1222#endif
1223 break;
1224 }
1225
1226 case FIXUP_REL_JMPTOPATCH:
1227 {
1228 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
1229
1230 if ( pPatch->uState == PATCH_ENABLED
1231 && (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
1232 {
1233 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
1234 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
1235 RTRCPTR pJumpOffGC;
1236 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
1237 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
1238
1239 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1240
1241 Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
1242#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
1243 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
1244 {
1245 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
1246
1247 pJumpOffGC = pPatch->pPrivInstrGC + 2; //two byte opcode
1248 oldJump[0] = pPatch->aPrivInstr[0];
1249 oldJump[1] = pPatch->aPrivInstr[1];
1250 *(RTRCUINTPTR *)&oldJump[2] = displOld;
1251 }
1252 else
1253#endif
1254 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
1255 {
1256 pJumpOffGC = pPatch->pPrivInstrGC + 1; //one byte opcode
1257 oldJump[0] = 0xE9;
1258 *(RTRCUINTPTR *)&oldJump[1] = displOld;
1259 }
1260 else
1261 {
1262 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
1263 break;
1264 }
1265 Assert(pPatch->cbPatchJump <= sizeof(temp));
1266
1267 /*
1268 * Read old patch jump and compare it to the one we previously installed
1269 */
1270 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
1271 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1272
1273 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1274 {
1275 RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;
1276
1277 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
1278 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
1279 }
1280 else
1281 if (memcmp(temp, oldJump, pPatch->cbPatchJump))
1282 {
1283 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
1284 /*
1285 * Disable patch; this is not a good solution
1286 */
1287 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
1288 pPatch->uState = PATCH_DISABLED;
1289 }
1290 else
1291 if (RT_SUCCESS(rc))
1292 {
1293 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
1294 AssertRC(rc);
1295 }
1296 else
1297 {
1298 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
1299 }
1300 }
1301 else
1302 {
1303 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->pPrivInstrHC, pRec->pRelocPos));
1304 }
1305
1306 pRec->pDest = pTarget;
1307 break;
1308 }
1309
1310 case FIXUP_REL_JMPTOGUEST:
1311 {
1312 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
1313 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
1314
1315 Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
1316 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1317 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
1318 pRec->pSource = pSource;
1319 break;
1320
1321 }
1322}
1323}
1324
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette