VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMSSM.cpp@ 11972

Last change on this file since 11972 was 11972, checked in by vboxsync, 16 years ago

More logging

/* $Id: PATMSSM.cpp 11972 2008-09-02 11:33:35Z vboxsync $ */
/** @file
 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
 *
 * NOTE: CSAM assumes patch memory is never reused!!
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/patm.h>
#include <VBox/stam.h>
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/ssm.h>
#include <VBox/pdm.h>
#include <VBox/trpm.h>
#include <VBox/param.h>
#include <iprt/avl.h>
#include "PATMInternal.h"
#include "PATMPatch.h"
#include "PATMA.h"
#include <VBox/vm.h>
#include <VBox/csam.h>

#include <VBox/dbg.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>

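/*
 * The two macros below rewrite a pointer member in place: PATM_SUBTRACT_PTR turns an
 * absolute HC pointer into an offset relative to a base (used when saving, with the
 * patch memory base), and PATM_ADD_PTR turns such an offset back into an absolute
 * pointer (used when loading).
 */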
#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
#define PATM_ADD_PTR(a, b)      *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)

static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);

#ifdef VBOX_STRICT
/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Counts the number of patches in the tree
 *
 * @returns VBox status code.
 * @param   pNode       Current node
 * @param   pcPatches   Pointer to patch counter (uint32_t)
 */
static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
{
    *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
    return VINF_SUCCESS;
}

/**
 * Callback function for RTAvlU32DoWithAll
 *
 * Counts the number of patches in the tree
 *
 * @returns VBox status code.
 * @param   pNode       Current node
 * @param   pcPatches   Pointer to patch counter (uint32_t)
 */
static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
{
    *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
    return VINF_SUCCESS;
}
#endif /* VBOX_STRICT */

/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Counts the number of patches in the tree
 *
 * @returns VBox status code.
 * @param   pNode       Current node
 * @param   pcPatches   Pointer to patch counter
 */
static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
{
    *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
    return VINF_SUCCESS;
}

/**
 * Callback function for RTAvlU32DoWithAll
 *
 * Saves all patch to guest lookup records.
 *
 * @returns VBox status code.
 * @param   pNode   Current node
 * @param   pVM1    VM Handle
 */
static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;

    /* Save the lookup record. */
    int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}

/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Saves all fixup records.
 *
 * @returns VBox status code.
 * @param   pNode   Current node
 * @param   pVM1    VM Handle
 */
static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    RELOCREC rec = *(PRELOCREC)pNode;

    Assert(rec.pRelocPos);
    PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);

    /* Save the fixup record. */
    int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}


/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Saves the state of the patch that's being enumerated
 *
 * @returns VBox status code.
 * @param   pNode   Current node
 * @param   pVM1    VM Handle
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHREC patch = *pPatch;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    int rc;

    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPrivInstrHC=%08x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, patch.patch.pPrivInstrHC, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    memset(&patch.patch.cacheRec, 0, sizeof(patch.patch.cacheRec));

    /* Save the patch record itself */
    rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg((int32_t)nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);

#ifdef VBOX_STRICT
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
    return VINF_SUCCESS;
}

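/*
 * Overview of the saved state layout produced by patmr3Save below (and consumed by
 * patmr3Load): the PATM instance data, the raw patch memory contents, the PATMGCSTATE
 * block, the PATM stack page(s), and finally one record per patch, each followed by
 * its fixup records and its patch-to-guest lookup records.
 */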
/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmr3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PATM patmInfo = pVM->patm.s;
    int rc;

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC = NULL;
    patmInfo.pGCStateHC = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}

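/*
 * Note: patmr3Save above and patmr3Load below are PATM's SSM unit callbacks; they are
 * presumably hooked up from the PATM init code via the SSMR3Register* API (the
 * registration is not part of this file).
 */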
/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
DECLCALLBACK(int) patmr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    PATM patmInfo;
    int rc;

    if (    u32Version != PATM_SSM_VERSION
        &&  u32Version != PATM_SSM_VERSION_VER16
#ifdef PATM_WITH_NEW_SSM
        &&  u32Version != PATM_SSM_VERSION_GETPUTMEM)
#else
       )
#endif
    {
        AssertMsgFailed(("patmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Restore PATM structure
     */
#ifdef PATM_WITH_NEW_SSM
    if (u32Version == PATM_SSM_VERSION_GETPUTMEM)
    {
#endif
    rc = SSMR3GetMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);
#ifdef PATM_WITH_NEW_SSM
    }
    else
    {
        memset(&patmInfo, 0, sizeof(patmInfo));

        AssertCompile(sizeof(patmInfo.pGCStateGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStateGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pCPUMCtxGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pCPUMCtxGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pStatsGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pStatsGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pfnHelperCallGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperCallGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pfnHelperRetGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperRetGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pfnHelperJumpGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperJumpGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pfnHelperIretGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperIretGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pPatchMemGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchMemGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.cbPatchMem) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &patmInfo.cbPatchMem);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.offPatchMem) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &patmInfo.offPatchMem);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.deltaReloc) == sizeof(int32_t));
        rc = SSMR3GetS32(pSSM, &patmInfo.deltaReloc);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.uCurrentPatchIdx) == sizeof(uint32_t));
        rc = SSMR3GetS32(pSSM, &patmInfo.uCurrentPatchIdx);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pPatchedInstrGCLowest) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCLowest);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pPatchedInstrGCHighest) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCHighest);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pfnSysEnterGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pfnSysEnterPatchGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterPatchGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.uSysEnterPatchIdx) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &patmInfo.uSysEnterPatchIdx);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.ulCallDepth) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &patmInfo.ulCallDepth);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.pGCStackGC) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStackGC);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.cPageRecords) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &patmInfo.cPageRecords);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.fOutOfMemory) == sizeof(bool));
        rc = SSMR3GetBool(pSSM, &patmInfo.fOutOfMemory);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(patmInfo.savedstate.cPatches) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &patmInfo.savedstate.cPatches);
        AssertRCReturn(rc, rc);

    }
#endif

    /* Relative calls are made to the helper functions. Therefore their relative location (offset from the start of patch memory) must not change! */
    /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
    if (    (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
        ||  (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
        ||  (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
        ||  (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
    {
        AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
        return VERR_SSM_INVALID_STATE;
    }

    if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
    {
        AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
        return VERR_SSM_INVALID_STATE;
    }
    pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
    pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
    pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
    pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;

    /* Lowest and highest patched instruction */
    pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
    pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;

    /* Sysenter handlers */
    pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
    pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
    pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;

    Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);

    Log(("pGCStateGC %VRv vs old %VRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
    Log(("pGCStackGC %VRv vs old %VRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
    Log(("pCPUMCtxGC %VRv vs old %VRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));

    /** @note patch statistics are not restored. */

    /*
     * Restore patch memory contents
     */
    Log(("Restore patch memory: new %VRv old %VRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Restore GC state memory
     */
#ifdef PATM_WITH_NEW_SSM
    if (u32Version == PATM_SSM_VERSION_GETPUTMEM)
    {
#endif
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);
#ifdef PATM_WITH_NEW_SSM
    }
    else
    {
        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uVMFlags) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uVMFlags);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPendingAction) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPendingAction);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPatchCalls) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPatchCalls);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uScratch) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uScratch);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEFlags) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEFlags);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretCS) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretCS);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEIP) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEIP);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Psp) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Psp);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->fPIF) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->fPIF);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallReturnAddr) == sizeof(RTRCPTR));
        rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallReturnAddr);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEAX) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEAX);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uECX) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uECX);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEDI) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEDI);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.eFlags) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.eFlags);
        AssertRCReturn(rc, rc);

        AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uFlags) == sizeof(uint32_t));
        rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uFlags);
        AssertRCReturn(rc, rc);
    }
#endif

    /*
     * Restore PATM stack page
     */
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Load all patches
     */
    for (uint32_t i=0;i<patmInfo.savedstate.cPatches;i++)
    {
        PATMPATCHREC patch, *pPatchRec;

        rc = SSMR3GetMem(pSSM, &patch, sizeof(patch));
        AssertRCReturn(rc, rc);

        Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));

        rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
        if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("Out of memory!!!!\n"));
            return VERR_NO_MEMORY;
        }
        /*
         * Only restore the patch part of the tree record; not the internal data (except the key of course)
         */
        pPatchRec->patch = patch.patch;
        pPatchRec->Core.Key = patch.Core.Key;
        pPatchRec->CoreOffset.Key = patch.CoreOffset.Key;

        Log(("Restoring patch %VRv -> %VRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
        bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
        Assert(ret);
        if (pPatchRec->patch.uState != PATCH_REFUSED)
        {
            if (pPatchRec->patch.pPatchBlockOffset)
            {
                /* We actually generated code for this patch. */
                ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
                AssertMsg(ret, ("Inserting patch %VGv offset %VGv failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
            }
        }
        /* Set to zero as we don't need it anymore. */
        pPatchRec->patch.pTempInfo = 0;

        pPatchRec->patch.pPrivInstrHC = 0;
        /* The GC virtual ptr is fixed, but we must convert it manually again to HC. */
        rc = PGMPhysGCPtr2HCPtr(pVM, pPatchRec->patch.pPrivInstrGC, (PRTHCPTR)&pPatchRec->patch.pPrivInstrHC);
        /* Can fail due to page or page table not present. */

        /*
         * Restore fixup records and correct HC pointers in fixup records
         */
        pPatchRec->patch.FixupTree = 0;
        pPatchRec->patch.nrFixups = 0;    /* increased by patmPatchAddReloc32 */
        for (int i=0;i<patch.patch.nrFixups;i++)
        {
            RELOCREC rec;
            int32_t offset;
            RTRCPTR *pFixup;

            rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
            AssertRCReturn(rc, rc);

            /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
            offset = (int32_t)(int64_t)rec.pRelocPos;
            /* Convert to HC pointer again. */
            PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
            pFixup = (RTRCPTR *)rec.pRelocPos;

            if (pPatchRec->patch.uState != PATCH_REFUSED)
            {
                if (    rec.uType == FIXUP_REL_JMPTOPATCH
                    &&  (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
                {
                    Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
                    unsigned offset = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;

                    Assert(pPatchRec->patch.pPrivInstrHC);
                    rec.pRelocPos = pPatchRec->patch.pPrivInstrHC + offset;
                    pFixup = (RTRCPTR *)rec.pRelocPos;
                }

                patmCorrectFixup(pVM, u32Version, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
            }

            rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
            AssertRCReturn(rc, rc);
        }

        /* And all patch to guest lookup records */
        Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));

        pPatchRec->patch.Patch2GuestAddrTree = 0;
        pPatchRec->patch.Guest2PatchAddrTree = 0;
        if (pPatchRec->patch.nrPatch2GuestRecs)
        {
            RECPATCHTOGUEST rec;
            uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;

            pPatchRec->patch.nrPatch2GuestRecs = 0;    /* incremented by patmr3AddP2GLookupRecord */
            for (uint32_t i=0;i<nrPatch2GuestRecs;i++)
            {
                rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
                AssertRCReturn(rc, rc);

                patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
            }
            Assert(pPatchRec->patch.Patch2GuestAddrTree);
        }

        if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
        {
            /* Insert the guest page lookup records (for detection of self-modifying code) */
            rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
            AssertRCReturn(rc, rc);
        }

#ifdef LOG_ENABLED
        if (    pPatchRec->patch.uState != PATCH_REFUSED
            &&  !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
        {
            pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
            Log(("Patch code ----------------------------------------------------------\n"));
            patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
            Log(("Patch code ends -----------------------------------------------------\n"));
            MMR3HeapFree(pPatchRec->patch.pTempInfo);
            pPatchRec->patch.pTempInfo = NULL;
        }
#endif

    }

    /*
     * Correct absolute fixups in the global patch. (helper functions)
     * Bit of a mess. Uses the new patch record, but restored patch functions.
     */
    PRELOCREC pRec = 0;
    AVLPVKEY key = 0;

    Log(("Correct fixups in global helper functions\n"));
    while (true)
    {
        int32_t offset;
        RTRCPTR *pFixup;

        /* Get the record that's closest from above */
        pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
        if (pRec == 0)
            break;

        key = (AVLPVKEY)(pRec->pRelocPos + 1);   /* search for the next record during the next round. */

        /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
        offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
        pFixup = (RTRCPTR *)pRec->pRelocPos;

        /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
        patmCorrectFixup(pVM, u32Version, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
    }

#ifdef VBOX_WITH_STATISTICS
    /*
     * Restore relevant old statistics
     */
    pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
    pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
    pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
    pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
#endif
    return VINF_SUCCESS;
}

/**
 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   ulSSMVersion    SSM version
 * @param   patmInfo        Saved PATM structure
 * @param   pPatch          Patch record
 * @param   pRec            Relocation record
 * @param   offset          Offset of referenced data/code
 * @param   pFixup          Fixup address
 */
static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
{
    int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;

    switch (pRec->uType)
    {
    case FIXUP_ABSOLUTE:
    {
        if (pRec->pSource && !PATMIsPatchGCAddr(pVM, pRec->pSource))
            break;

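        /*
         * The absolute address stored at this fixup location referred to one of the PATM
         * data areas of the saved VM (GC state, CPUMCTX, statistics, stack or the patch
         * memory itself).  Work out which saved region it fell into and rebase it onto
         * the corresponding region of the current VM.
         */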
        if (    patmInfo.pPatchMemGC + offset >= patmInfo.pGCStateGC
            &&  patmInfo.pPatchMemGC + offset < patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
        {
            LogFlow(("Changing absolute GCState from %VRv (%VRv) to %VRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
            *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
        }
        else
        if (    patmInfo.pPatchMemGC + offset >= patmInfo.pCPUMCtxGC
            &&  patmInfo.pPatchMemGC + offset < patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
        {
            LogFlow(("Changing absolute CPUMCTX from %VRv (%VRv) to %VRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));

            /* The CPUMCTX structure has completely changed, so correct the offsets too. */
            if (ulSSMVersion == PATM_SSM_VERSION_VER16)
            {
                unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;

                switch(uCPUMOffset)
                {
                case RT_OFFSETOF(CPUMCTX_VER1_6, dr0):
                    Log(("Changing dr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr0)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr0);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, dr1):
                    Log(("Changing dr1 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr1)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr1);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, dr2):
                    Log(("Changing dr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr2)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr2);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, dr3):
                    Log(("Changing dr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr3)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr3);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, dr4):
                    Log(("Changing dr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr4)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr4);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, dr5):
                    Log(("Changing dr5 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr5)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr5);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, dr6):
                    Log(("Changing dr6 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr6)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr6);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, dr7):
                    Log(("Changing dr7 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr7)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr7);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, cr0):
                    Log(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, cr2):
                    Log(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, cr3):
                    Log(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, cr4):
                    Log(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, tr):
                    Log(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, ldtr):
                    Log(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt):
                    Log(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt):
                    Log(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt):
                    Log(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
                    break;
                case RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt):
                    Log(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
                    break;
                default:
                    AssertFailed();
                    break;
                }
            }
            else
                *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
        }
        else
        if (    patmInfo.pPatchMemGC + offset >= patmInfo.pStatsGC
            &&  patmInfo.pPatchMemGC + offset < patmInfo.pStatsGC + sizeof(CPUMCTX))
        {
            LogFlow(("Changing absolute Stats from %VRv (%VRv) to %VRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
            *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
        }
        else
        if (    patmInfo.pPatchMemGC + offset >= patmInfo.pGCStackGC
            &&  patmInfo.pPatchMemGC + offset < patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
        {
            LogFlow(("Changing absolute Stack from %VRv (%VRv) to %VRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
            *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
        }
        else
        if (    patmInfo.pPatchMemGC + offset >= patmInfo.pPatchMemGC
            &&  patmInfo.pPatchMemGC + offset < patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
        {
            LogFlow(("Changing absolute PatchMem from %VRv (%VRv) to %VRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
            *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
        }
        else
            AssertFailed();
        break;
    }

    case FIXUP_REL_JMPTOPATCH:
    {
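        /*
         * This fixup is the jump instruction PATM wrote into the guest code to enter the
         * patch.  Since patch memory may have been relocated (delta), the 32-bit
         * displacement stored in guest memory has to be recomputed; but only after
         * verifying that the jump we originally installed is still there.
         */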
        RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);

        if (    pPatch->uState == PATCH_ENABLED
            &&  (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
        {
            uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
            uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
            RTRCPTR pJumpOffGC;
            RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
            RTRCINTPTR displOld = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;

            Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));

            Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
            if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
            {
                Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);

                pJumpOffGC = pPatch->pPrivInstrGC + 2;    //two byte opcode
                oldJump[0] = pPatch->aPrivInstr[0];
                oldJump[1] = pPatch->aPrivInstr[1];
                *(RTRCUINTPTR *)&oldJump[2] = displOld;
            }
            else
#endif
            if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
            {
                pJumpOffGC = pPatch->pPrivInstrGC + 1;    //one byte opcode
                oldJump[0] = 0xE9;
                *(RTRCUINTPTR *)&oldJump[1] = displOld;
            }
            else
            {
                AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
                break;
            }
            Assert(pPatch->cbPatchJump <= sizeof(temp));

            /*
             * Read old patch jump and compare it to the one we previously installed
             */
            int rc = PGMPhysReadGCPtr(pVM, temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
            Assert(VBOX_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);

            if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
            {
                RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;

                rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
                Assert(VBOX_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
            }
            else
            if (memcmp(temp, oldJump, pPatch->cbPatchJump))
            {
                Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
                /*
                 * Disable patch; this is not a good solution
                 */
                /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
                pPatch->uState = PATCH_DISABLED;
            }
            else
            if (VBOX_SUCCESS(rc))
            {
                rc = PGMPhysWriteGCPtrDirty(pVM, pJumpOffGC, &displ, sizeof(displ));
                AssertRC(rc);
            }
            else
            {
                AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
            }
        }
        else
        {
            Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->pPrivInstrHC, pRec->pRelocPos));
        }

        pRec->pDest = pTarget;
        break;
    }

    case FIXUP_REL_JMPTOGUEST:
    {
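        /*
         * The reverse case: a jump from patch code back to guest code.  The jump source
         * lives in patch memory and may have moved, so recompute the displacement from
         * the relocated source to the (unchanged) guest target and store it in the patch.
         */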
        RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
        RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;

        Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
        Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
        *(RTRCUINTPTR *)pRec->pRelocPos = displ;
        pRec->pSource = pSource;
        break;

    }
    }
}