
source: vbox/trunk/src/VBox/VMM/PATM/PATMSSM.cpp@13993

Last change on this file since 13993 was 13830, checked in by vboxsync, 16 years ago

VMM: Disabled VM:pVMGC, removed VM_GUEST_ADDR.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.3 KB
1/* $Id: PATMSSM.cpp 13830 2008-11-05 01:49:18Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/stam.h>
30#include <VBox/pgm.h>
31#include <VBox/cpum.h>
32#include <VBox/iom.h>
33#include <VBox/sup.h>
34#include <VBox/mm.h>
35#include <VBox/ssm.h>
36#include <VBox/pdm.h>
37#include <VBox/trpm.h>
38#include <VBox/param.h>
39#include <iprt/avl.h>
40#include "PATMInternal.h"
41#include "PATMPatch.h"
42#include "PATMA.h"
43#include <VBox/vm.h>
44#include <VBox/csam.h>
45
46#include <VBox/dbg.h>
47#include <VBox/err.h>
48#include <VBox/log.h>
49#include <iprt/assert.h>
50#include <iprt/asm.h>
51#include <iprt/string.h>
52#include <VBox/dis.h>
53#include <VBox/disopcode.h>
54
55#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
56#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)
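/* These two helpers rewrite a pointer variable in place:
 *   PATM_SUBTRACT_PTR(p, base) - turn the absolute pointer 'p' into an offset from 'base'
 *   PATM_ADD_PTR(p, base)      - turn such an offset back into an absolute pointer
 * They let host-context pointers into patch memory be saved as offsets and be rebased
 * against the current pPatchMemHC mapping when the state is loaded, e.g.
 *   PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);   before SSMR3PutMem
 *   PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);        after SSMR3GetMem
 */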
57
58static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
59
60#ifdef VBOX_STRICT
61/**
62 * Callback function for RTAvlPVDoWithAll
63 *
64 * Counts the number of patches in the tree
65 *
66 * @returns VBox status code.
67 * @param pNode Current node
68 * @param pcPatches Pointer to patch counter (uint32_t)
69 */
70static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
71{
72 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
73 return VINF_SUCCESS;
74}
75
76/**
77 * Callback function for RTAvlU32DoWithAll
78 *
79 * Counts the number of patches in the tree
80 *
81 * @returns VBox status code.
82 * @param pNode Current node
83 * @param pcPatches Pointer to patch counter (uint32_t)
84 */
85static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
86{
87 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
88 return VINF_SUCCESS;
89}
90#endif /* VBOX_STRICT */
91
92/**
93 * Callback function for RTAvloU32DoWithAll
94 *
95 * Counts the number of patches in the tree
96 *
97 * @returns VBox status code.
98 * @param pNode Current node
99 * @param pcPatches Pointer to patch counter
100 */
101static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
102{
103 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
104 return VINF_SUCCESS;
105}
106
107/**
108 * Callback function for RTAvlU32DoWithAll
109 *
110 * Saves all patch-to-guest lookup records.
111 *
112 * @returns VBox status code.
113 * @param pNode Current node
114 * @param pVM1 VM Handle
115 */
116static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
117{
118 PVM pVM = (PVM)pVM1;
119 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
120 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
121
122 /* Save the lookup record. */
123 int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
124 AssertRCReturn(rc, rc);
125
126 return VINF_SUCCESS;
127}
128
129/**
130 * Callback function for RTAvlPVDoWithAll
131 *
132 * Saves all fixup records.
133 *
134 * @returns VBox status code.
135 * @param pNode Current node
136 * @param pVM1 VM Handle
137 */
138static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
139{
140 PVM pVM = (PVM)pVM1;
141 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
142 RELOCREC rec = *(PRELOCREC)pNode;
143
144 Assert(rec.pRelocPos);
145 PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
146
147 /* Save the lookup record. */
148 int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
149 AssertRCReturn(rc, rc);
150
151 return VINF_SUCCESS;
152}
153
154
155/**
156 * Callback function for RTAvloU32DoWithAll
157 *
158 * Saves the state of the patch that's being enumerated
159 *
160 * @returns VBox status code.
161 * @param pNode Current node
162 * @param pVM1 VM Handle
163 */
164static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
165{
166 PVM pVM = (PVM)pVM1;
167 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
168 PATMPATCHREC patch = *pPatch;
169 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
170 int rc;
171
172 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
173
174 /*
175 * Reset HC pointers that need to be recalculated when loading the state
176 */
177 AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
178 ("State = %x pPrivInstrHC=%08x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, patch.patch.pPrivInstrHC, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
179 Assert(pPatch->patch.JumpTree == 0);
180 Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
181 Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);
182
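 /* Zero the cache record as well, so that no stale host-context pointers end up in the saved state. */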
183 memset(&patch.patch.cacheRec, 0, sizeof(patch.patch.cacheRec));
184
185 /* Save the patch record itself */
186 rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
187 AssertRCReturn(rc, rc);
188
189 /*
190 * Reset HC pointers in fixup records and save them.
191 */
192#ifdef VBOX_STRICT
193 uint32_t nrFixupRecs = 0;
194 RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
195 AssertMsg((int32_t)nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
196#endif
197 RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);
198
199#ifdef VBOX_STRICT
200 uint32_t nrLookupRecords = 0;
201 RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
202 Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
203#endif
204
205 RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
206 return VINF_SUCCESS;
207}
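/* Per-patch stream layout produced above (and read back in patmr3Load): the
 * PATMPATCHREC itself, then nrFixups RELOCREC entries with pRelocPos stored as
 * an offset into patch memory, then nrPatch2GuestRecs RECPATCHTOGUEST entries. */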
208
209/**
210 * Execute state save operation.
211 *
212 * @returns VBox status code.
213 * @param pVM VM Handle.
214 * @param pSSM SSM operation handle.
215 */
216DECLCALLBACK(int) patmr3Save(PVM pVM, PSSMHANDLE pSSM)
217{
218 PATM patmInfo = pVM->patm.s;
219 int rc;
220
221 pVM->patm.s.savedstate.pSSM = pSSM;
222
223 /*
224 * Reset HC pointers that need to be recalculated when loading the state
225 */
226 patmInfo.pPatchMemHC = NULL;
227 patmInfo.pGCStateHC = 0;
228 patmInfo.pvFaultMonitor = 0;
229
230 Assert(patmInfo.ulCallDepth == 0);
231
232 /*
233 * Count the number of patches in the tree (feeling lazy)
234 */
235 patmInfo.savedstate.cPatches = 0;
236 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);
237
238 /*
239 * Save PATM structure
240 */
241 rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
242 AssertRCReturn(rc, rc);
243
244 /*
245 * Save patch memory contents
246 */
247 rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
248 AssertRCReturn(rc, rc);
249
250 /*
251 * Save GC state memory
252 */
253 rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
254 AssertRCReturn(rc, rc);
255
256 /*
257 * Save PATM stack page
258 */
259 rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
260 AssertRCReturn(rc, rc);
261
262 /*
263 * Save all patches
264 */
265 rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
266 AssertRCReturn(rc, rc);
267
268 /** @note patch statistics are not saved. */
269
270 return VINF_SUCCESS;
271}
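/* Rough map of the stream written above: the PATM instance data (with the HC
 * pointers cleared), the raw patch memory block (cbPatchMem bytes), the
 * PATMGCSTATE area, the PATM stack pages (PATM_STACK_TOTAL_SIZE bytes), and
 * finally the per-patch data for each of savedstate.cPatches patches in the
 * format described above.  patmr3Load consumes the stream in the same order. */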
272
273/**
274 * Execute state load operation.
275 *
276 * @returns VBox status code.
277 * @param pVM VM Handle.
278 * @param pSSM SSM operation handle.
279 * @param u32Version Data layout version.
280 */
281DECLCALLBACK(int) patmr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
282{
283 PATM patmInfo;
284 int rc;
285
286 if ( u32Version != PATM_SSM_VERSION
287 && u32Version != PATM_SSM_VERSION_VER16
288#ifdef PATM_WITH_NEW_SSM
289 && u32Version != PATM_SSM_VERSION_GETPUTMEM)
290#else
291 )
292#endif
293 {
294 AssertMsgFailed(("patmR3Load: Invalid version u32Version=%d!\n", u32Version));
295 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
296 }
297
298 pVM->patm.s.savedstate.pSSM = pSSM;
299
300 /*
301 * Restore PATM structure
302 */
303#ifdef PATM_WITH_NEW_SSM
304 if (u32Version == PATM_SSM_VERSION_GETPUTMEM)
305 {
306#endif
307 rc = SSMR3GetMem(pSSM, &patmInfo, sizeof(patmInfo));
308 AssertRCReturn(rc, rc);
309#ifdef PATM_WITH_NEW_SSM
310 }
311 else
312 {
313 memset(&patmInfo, 0, sizeof(patmInfo));
314
315 AssertCompile(sizeof(patmInfo.pGCStateGC) == sizeof(RTRCPTR));
316 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStateGC);
317 AssertRCReturn(rc, rc);
318
319 AssertCompile(sizeof(patmInfo.pCPUMCtxGC) == sizeof(RTRCPTR));
320 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pCPUMCtxGC);
321 AssertRCReturn(rc, rc);
322
323 AssertCompile(sizeof(patmInfo.pStatsGC) == sizeof(RTRCPTR));
324 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pStatsGC);
325 AssertRCReturn(rc, rc);
326
327 AssertCompile(sizeof(patmInfo.pfnHelperCallGC) == sizeof(RTRCPTR));
328 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperCallGC);
329 AssertRCReturn(rc, rc);
330
331 AssertCompile(sizeof(patmInfo.pfnHelperRetGC) == sizeof(RTRCPTR));
332 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperRetGC);
333 AssertRCReturn(rc, rc);
334
335 AssertCompile(sizeof(patmInfo.pfnHelperJumpGC) == sizeof(RTRCPTR));
336 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperJumpGC);
337 AssertRCReturn(rc, rc);
338
339 AssertCompile(sizeof(patmInfo.pfnHelperIretGC) == sizeof(RTRCPTR));
340 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperIretGC);
341 AssertRCReturn(rc, rc);
342
343 AssertCompile(sizeof(patmInfo.pPatchMemGC) == sizeof(RTRCPTR));
344 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchMemGC);
345 AssertRCReturn(rc, rc);
346
347 AssertCompile(sizeof(patmInfo.cbPatchMem) == sizeof(uint32_t));
348 rc = SSMR3GetU32(pSSM, &patmInfo.cbPatchMem);
349 AssertRCReturn(rc, rc);
350
351 AssertCompile(sizeof(patmInfo.offPatchMem) == sizeof(uint32_t));
352 rc = SSMR3GetU32(pSSM, &patmInfo.offPatchMem);
353 AssertRCReturn(rc, rc);
354
355 AssertCompile(sizeof(patmInfo.deltaReloc) == sizeof(int32_t));
356 rc = SSMR3GetS32(pSSM, &patmInfo.deltaReloc);
357 AssertRCReturn(rc, rc);
358
359 AssertCompile(sizeof(patmInfo.uCurrentPatchIdx) == sizeof(uint32_t));
360 rc = SSMR3GetS32(pSSM, &patmInfo.uCurrentPatchIdx);
361 AssertRCReturn(rc, rc);
362
363 AssertCompile(sizeof(patmInfo.pPatchedInstrGCLowest) == sizeof(RTRCPTR));
364 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCLowest);
365 AssertRCReturn(rc, rc);
366
367 AssertCompile(sizeof(patmInfo.pPatchedInstrGCHighest) == sizeof(RTRCPTR));
368 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCHighest);
369 AssertRCReturn(rc, rc);
370
371 AssertCompile(sizeof(patmInfo.pfnSysEnterGC) == sizeof(RTRCPTR));
372 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterGC);
373 AssertRCReturn(rc, rc);
374
375 AssertCompile(sizeof(patmInfo.pfnSysEnterPatchGC) == sizeof(RTRCPTR));
376 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterPatchGC);
377 AssertRCReturn(rc, rc);
378
379 AssertCompile(sizeof(patmInfo.uSysEnterPatchIdx) == sizeof(uint32_t));
380 rc = SSMR3GetU32(pSSM, &patmInfo.uSysEnterPatchIdx);
381 AssertRCReturn(rc, rc);
382
383 AssertCompile(sizeof(patmInfo.ulCallDepth) == sizeof(uint32_t));
384 rc = SSMR3GetU32(pSSM, &patmInfo.ulCallDepth);
385 AssertRCReturn(rc, rc);
386
387 AssertCompile(sizeof(patmInfo.pGCStackGC) == sizeof(RTRCPTR));
388 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStackGC);
389 AssertRCReturn(rc, rc);
390
391 AssertCompile(sizeof(patmInfo.cPageRecords) == sizeof(uint32_t));
392 rc = SSMR3GetU32(pSSM, &patmInfo.cPageRecords);
393 AssertRCReturn(rc, rc);
394
395 AssertCompile(sizeof(patmInfo.fOutOfMemory) == sizeof(bool));
396 rc = SSMR3GetBool(pSSM, &patmInfo.fOutOfMemory);
397 AssertRCReturn(rc, rc);
398
399 AssertCompile(sizeof(patmInfo.savedstate.cPatches) == sizeof(uint32_t));
400 rc = SSMR3GetU32(pSSM, &patmInfo.savedstate.cPatches);
401 AssertRCReturn(rc, rc);
402
403 }
404#endif
405
406 /* Relative calls are made to the helper functions. Therefore their relative locations must not change! */
407 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
408 if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
409 || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
410 || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
411 || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
412 {
413 AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
414 return VERR_SSM_INVALID_STATE;
415 }
416
417 if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
418 {
419 AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
420 return VERR_SSM_INVALID_STATE;
421 }
422 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
423 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
424 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
425 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
426
427 /* Lowest and highest patched instruction */
428 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
429 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
430
431 /* Sysenter handlers */
432 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
433 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
434 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
435
436 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
437
438 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
439 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
440 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
441 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
442
443
444 /** @note patch statistics are not restored. */
445
446 /*
447 * Restore patch memory contents
448 */
449 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
450 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
451 AssertRCReturn(rc, rc);
452
453 /*
454 * Restore GC state memory
455 */
456#ifdef PATM_WITH_NEW_SSM
457 if (u32Version == PATM_SSM_VERSION_GETPUTMEM)
458 {
459#endif
460 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
461 AssertRCReturn(rc, rc);
462#ifdef PATM_WITH_NEW_SSM
463 }
464 else
465 {
466 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uVMFlags) == sizeof(uint32_t));
467 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uVMFlags);
468 AssertRCReturn(rc, rc);
469
470 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPendingAction) == sizeof(uint32_t));
471 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPendingAction);
472 AssertRCReturn(rc, rc);
473
474 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPatchCalls) == sizeof(uint32_t));
475 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPatchCalls);
476 AssertRCReturn(rc, rc);
477
478 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uScratch) == sizeof(uint32_t));
479 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uScratch);
480 AssertRCReturn(rc, rc);
481
482 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEFlags) == sizeof(uint32_t));
483 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEFlags);
484 AssertRCReturn(rc, rc);
485
486 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretCS) == sizeof(uint32_t));
487 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretCS);
488 AssertRCReturn(rc, rc);
489
490 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEIP) == sizeof(uint32_t));
491 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEIP);
492 AssertRCReturn(rc, rc);
493
494 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Psp) == sizeof(uint32_t));
495 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Psp);
496 AssertRCReturn(rc, rc);
497
498 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->fPIF) == sizeof(uint32_t));
499 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->fPIF);
500 AssertRCReturn(rc, rc);
501
502 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts) == sizeof(RTRCPTR));
503 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts);
504 AssertRCReturn(rc, rc);
505
506 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr) == sizeof(RTRCPTR));
507 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr);
508 AssertRCReturn(rc, rc);
509
510 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallReturnAddr) == sizeof(RTRCPTR));
511 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallReturnAddr);
512 AssertRCReturn(rc, rc);
513
514 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEAX) == sizeof(uint32_t));
515 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEAX);
516 AssertRCReturn(rc, rc);
517
518 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uECX) == sizeof(uint32_t));
519 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uECX);
520 AssertRCReturn(rc, rc);
521
522 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEDI) == sizeof(uint32_t));
523 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEDI);
524 AssertRCReturn(rc, rc);
525
526 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.eFlags) == sizeof(uint32_t));
527 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.eFlags);
528 AssertRCReturn(rc, rc);
529
530 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uFlags) == sizeof(uint32_t));
531 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uFlags);
532 AssertRCReturn(rc, rc);
533 }
534#endif
535
536 /*
537 * Restore PATM stack page
538 */
539 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
540 AssertRCReturn(rc, rc);
541
542 /*
543 * Load all patches
544 */
545 for (uint32_t i=0;i<patmInfo.savedstate.cPatches;i++)
546 {
547 PATMPATCHREC patch, *pPatchRec;
548
549 rc = SSMR3GetMem(pSSM, &patch, sizeof(patch));
550 AssertRCReturn(rc, rc);
551
552 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
553
554 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
555 if (RT_FAILURE(rc))
556 {
557 AssertMsgFailed(("Out of memory!!!!\n"));
558 return VERR_NO_MEMORY;
559 }
560 /*
561 * Only restore the patch part of the tree record; not the internal data (except the key of course)
562 */
563 pPatchRec->patch = patch.patch;
564 pPatchRec->Core.Key = patch.Core.Key;
565 pPatchRec->CoreOffset.Key = patch.CoreOffset.Key;
566
567 Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
568 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
569 Assert(ret);
570 if (pPatchRec->patch.uState != PATCH_REFUSED)
571 {
572 if (pPatchRec->patch.pPatchBlockOffset)
573 {
574 /* We actually generated code for this patch. */
575 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
576 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
577 }
578 }
579 /* Set to zero as we don't need it anymore. */
580 pPatchRec->patch.pTempInfo = 0;
581
582 pPatchRec->patch.pPrivInstrHC = 0;
583 /* The GC virtual ptr is fixed, but we must convert it manually again to HC. */
584 rc = PGMPhysGCPtr2HCPtr(pVM, pPatchRec->patch.pPrivInstrGC, (PRTHCPTR)&pPatchRec->patch.pPrivInstrHC);
585 /* Can fail due to page or page table not present. */
586
587 /*
588 * Restore fixup records and correct HC pointers in fixup records
589 */
590 pPatchRec->patch.FixupTree = 0;
591 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
592 for (int i=0;i<patch.patch.nrFixups;i++)
593 {
594 RELOCREC rec;
595 int32_t offset;
596 RTRCPTR *pFixup;
597
598 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
599 AssertRCReturn(rc, rc);
600
601 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
602 offset = (int32_t)(int64_t)rec.pRelocPos;
603 /* Convert to HC pointer again. */
604 PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
605 pFixup = (RTRCPTR *)rec.pRelocPos;
606
607 if (pPatchRec->patch.uState != PATCH_REFUSED)
608 {
609 if ( rec.uType == FIXUP_REL_JMPTOPATCH
610 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
611 {
612 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
613 unsigned offset = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
614
615 Assert(pPatchRec->patch.pPrivInstrHC);
616 rec.pRelocPos = pPatchRec->patch.pPrivInstrHC + offset;
617 pFixup = (RTRCPTR *)rec.pRelocPos;
618 }
619
620 patmCorrectFixup(pVM, u32Version, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
621 }
622
623 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
624 AssertRCReturn(rc, rc);
625 }
626
627 /* And all patch-to-guest lookup records */
628 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
629
630 pPatchRec->patch.Patch2GuestAddrTree = 0;
631 pPatchRec->patch.Guest2PatchAddrTree = 0;
632 if (pPatchRec->patch.nrPatch2GuestRecs)
633 {
634 RECPATCHTOGUEST rec;
635 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
636
637 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
638 for (uint32_t i=0;i<nrPatch2GuestRecs;i++)
639 {
640 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
641 AssertRCReturn(rc, rc);
642
643 patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
644 }
645 Assert(pPatchRec->patch.Patch2GuestAddrTree);
646 }
647
648 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
649 {
650 /* Insert the guest page lookup records (for detection of self-modifying code) */
651 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
652 AssertRCReturn(rc, rc);
653 }
654
655#if 0 /* can fail def LOG_ENABLED */
656 if ( pPatchRec->patch.uState != PATCH_REFUSED
657 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
658 {
659 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
660 Log(("Patch code ----------------------------------------------------------\n"));
661 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
662 Log(("Patch code ends -----------------------------------------------------\n"));
663 MMR3HeapFree(pPatchRec->patch.pTempInfo);
664 pPatchRec->patch.pTempInfo = NULL;
665 }
666#endif
667
668 }
669
670 /*
671 * Correct absolute fixups in the global patch. (helper functions)
672 * Bit of a mess. Uses the new patch record, but restored patch functions.
673 */
674 PRELOCREC pRec = 0;
675 AVLPVKEY key = 0;
676
677 Log(("Correct fixups in global helper functions\n"));
678 while (true)
679 {
680 int32_t offset;
681 RTRCPTR *pFixup;
682
683 /* Get the record that's closest from above */
684 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
685 if (pRec == 0)
686 break;
687
688 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
689
690 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
691 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
692 pFixup = (RTRCPTR *)pRec->pRelocPos;
693
694 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
695 patmCorrectFixup(pVM, u32Version, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
696 }
697
698#ifdef VBOX_WITH_STATISTICS
699 /*
700 * Restore relevant old statistics
701 */
702 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
703 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
704 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
705 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
706#endif
707 return VINF_SUCCESS;
708}
709
710/**
711 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
712 *
713 * @returns VBox status code.
714 * @param pVM VM Handle.
715 * @param ulSSMVersion SSM version
716 * @param patmInfo Saved PATM structure
717 * @param pPatch Patch record
718 * @param pRec Relocation record
719 * @param offset Offset of referenced data/code
720 * @param pFixup Fixup address
721 */
722static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
723{
724 int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;
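 /* delta is how far patch memory moved in guest context between save and load;
  * the relative jump fixups below (FIXUP_REL_JMPTOPATCH and FIXUP_REL_JMPTOGUEST)
  * use it to rebase the patch-side address of the jump. */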
725
726 switch (pRec->uType)
727 {
728 case FIXUP_ABSOLUTE:
729 {
730 if (pRec->pSource && !PATMIsPatchGCAddr(pVM, pRec->pSource))
731 break;
732
733 if ( *pFixup >= patmInfo.pGCStateGC
734 && *pFixup < patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
735 {
736 LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
737 *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
738 }
739 else
740 if ( *pFixup >= patmInfo.pCPUMCtxGC
741 && *pFixup < patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
742 {
743 LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));
744
745 /* The CPUMCTX structure has completely changed, so correct the offsets too. */
746 if (ulSSMVersion == PATM_SSM_VERSION_VER16)
747 {
748 unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;
749
750 /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
751 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
752 * function is not available in older gcc versions, at least not in gcc-3.3 */
753 if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
754 {
755 LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
756 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
757 }
758 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
759 {
760 LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
761 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
762 }
763 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
764 {
765 LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
766 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
767 }
768 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
769 {
770 LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
771 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
772 }
773 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
774 {
775 LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
776 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
777 }
778 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
779 {
780 LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
781 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
782 }
783 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
784 {
785 LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
786 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
787 }
788 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
789 {
790 LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
791 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
792 }
793 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
794 {
795 LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
796 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
797 }
798 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
799 {
800 LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
801 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
802 }
803 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
804 {
805 LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
806 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
807 }
808 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
809 {
810 LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
811 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
812 }
813 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
814 {
815 LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
816 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
817 }
818 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
819 {
820 LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
821 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
822 }
823 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
824 {
825 LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
826 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
827 }
828 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
829 {
830 LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
831 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
832 }
833 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
834 {
835 LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
836 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
837 }
838 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
839 {
840 LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
841 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
842 }
843 else
844 AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
845 }
846 else
847 *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
848 }
849 else
850 if ( *pFixup >= patmInfo.pStatsGC
851 && *pFixup < patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
852 {
853 LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
854 *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
855 }
856 else
857 if ( *pFixup >= patmInfo.pGCStackGC
858 && *pFixup < patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
859 {
860 LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
861 *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
862 }
863 else
864 if ( *pFixup >= patmInfo.pPatchMemGC
865 && *pFixup < patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
866 {
867 LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
868 *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
869 }
870 else
871 /* Note: rather assumptive! */
872 if ( *pFixup >= pVM->pVMRC
873 && *pFixup < pVM->pVMRC + 32)
874 {
875 LogFlow(("Changing fForcedActions fixup from %x to %x\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, fForcedActions)));
876 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, fForcedActions);
877 }
878 else
879 if ( *pFixup >= pVM->pVMRC
880 && *pFixup < pVM->pVMRC + 8192)
881 {
882 static int cCpuidFixup = 0;
883#ifdef LOG_ENABLED
884 RTRCPTR oldFixup = *pFixup;
885#endif
886 /* very dirty assumptions about the cpuid patch and cpuid ordering. */
887 switch(cCpuidFixup & 3)
888 {
889 case 0:
890 *pFixup = CPUMGetGuestCpuIdDefRCPtr(pVM);
891 break;
892 case 1:
893 *pFixup = CPUMGetGuestCpuIdStdRCPtr(pVM);
894 break;
895 case 2:
896 *pFixup = CPUMGetGuestCpuIdExtRCPtr(pVM);
897 break;
898 case 3:
899 *pFixup = CPUMGetGuestCpuIdCentaurRCPtr(pVM);
900 break;
901 }
902 LogFlow(("Changing cpuid fixup %d from %x to %x\n", cCpuidFixup, oldFixup, *pFixup));
903 cCpuidFixup++;
904 }
905 else
906 AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
907
908#ifdef RT_OS_WINDOWS
909 AssertCompile(RT_OFFSETOF(VM, fForcedActions) < 32);
910#endif
911 break;
912 }
913
914 case FIXUP_REL_JMPTOPATCH:
915 {
916 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
917
918 if ( pPatch->uState == PATCH_ENABLED
919 && (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
920 {
921 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
922 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
923 RTRCPTR pJumpOffGC;
924 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
925 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
926
927 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
928
929 Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
930#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
931 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
932 {
933 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
934
935 pJumpOffGC = pPatch->pPrivInstrGC + 2; //two byte opcode
936 oldJump[0] = pPatch->aPrivInstr[0];
937 oldJump[1] = pPatch->aPrivInstr[1];
938 *(RTRCUINTPTR *)&oldJump[2] = displOld;
939 }
940 else
941#endif
942 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
943 {
944 pJumpOffGC = pPatch->pPrivInstrGC + 1; //one byte opcode
945 oldJump[0] = 0xE9;
946 *(RTRCUINTPTR *)&oldJump[1] = displOld;
947 }
948 else
949 {
950 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
951 break;
952 }
953 Assert(pPatch->cbPatchJump <= sizeof(temp));
954
955 /*
956 * Read old patch jump and compare it to the one we previously installed
957 */
958 int rc = PGMPhysSimpleReadGCPtr(pVM, temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
959 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
960
961 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
962 {
963 RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;
964
965 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
966 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
967 }
968 else
969 if (memcmp(temp, oldJump, pPatch->cbPatchJump))
970 {
971 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
972 /*
973 * Disable patch; this is not a good solution
974 */
975 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
976 pPatch->uState = PATCH_DISABLED;
977 }
978 else
979 if (RT_SUCCESS(rc))
980 {
981 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pJumpOffGC, &displ, sizeof(displ));
982 AssertRC(rc);
983 }
984 else
985 {
986 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
987 }
988 }
989 else
990 {
991 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->pPrivInstrHC, pRec->pRelocPos));
992 }
993
994 pRec->pDest = pTarget;
995 break;
996 }
997
998 case FIXUP_REL_JMPTOGUEST:
999 {
1000 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
1001 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
1002
1003 Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
1004 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1005 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
1006 pRec->pSource = pSource;
1007 break;
1008
1009 }
1010}
1011}
1012