VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMSSM.cpp@ 14884

Last change on this file since 14884 was 14884, checked in by vboxsync, 16 years ago

Compile fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.7 KB
Line 
1/* $Id: PATMSSM.cpp 14884 2008-12-02 09:29:48Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/hwaccm.h>
30#include <VBox/stam.h>
31#include <VBox/pgm.h>
32#include <VBox/cpum.h>
33#include <VBox/iom.h>
34#include <VBox/sup.h>
35#include <VBox/mm.h>
36#include <VBox/ssm.h>
37#include <VBox/pdm.h>
38#include <VBox/trpm.h>
39#include <VBox/param.h>
40#include <iprt/avl.h>
41#include "PATMInternal.h"
42#include "PATMPatch.h"
43#include "PATMA.h"
44#include <VBox/vm.h>
45#include <VBox/csam.h>
46
47#include <VBox/dbg.h>
48#include <VBox/err.h>
49#include <VBox/log.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#include <iprt/string.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55
/* Pointer <-> offset conversion helpers for saved-state serialization: HC pointers
   into patch memory are stored relative to pPatchMemHC (subtract on save, add back
   on load).  Written through a uintptr_t lvalue so they work on any pointer type. */
#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)

/* Forward declaration: rewrites one fixup after the hypervisor/PATM layout has moved between save and load. */
static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
60
61#ifdef VBOX_STRICT
62/**
63 * Callback function for RTAvlPVDoWithAll
64 *
65 * Counts the number of patches in the tree
66 *
67 * @returns VBox status code.
68 * @param pNode Current node
69 * @param pcPatches Pointer to patch counter (uint32_t)
70 */
71static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
72{
73 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
74 return VINF_SUCCESS;
75}
76
77/**
78 * Callback function for RTAvlU32DoWithAll
79 *
80 * Counts the number of patches in the tree
81 *
82 * @returns VBox status code.
83 * @param pNode Current node
84 * @param pcPatches Pointer to patch counter (uint32_t)
85 */
86static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
87{
88 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
89 return VINF_SUCCESS;
90}
91#endif /* VBOX_STRICT */
92
93/**
94 * Callback function for RTAvloU32DoWithAll
95 *
96 * Counts the number of patches in the tree
97 *
98 * @returns VBox status code.
99 * @param pNode Current node
100 * @param pcPatches Pointer to patch counter
101 */
102static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
103{
104 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
105 return VINF_SUCCESS;
106}
107
108/**
109 * Callback function for RTAvlU32DoWithAll
110 *
111 * Saves all patch to guest lookup records.
112 *
113 * @returns VBox status code.
114 * @param pNode Current node
115 * @param pVM1 VM Handle
116 */
117static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
118{
119 PVM pVM = (PVM)pVM1;
120 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
121 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
122
123 /* Save the lookup record. */
124 int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
125 AssertRCReturn(rc, rc);
126
127 return VINF_SUCCESS;
128}
129
130/**
131 * Callback function for RTAvlPVDoWithAll
132 *
133 * Saves all patch to guest lookup records.
134 *
135 * @returns VBox status code.
136 * @param pNode Current node
137 * @param pVM1 VM Handle
138 */
139static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
140{
141 PVM pVM = (PVM)pVM1;
142 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
143 RELOCREC rec = *(PRELOCREC)pNode;
144
145 Assert(rec.pRelocPos);
146 PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
147
148 /* Save the lookup record. */
149 int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
150 AssertRCReturn(rc, rc);
151
152 return VINF_SUCCESS;
153}
154
155
/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Saves the state of the patch that's being enumerated: first the patch
 * record itself (with the HC-only cache record zeroed), then its fixup
 * records, then its patch-to-guest lookup records.  patmr3Load reads these
 * back in exactly this order.
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a PATMPATCHREC).
 * @param   pVM1    VM Handle (cast from void *).
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHREC patch = *pPatch;   /* local copy; we only sanitize the copy we write out */
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    int rc;

    /* The global-functions pseudo patch is saved separately, never through this enumeration. */
    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPrivInstrHC=%08x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, patch.patch.pPrivInstrHC, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
    /* These trees only exist during patch installation; they must be gone by save time. */
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    /* The cache record holds HC pointers that are meaningless after a restore; zap it in the copy. */
    memset(&patch.patch.cacheRec, 0, sizeof(patch.patch.cacheRec));

    /* Save the patch record itself */
    rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    /* Cross-check the fixup tree against the recorded count before saving. */
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg((int32_t)nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);

#ifdef VBOX_STRICT
    /* Same cross-check for the patch-to-guest lookup tree. */
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
    return VINF_SUCCESS;
}
209
/**
 * Execute state save operation.
 *
 * Stream layout (patmr3Load must read back in the same order):
 *   1. the PATM structure (HC pointers cleared),
 *   2. the raw patch memory,
 *   3. the GC state (PATMGCSTATE),
 *   4. the PATM stack page,
 *   5. one record set per patch (see patmSavePatchState).
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmr3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PATM patmInfo = pVM->patm.s;   /* local copy; only the copy is sanitized and written */
    int rc;

    /* Stash the SSM handle where the tree enumeration callbacks can reach it. */
    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC = NULL;
    patmInfo.pGCStateHC = 0;
    patmInfo.pvFaultMonitor = 0;

    /* Saving mid-call would leave the call stack unrestorable. */
    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
273
274/**
275 * Execute state load operation.
276 *
277 * @returns VBox status code.
278 * @param pVM VM Handle.
279 * @param pSSM SSM operation handle.
280 * @param u32Version Data layout version.
281 */
282DECLCALLBACK(int) patmr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
283{
284 PATM patmInfo;
285 int rc;
286
287 if ( u32Version != PATM_SSM_VERSION
288 && u32Version != PATM_SSM_VERSION_VER16
289#ifdef PATM_WITH_NEW_SSM
290 && u32Version != PATM_SSM_VERSION_GETPUTMEM)
291#else
292 )
293#endif
294 {
295 AssertMsgFailed(("patmR3Load: Invalid version u32Version=%d!\n", u32Version));
296 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
297 }
298
299 pVM->patm.s.savedstate.pSSM = pSSM;
300
301 /*
302 * Restore PATM structure
303 */
304#ifdef PATM_WITH_NEW_SSM
305 if (u32Version == PATM_SSM_VERSION_GETPUTMEM)
306 {
307#endif
308 rc = SSMR3GetMem(pSSM, &patmInfo, sizeof(patmInfo));
309 AssertRCReturn(rc, rc);
310#ifdef PATM_WITH_NEW_SSM
311 }
312 else
313 {
314 memset(&patmInfo, 0, sizeof(patmInfo));
315
316 AssertCompile(sizeof(patmInfo.pGCStateGC) == sizeof(RTRCPTR));
317 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStateGC);
318 AssertRCReturn(rc, rc);
319
320 AssertCompile(sizeof(patmInfo.pCPUMCtxGC) == sizeof(RTRCPTR));
321 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pCPUMCtxGC);
322 AssertRCReturn(rc, rc);
323
324 AssertCompile(sizeof(patmInfo.pStatsGC) == sizeof(RTRCPTR));
325 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pStatsGC);
326 AssertRCReturn(rc, rc);
327
328 AssertCompile(sizeof(patmInfo.pfnHelperCallGC) == sizeof(RTRCPTR));
329 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperCallGC);
330 AssertRCReturn(rc, rc);
331
332 AssertCompile(sizeof(patmInfo.pfnHelperRetGC) == sizeof(RTRCPTR));
333 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperRetGC);
334 AssertRCReturn(rc, rc);
335
336 AssertCompile(sizeof(patmInfo.pfnHelperJumpGC) == sizeof(RTRCPTR));
337 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperJumpGC);
338 AssertRCReturn(rc, rc);
339
340 AssertCompile(sizeof(patmInfo.pfnHelperIretGC) == sizeof(RTRCPTR));
341 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnHelperIretGC);
342 AssertRCReturn(rc, rc);
343
344 AssertCompile(sizeof(patmInfo.pPatchMemGC) == sizeof(RTRCPTR));
345 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchMemGC);
346 AssertRCReturn(rc, rc);
347
348 AssertCompile(sizeof(patmInfo.cbPatchMem) == sizeof(uint32_t));
349 rc = SSMR3GetU32(pSSM, &patmInfo.cbPatchMem);
350 AssertRCReturn(rc, rc);
351
352 AssertCompile(sizeof(patmInfo.offPatchMem) == sizeof(uint32_t));
353 rc = SSMR3GetU32(pSSM, &patmInfo.offPatchMem);
354 AssertRCReturn(rc, rc);
355
356 AssertCompile(sizeof(patmInfo.deltaReloc) == sizeof(int32_t));
357 rc = SSMR3GetS32(pSSM, &patmInfo.deltaReloc);
358 AssertRCReturn(rc, rc);
359
360 AssertCompile(sizeof(patmInfo.uCurrentPatchIdx) == sizeof(uint32_t));
361 rc = SSMR3GetS32(pSSM, &patmInfo.uCurrentPatchIdx);
362 AssertRCReturn(rc, rc);
363
364 AssertCompile(sizeof(patmInfo.pPatchedInstrGCLowest) == sizeof(RTRCPTR));
365 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCLowest);
366 AssertRCReturn(rc, rc);
367
368 AssertCompile(sizeof(patmInfo.pPatchedInstrGCHighest) == sizeof(RTRCPTR));
369 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pPatchedInstrGCHighest);
370 AssertRCReturn(rc, rc);
371
372 AssertCompile(sizeof(patmInfo.pfnSysEnterGC) == sizeof(RTRCPTR));
373 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterGC);
374 AssertRCReturn(rc, rc);
375
376 AssertCompile(sizeof(patmInfo.pfnSysEnterPatchGC) == sizeof(RTRCPTR));
377 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pfnSysEnterPatchGC);
378 AssertRCReturn(rc, rc);
379
380 AssertCompile(sizeof(patmInfo.uSysEnterPatchIdx) == sizeof(uint32_t));
381 rc = SSMR3GetU32(pSSM, &patmInfo.uSysEnterPatchIdx);
382 AssertRCReturn(rc, rc);
383
384 AssertCompile(sizeof(patmInfo.ulCallDepth) == sizeof(uint32_t));
385 rc = SSMR3GetU32(pSSM, &patmInfo.ulCallDepth);
386 AssertRCReturn(rc, rc);
387
388 AssertCompile(sizeof(patmInfo.pGCStackGC) == sizeof(RTRCPTR));
389 rc = SSMR3GetRCPtr(pSSM, &patmInfo.pGCStackGC);
390 AssertRCReturn(rc, rc);
391
392 AssertCompile(sizeof(patmInfo.cPageRecords) == sizeof(uint32_t));
393 rc = SSMR3GetU32(pSSM, &patmInfo.cPageRecords);
394 AssertRCReturn(rc, rc);
395
396 AssertCompile(sizeof(patmInfo.fOutOfMemory) == sizeof(bool));
397 rc = SSMR3GetBool(pSSM, &patmInfo.fOutOfMemory);
398 AssertRCReturn(rc, rc);
399
400 AssertCompile(sizeof(patmInfo.savedstate.cPatches) == sizeof(uint32_t));
401 rc = SSMR3GetU32(pSSM, &patmInfo.savedstate.cPatches);
402 AssertRCReturn(rc, rc);
403
404 }
405#endif
406
407 /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
408 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
409 if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
410 || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
411 || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
412 || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
413 {
414 AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
415 return VERR_SSM_INVALID_STATE;
416 }
417
418 if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
419 {
420 AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
421 return VERR_SSM_INVALID_STATE;
422 }
423 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
424 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
425 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
426 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
427
428 /* Lowest and highest patched instruction */
429 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
430 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
431
432 /* Sysenter handlers */
433 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
434 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
435 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
436
437 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
438
439 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
440 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
441 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
442 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
443
444
445 /** @note patch statistics are not restored. */
446
447 /*
448 * Restore patch memory contents
449 */
450 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
451 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
452 AssertRCReturn(rc, rc);
453
454 /*
455 * Restore GC state memory
456 */
457#ifdef PATM_WITH_NEW_SSM
458 if (u32Version == PATM_SSM_VERSION_GETPUTMEM)
459 {
460#endif
461 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
462 AssertRCReturn(rc, rc);
463#ifdef PATM_WITH_NEW_SSM
464 }
465 else
466 {
467 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uVMFlags) == sizeof(uint32_t));
468 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uVMFlags);
469 AssertRCReturn(rc, rc);
470
471 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPendingAction) == sizeof(uint32_t));
472 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPendingAction);
473 AssertRCReturn(rc, rc);
474
475 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uPatchCalls) == sizeof(uint32_t));
476 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uPatchCalls);
477 AssertRCReturn(rc, rc);
478
479 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uScratch) == sizeof(uint32_t));
480 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uScratch);
481 AssertRCReturn(rc, rc);
482
483 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEFlags) == sizeof(uint32_t));
484 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEFlags);
485 AssertRCReturn(rc, rc);
486
487 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretCS) == sizeof(uint32_t));
488 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretCS);
489 AssertRCReturn(rc, rc);
490
491 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->uIretEIP) == sizeof(uint32_t));
492 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->uIretEIP);
493 AssertRCReturn(rc, rc);
494
495 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Psp) == sizeof(uint32_t));
496 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Psp);
497 AssertRCReturn(rc, rc);
498
499 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->fPIF) == sizeof(uint32_t));
500 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->fPIF);
501 AssertRCReturn(rc, rc);
502
503 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts) == sizeof(RTRCPTR));
504 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts);
505 AssertRCReturn(rc, rc);
506
507 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr) == sizeof(RTRCPTR));
508 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallPatchTargetAddr);
509 AssertRCReturn(rc, rc);
510
511 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->GCCallReturnAddr) == sizeof(RTRCPTR));
512 rc = SSMR3GetRCPtr(pSSM, &pVM->patm.s.pGCStateHC->GCCallReturnAddr);
513 AssertRCReturn(rc, rc);
514
515 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEAX) == sizeof(uint32_t));
516 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEAX);
517 AssertRCReturn(rc, rc);
518
519 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uECX) == sizeof(uint32_t));
520 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uECX);
521 AssertRCReturn(rc, rc);
522
523 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uEDI) == sizeof(uint32_t));
524 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uEDI);
525 AssertRCReturn(rc, rc);
526
527 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.eFlags) == sizeof(uint32_t));
528 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.eFlags);
529 AssertRCReturn(rc, rc);
530
531 AssertCompile(sizeof(pVM->patm.s.pGCStateHC->Restore.uFlags) == sizeof(uint32_t));
532 rc = SSMR3GetU32(pSSM, &pVM->patm.s.pGCStateHC->Restore.uFlags);
533 AssertRCReturn(rc, rc);
534 }
535#endif
536
537 /*
538 * Restore PATM stack page
539 */
540 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
541 AssertRCReturn(rc, rc);
542
543 /*
544 * Load all patches
545 */
546 for (uint32_t i=0;i<patmInfo.savedstate.cPatches;i++)
547 {
548 PATMPATCHREC patch, *pPatchRec;
549
550 rc = SSMR3GetMem(pSSM, &patch, sizeof(patch));
551 AssertRCReturn(rc, rc);
552
553 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
554
555 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
556 if (RT_FAILURE(rc))
557 {
558 AssertMsgFailed(("Out of memory!!!!\n"));
559 return VERR_NO_MEMORY;
560 }
561 /*
562 * Only restore the patch part of the tree record; not the internal data (except the key of course)
563 */
564 pPatchRec->patch = patch.patch;
565 pPatchRec->Core.Key = patch.Core.Key;
566 pPatchRec->CoreOffset.Key = patch.CoreOffset.Key;
567
568 Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
569 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
570 Assert(ret);
571 if (pPatchRec->patch.uState != PATCH_REFUSED)
572 {
573 if (pPatchRec->patch.pPatchBlockOffset)
574 {
575 /* We actually generated code for this patch. */
576 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
577 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
578 }
579 }
580 /* Set to zero as we don't need it anymore. */
581 pPatchRec->patch.pTempInfo = 0;
582
583 pPatchRec->patch.pPrivInstrHC = 0;
584 /* The GC virtual ptr is fixed, but we must convert it manually again to HC. */
585 rc = PGMPhysGCPtr2R3Ptr(pVM, pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
586 /* Can fail due to page or page table not present. */
587
588 /*
589 * Restore fixup records and correct HC pointers in fixup records
590 */
591 pPatchRec->patch.FixupTree = 0;
592 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
593 for (int i=0;i<patch.patch.nrFixups;i++)
594 {
595 RELOCREC rec;
596 int32_t offset;
597 RTRCPTR *pFixup;
598
599 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
600 AssertRCReturn(rc, rc);
601
602 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
603 offset = (int32_t)(int64_t)rec.pRelocPos;
604 /* Convert to HC pointer again. */
605 PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
606 pFixup = (RTRCPTR *)rec.pRelocPos;
607
608 if (pPatchRec->patch.uState != PATCH_REFUSED)
609 {
610 if ( rec.uType == FIXUP_REL_JMPTOPATCH
611 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
612 {
613 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
614 unsigned offset = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
615
616 Assert(pPatchRec->patch.pPrivInstrHC);
617 rec.pRelocPos = pPatchRec->patch.pPrivInstrHC + offset;
618 pFixup = (RTRCPTR *)rec.pRelocPos;
619 }
620
621 patmCorrectFixup(pVM, u32Version, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
622 }
623
624 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
625 AssertRCReturn(rc, rc);
626 }
627
628 /* And all patch to guest lookup records */
629 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
630
631 pPatchRec->patch.Patch2GuestAddrTree = 0;
632 pPatchRec->patch.Guest2PatchAddrTree = 0;
633 if (pPatchRec->patch.nrPatch2GuestRecs)
634 {
635 RECPATCHTOGUEST rec;
636 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
637
638 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
639 for (uint32_t i=0;i<nrPatch2GuestRecs;i++)
640 {
641 rc = SSMR3GetMem(pSSM, &rec, sizeof(rec));
642 AssertRCReturn(rc, rc);
643
644 patmr3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
645 }
646 Assert(pPatchRec->patch.Patch2GuestAddrTree);
647 }
648
649 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
650 {
651 /* Insert the guest page lookup records (for detection self-modifying code) */
652 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
653 AssertRCReturn(rc, rc);
654 }
655
656#if 0 /* can fail def LOG_ENABLED */
657 if ( pPatchRec->patch.uState != PATCH_REFUSED
658 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
659 {
660 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
661 Log(("Patch code ----------------------------------------------------------\n"));
662 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
663 Log(("Patch code ends -----------------------------------------------------\n"));
664 MMR3HeapFree(pPatchRec->patch.pTempInfo);
665 pPatchRec->patch.pTempInfo = NULL;
666 }
667#endif
668
669 }
670
671 /*
672 * Correct absolute fixups in the global patch. (helper functions)
673 * Bit of a mess. Uses the new patch record, but restored patch functions.
674 */
675 PRELOCREC pRec = 0;
676 AVLPVKEY key = 0;
677
678 Log(("Correct fixups in global helper functions\n"));
679 while (true)
680 {
681 int32_t offset;
682 RTRCPTR *pFixup;
683
684 /* Get the record that's closest from above */
685 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
686 if (pRec == 0)
687 break;
688
689 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
690
691 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
692 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
693 pFixup = (RTRCPTR *)pRec->pRelocPos;
694
695 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
696 patmCorrectFixup(pVM, u32Version, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
697 }
698
699#ifdef VBOX_WITH_STATISTICS
700 /*
701 * Restore relevant old statistics
702 */
703 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
704 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
705 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
706 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
707#endif
708
709 /* We can't allow a patched VM to be restored when we're currently forced to use VT-x, because another VT-x VM is already running. */
710 if ( PATMIsEnabled(pVM)
711 && HWACCMIsEnabled(pVM))
712 {
713 VM_SET_ERROR(pVM, rc, "An active VM already uses Intel VT-x hardware acceleration. It is not "
714 "allowed to simultaneously use software virtualization.\n");
715 return VERR_ACCESS_DENIED;
716 }
717
718 return VINF_SUCCESS;
719}
720
721/**
722 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
723 *
724 * @returns VBox status code.
725 * @param pVM VM Handle.
726 * @param ulSSMVersion SSM version
727 * @param patmInfo Saved PATM structure
728 * @param pPatch Patch record
729 * @param pRec Relocation record
730 * @param offset Offset of referenced data/code
731 * @param pFixup Fixup address
732 */
733static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
734{
735 int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;
736
737 switch (pRec->uType)
738 {
739 case FIXUP_ABSOLUTE:
740 {
741 if (pRec->pSource && !PATMIsPatchGCAddr(pVM, pRec->pSource))
742 break;
743
744 if ( *pFixup >= patmInfo.pGCStateGC
745 && *pFixup < patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
746 {
747 LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
748 *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
749 }
750 else
751 if ( *pFixup >= patmInfo.pCPUMCtxGC
752 && *pFixup < patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
753 {
754 LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));
755
756 /* The CPUMCTX structure has completely changed, so correct the offsets too. */
757 if (ulSSMVersion == PATM_SSM_VERSION_VER16)
758 {
759 unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;
760
761 /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
762 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
763 * function is not available in older gcc versions, at least not in gcc-3.3 */
764 if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
765 {
766 LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
767 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
768 }
769 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
770 {
771 LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
772 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
773 }
774 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
775 {
776 LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
777 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
778 }
779 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
780 {
781 LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
782 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
783 }
784 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
785 {
786 LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
787 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
788 }
789 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
790 {
791 LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
792 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
793 }
794 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
795 {
796 LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
797 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
798 }
799 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
800 {
801 LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
802 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
803 }
804 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
805 {
806 LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
807 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
808 }
809 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
810 {
811 LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
812 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
813 }
814 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
815 {
816 LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
817 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
818 }
819 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
820 {
821 LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
822 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
823 }
824 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
825 {
826 LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
827 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
828 }
829 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
830 {
831 LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
832 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
833 }
834 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
835 {
836 LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
837 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
838 }
839 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
840 {
841 LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
842 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
843 }
844 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
845 {
846 LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
847 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
848 }
849 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
850 {
851 LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
852 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
853 }
854 else
855 AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
856 }
857 else
858 *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
859 }
860 else
861 if ( *pFixup >= patmInfo.pStatsGC
862 && *pFixup < patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
863 {
864 LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
865 *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
866 }
867 else
868 if ( *pFixup >= patmInfo.pGCStackGC
869 && *pFixup < patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
870 {
871 LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
872 *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
873 }
874 else
875 if ( *pFixup >= patmInfo.pPatchMemGC
876 && *pFixup < patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
877 {
878 LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
879 *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
880 }
881 else
882 /* Note: rather assumptive! */
883 if ( *pFixup >= pVM->pVMRC
884 && *pFixup < pVM->pVMRC + 32)
885 {
886 LogFlow(("Changing fForcedActions fixup from %x to %x\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, fForcedActions)));
887 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, fForcedActions);
888 }
889 else
890 if ( *pFixup >= pVM->pVMRC
891 && *pFixup < pVM->pVMRC + 8192)
892 {
893 static int cCpuidFixup = 0;
894#ifdef LOG_ENABLED
895 RTRCPTR oldFixup = *pFixup;
896#endif
897 /* very dirty assumptions about the cpuid patch and cpuid ordering. */
898 switch(cCpuidFixup & 3)
899 {
900 case 0:
901 *pFixup = CPUMGetGuestCpuIdDefRCPtr(pVM);
902 break;
903 case 1:
904 *pFixup = CPUMGetGuestCpuIdStdRCPtr(pVM);
905 break;
906 case 2:
907 *pFixup = CPUMGetGuestCpuIdExtRCPtr(pVM);
908 break;
909 case 3:
910 *pFixup = CPUMGetGuestCpuIdCentaurRCPtr(pVM);
911 break;
912 }
913 LogFlow(("Changing cpuid fixup %d from %x to %x\n", cCpuidFixup, oldFixup, *pFixup));
914 cCpuidFixup++;
915 }
916 else
917 AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
918
919#ifdef RT_OS_WINDOWS
920 AssertCompile(RT_OFFSETOF(VM, fForcedActions) < 32);
921#endif
922 break;
923 }
924
925 case FIXUP_REL_JMPTOPATCH:
926 {
927 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
928
929 if ( pPatch->uState == PATCH_ENABLED
930 && (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
931 {
932 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
933 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
934 RTRCPTR pJumpOffGC;
935 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
936 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
937
938 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
939
940 Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
941#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
942 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
943 {
944 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
945
946 pJumpOffGC = pPatch->pPrivInstrGC + 2; //two byte opcode
947 oldJump[0] = pPatch->aPrivInstr[0];
948 oldJump[1] = pPatch->aPrivInstr[1];
949 *(RTRCUINTPTR *)&oldJump[2] = displOld;
950 }
951 else
952#endif
953 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
954 {
955 pJumpOffGC = pPatch->pPrivInstrGC + 1; //one byte opcode
956 oldJump[0] = 0xE9;
957 *(RTRCUINTPTR *)&oldJump[1] = displOld;
958 }
959 else
960 {
961 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
962 break;
963 }
964 Assert(pPatch->cbPatchJump <= sizeof(temp));
965
966 /*
967 * Read old patch jump and compare it to the one we previously installed
968 */
969 int rc = PGMPhysSimpleReadGCPtr(pVM, temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
970 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
971
972 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
973 {
974 RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;
975
976 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
977 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
978 }
979 else
980 if (memcmp(temp, oldJump, pPatch->cbPatchJump))
981 {
982 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
983 /*
984 * Disable patch; this is not a good solution
985 */
986 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
987 pPatch->uState = PATCH_DISABLED;
988 }
989 else
990 if (RT_SUCCESS(rc))
991 {
992 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pJumpOffGC, &displ, sizeof(displ));
993 AssertRC(rc);
994 }
995 else
996 {
997 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
998 }
999 }
1000 else
1001 {
1002 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->pPrivInstrHC, pRec->pRelocPos));
1003 }
1004
1005 pRec->pDest = pTarget;
1006 break;
1007 }
1008
1009 case FIXUP_REL_JMPTOGUEST:
1010 {
1011 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
1012 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
1013
1014 Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
1015 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1016 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
1017 pRec->pSource = pSource;
1018 break;
1019
1020 }
1021}
1022}
1023
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette