VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp@ 45457

Last change on this file since 45457 was 45276, checked in by vboxsync, 12 years ago

Ring-1 compression patches, courtesy of trivirt AG:

  • main: diff to remove the hwvirt requirement for QNX
  • rem: diff for dealing with raw ring 0/1 selectors and general changes to allowed guest execution states
  • vmm: changes for using the guest's TSS selector index as our hypervisor TSS selector (makes str safe) (VBOX_WITH_SAFE_STR)
  • vmm: changes for dealing with guest ring 1 code (VBOX_WITH_RAW_RING1)
  • vmm: change to emulate smsw in RC/R0 (QNX uses this old style instruction a lot so going to qemu for emulation is very expensive)
  • vmm: change (hack) to kick out patm virtual handlers in case they conflict with guest GDT/TSS write monitors; we should allow multiple handlers per page, but that change would be rather invasive
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 57.2 KB
Line 
1/* $Id: PATMSSM.cpp 45276 2013-04-02 08:17:11Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/cpumctx-v1_6.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/ssm.h>
29#include <VBox/param.h>
30#include <iprt/avl.h>
31#include "PATMInternal.h"
32#include "PATMPatch.h"
33#include "PATMA.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/vmm/csam.h>
36#include "internal/pgm.h"
37#include <VBox/dbg.h>
38#include <VBox/err.h>
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45
/**
 * Patch information - SSM version.
 *
 * The difference to the live PATCHINFO structure is the missing
 * pTrampolinePatchesHead member, kept separate to avoid changing the
 * saved state version for now (will come later).
 *
 * NOTE(review): the exact layout — including the unused and alignment
 * members — is part of the saved state format; do not reorder, resize
 * or remove fields.
 */
typedef struct PATCHINFOSSM
{
    uint32_t uState;                        /**< Current patch state (e.g. PATCH_REFUSED). */
    uint32_t uOldState;                     /**< Previous patch state. */
    DISCPUMODE uOpMode;                     /**< Disassembler CPU mode of the patched code. */

    /* GC pointer of privileged instruction */
    RCPTRTYPE(uint8_t *) pPrivInstrGC;
    R3PTRTYPE(uint8_t *) unusedHC;          /**< @todo Can't remove due to structure size dependencies in saved states. */
    uint8_t aPrivInstr[MAX_INSTR_SIZE];     /**< Copy of the privileged instruction bytes. */
    uint32_t cbPrivInstr;                   /**< Size of the privileged instruction. */
    uint32_t opcode;                        /**< Opcode for priv instr (OP_*). */
    uint32_t cbPatchJump;                   /**< Patch jump size. */

    /* Only valid for PATMFL_JUMP_CONFLICT patches */
    RTRCPTR pPatchJumpDestGC;

    RTGCUINTPTR32 pPatchBlockOffset;        /**< Offset of the generated code into the patch memory block. */
    uint32_t cbPatchBlockSize;
    uint32_t uCurPatchOffset;
#if HC_ARCH_BITS == 64
    uint32_t Alignment0;                    /**< Align flags correctly. */
#endif

    uint64_t flags;                         /**< PATMFL_* flags. */

    /**
     * Lowest and highest patched GC instruction address. To optimize searches.
     */
    RTRCPTR pInstrGCLowest;
    RTRCPTR pInstrGCHighest;

    /* Tree of fixup records for the patch. */
    R3PTRTYPE(PAVLPVNODECORE) FixupTree;
    uint32_t nrFixups;                      /**< Number of records in FixupTree. */

    /* Tree of jumps inside the generated patch code. */
    uint32_t nrJumpRecs;
    R3PTRTYPE(PAVLPVNODECORE) JumpTree;

    /**
     * Lookup trees for determining the corresponding guest address of an
     * instruction in the patch block.
     */
    R3PTRTYPE(PAVLU32NODECORE) Patch2GuestAddrTree;
    R3PTRTYPE(PAVLU32NODECORE) Guest2PatchAddrTree;
    uint32_t nrPatch2GuestRecs;             /**< Number of records in Patch2GuestAddrTree. */
#if HC_ARCH_BITS == 64
    uint32_t Alignment1;
#endif

    /* Unused, but can't remove due to structure size dependencies in the saved state. */
    PATMP2GLOOKUPREC_OBSOLETE unused;

    /* Temporary information during patch creation. Don't waste hypervisor memory for this. */
    R3PTRTYPE(PPATCHINFOTEMP) pTempInfo;

    /* Count the number of writes to the corresponding guest code. */
    uint32_t cCodeWrites;

    /* Some statistics to determine if we should keep this patch activated. */
    uint32_t cTraps;

    /* Count the number of invalid writes to pages monitored for the patch. */
    uint32_t cInvalidWrites;

    // Index into the uPatchRun and uPatchTrap arrays (0..MAX_PATCHES-1)
    uint32_t uPatchIdx;

    /* First opcode byte, that's overwritten when a patch is marked dirty. */
    uint8_t bDirtyOpcode;
    uint8_t Alignment2[7];                  /**< Align the structure size on a 8-byte boundary. */
} PATCHINFOSSM, *PPATCHINFOSSM;
125
/**
 * Lookup record for patches - SSM version.
 *
 * Mirrors the live PATMPATCHREC: two AVL tree node cores plus the patch
 * information itself (in its SSM layout, see PATCHINFOSSM above).
 */
typedef struct PATMPATCHRECSSM
{
    /** The key is a GC virtual address (of the patched instruction). */
    AVLOU32NODECORE Core;
    /** The key is a patch offset (into patch memory). */
    AVLOU32NODECORE CoreOffset;

    /** The patch information in saved-state layout. */
    PATCHINFOSSM patch;
} PATMPATCHRECSSM, *PPATMPATCHRECSSM;
138
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Rebases pointer @a a by subtracting the address of @a b in place; used to
 *  turn host (HC) pointers into offsets before writing them to the saved
 *  state (see patmSaveFixupRecords). */
#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
/** Inverse of PATM_SUBTRACT_PTR: rebases pointer @a a by adding the address
 *  of @a b in place — presumably used on load to restore pointers; verify
 *  against the load path. */
#define PATM_ADD_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
/** Corrects a single fixup after load; defined later in this file. */
static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
149
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * NOTE(review): entry order must match the field order of the PATM
 * structure as it was saved; host-context pointers and statistics are
 * marked ignore since they are recalculated/reset on load.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE( PATM, offVM),
    SSMFIELD_ENTRY_RCPTR( PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR( PATM, pPatchMemHC),
    SSMFIELD_ENTRY( PATM, cbPatchMem),
    SSMFIELD_ENTRY( PATM, offPatchMem),
    SSMFIELD_ENTRY( PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 3),
    SSMFIELD_ENTRY( PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR( PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR( PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR( PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR( PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR( PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR( PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR( PATM, pStatsHC),
    SSMFIELD_ENTRY( PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY( PATM, ulCallDepth),
    SSMFIELD_ENTRY( PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR( PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR( PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR( PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR( PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR( PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR( PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR( PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR( PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR( PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR( PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR( PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY( PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR( PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS( PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR( PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR( PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR( PATM, savedstate.pSSM),
    SSMFIELD_ENTRY( PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64( PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* Statistics below are neither saved nor restored. */
    SSMFIELD_ENTRY_IGNORE( PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE( PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE( PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE( PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE( PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE( PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE( PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE( PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE( PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE( PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE( PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE( PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE( PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE( PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE( PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE( PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE( PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE( PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE( PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE( PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE( PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE( PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE( PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE( PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE( PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE( PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE( PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE( PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE( PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE( PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE( PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE( PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE( PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
249
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 *
 * NOTE(review): entry order must match the saved field layout of
 * PATMGCSTATE; used with SSMR3GetStructEx on load (see patmR3Load).
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY( PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY( PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY( PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY( PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY( PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY( PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY( PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY( PATMGCSTATE, Psp),
    SSMFIELD_ENTRY( PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR( PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR( PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR( PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY( PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY( PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY( PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY( PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY( PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
274
/**
 * SSM descriptor table for the PATMPATCHRECSSM structure.
 *
 * (The original comment said PATMPATCHREC, but the table describes the
 * saved-state variant PATMPATCHRECSSM.)
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY( PATMPATCHRECSSM, Core.Key),
    SSMFIELD_ENTRY_IGNORE( PATMPATCHRECSSM, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE( PATMPATCHRECSSM, Core.pRight),
    SSMFIELD_ENTRY_IGNORE( PATMPATCHRECSSM, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 3),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE( PATMPATCHRECSSM, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE( PATMPATCHRECSSM, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE( PATMPATCHRECSSM, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 3),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.uState),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.uOldState),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR( PATMPATCHRECSSM, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR( PATMPATCHRECSSM, patch.unusedHC),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.aPrivInstr),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.cbPrivInstr),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.opcode),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR( PATMPATCHRECSSM, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64( PATMPATCHRECSSM, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.flags),
    SSMFIELD_ENTRY_RCPTR( PATMPATCHRECSSM, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR( PATMPATCHRECSSM, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR( PATMPATCHRECSSM, patch.FixupTree),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.nrFixups),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR( PATMPATCHRECSSM, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR( PATMPATCHRECSSM, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR( PATMPATCHRECSSM, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64( PATMPATCHRECSSM, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR( PATMPATCHRECSSM, patch.unused.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR( PATMPATCHRECSSM, patch.unused.pPatchLocEndHC), // ditto
    SSMFIELD_ENTRY_IGN_RCPTR( PATMPATCHRECSSM, patch.unused.pGuestLoc), // ditto
    SSMFIELD_ENTRY_IGNORE( PATMPATCHRECSSM, patch.unused.opsize), // ditto
    SSMFIELD_ENTRY_IGN_HCPTR( PATMPATCHRECSSM, patch.pTempInfo),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.cCodeWrites),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.cTraps),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.cInvalidWrites),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.uPatchIdx),
    SSMFIELD_ENTRY( PATMPATCHRECSSM, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE( PATMPATCHRECSSM, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
328
/**
 * SSM descriptor table for the RELOCREC structure.
 *
 * See patmSaveFixupRecords for how Core.Key and pRelocPos are rewritten
 * before the record is written out.
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32( RELOCREC, Core.Key), // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR( RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR( RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE( RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 7),
    SSMFIELD_ENTRY( RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32( RELOCREC, pRelocPos), // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR( RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR( RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
346
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 *
 * These records map patch-code addresses back to guest addresses; saved
 * per patch by patmSaveP2GLookupRecords.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY( RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR( RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR( RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE( RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 7),
    SSMFIELD_ENTRY_RCPTR( RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY( RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY( RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY( RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY( RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO( 1, 5),
    SSMFIELD_ENTRY_TERM()
};
366
367#ifdef VBOX_STRICT
368
369/**
370 * Callback function for RTAvlPVDoWithAll
371 *
372 * Counts the number of patches in the tree
373 *
374 * @returns VBox status code.
375 * @param pNode Current node
376 * @param pcPatches Pointer to patch counter (uint32_t)
377 */
378static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
379{
380 NOREF(pNode);
381 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
382 return VINF_SUCCESS;
383}
384
385/**
386 * Callback function for RTAvlU32DoWithAll
387 *
388 * Counts the number of patches in the tree
389 *
390 * @returns VBox status code.
391 * @param pNode Current node
392 * @param pcPatches Pointer to patch counter (uint32_t)
393 */
394static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
395{
396 NOREF(pNode);
397 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
398 return VINF_SUCCESS;
399}
400
401#endif /* VBOX_STRICT */
402
403/**
404 * Callback function for RTAvloU32DoWithAll
405 *
406 * Counts the number of patches in the tree
407 *
408 * @returns VBox status code.
409 * @param pNode Current node
410 * @param pcPatches Pointer to patch counter
411 */
412static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
413{
414 NOREF(pNode);
415 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
416 return VINF_SUCCESS;
417}
418
419/**
420 * Callback function for RTAvlU32DoWithAll
421 *
422 * Saves all patch to guest lookup records.
423 *
424 * @returns VBox status code.
425 * @param pNode Current node
426 * @param pVM1 Pointer to the VM
427 */
428static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
429{
430 PVM pVM = (PVM)pVM1;
431 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
432 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
433
434 /* Save the lookup record. */
435 int rc = SSMR3PutMem(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST));
436 AssertRCReturn(rc, rc);
437
438 return VINF_SUCCESS;
439}
440
/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Saves all fixup (relocation) records of a patch.  (The original comment
 * incorrectly said "patch to guest lookup records".)
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a RELOCREC).
 * @param   pVM1    Pointer to the VM.
 */
static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    RELOCREC rec = *(PRELOCREC)pNode;           /* local copy; the tree node itself stays untouched */
    RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos; /* host address of the fixup site, captured before conversion */

    Assert(rec.pRelocPos);
    /* Convert pointer to an offset into patch memory. */
    PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);

    if (rec.uType == FIXUP_ABSOLUTE)
    {
        /* Core.Key abused to store the fixup type — presumably so the load
           code can re-resolve the target in the new VM layout (see
           patmCorrectFixup); verify against the load path. */
        if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdDefRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdStdRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdExtRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdCentaurRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
    }

    /* Save the (converted copy of the) fixup record. */
    int rc = SSMR3PutMem(pSSM, &rec, sizeof(rec));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
486
/**
 * Converts a saved state patch record to the in-memory record.
 *
 * Copies every PATCHINFOSSM field into the live PATCHINFO and NULLs the
 * pTrampolinePatchesHead member, which is absent from the saved state.
 *
 * @returns nothing.
 * @param   pPatch      The memory record (output).
 * @param   pPatchSSM   The SSM version of the patch record (input).
 */
static void patmR3PatchConvertSSM2Mem(PPATMPATCHREC pPatch, PPATMPATCHRECSSM pPatchSSM)
{
    /*
     * Only restore the patch part of the tree record; not the internal data (except the key of course)
     */
    pPatch->Core.Key = pPatchSSM->Core.Key;
    pPatch->CoreOffset.Key = pPatchSSM->CoreOffset.Key;
    pPatch->patch.uState = pPatchSSM->patch.uState;
    pPatch->patch.uOldState = pPatchSSM->patch.uOldState;
    pPatch->patch.uOpMode = pPatchSSM->patch.uOpMode;
    pPatch->patch.pPrivInstrGC = pPatchSSM->patch.pPrivInstrGC;
    pPatch->patch.unusedHC = pPatchSSM->patch.unusedHC;
    memcpy(&pPatch->patch.aPrivInstr[0], &pPatchSSM->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatch->patch.cbPrivInstr = pPatchSSM->patch.cbPrivInstr;
    pPatch->patch.opcode = pPatchSSM->patch.opcode;
    pPatch->patch.cbPatchJump = pPatchSSM->patch.cbPatchJump;
    pPatch->patch.pPatchJumpDestGC = pPatchSSM->patch.pPatchJumpDestGC;
    pPatch->patch.pPatchBlockOffset = pPatchSSM->patch.pPatchBlockOffset;
    pPatch->patch.cbPatchBlockSize = pPatchSSM->patch.cbPatchBlockSize;
    pPatch->patch.uCurPatchOffset = pPatchSSM->patch.uCurPatchOffset;
    pPatch->patch.flags = pPatchSSM->patch.flags;
    pPatch->patch.pInstrGCLowest = pPatchSSM->patch.pInstrGCLowest;
    pPatch->patch.pInstrGCHighest = pPatchSSM->patch.pInstrGCHighest;
    pPatch->patch.FixupTree = pPatchSSM->patch.FixupTree;
    pPatch->patch.nrFixups = pPatchSSM->patch.nrFixups;
    pPatch->patch.nrJumpRecs = pPatchSSM->patch.nrJumpRecs;
    pPatch->patch.JumpTree = pPatchSSM->patch.JumpTree;
    pPatch->patch.Patch2GuestAddrTree = pPatchSSM->patch.Patch2GuestAddrTree;
    pPatch->patch.Guest2PatchAddrTree = pPatchSSM->patch.Guest2PatchAddrTree;
    pPatch->patch.nrPatch2GuestRecs = pPatchSSM->patch.nrPatch2GuestRecs;
    pPatch->patch.unused = pPatchSSM->patch.unused;
    pPatch->patch.pTempInfo = pPatchSSM->patch.pTempInfo;
    pPatch->patch.cCodeWrites = pPatchSSM->patch.cCodeWrites;
    pPatch->patch.cTraps = pPatchSSM->patch.cTraps;
    pPatch->patch.cInvalidWrites = pPatchSSM->patch.cInvalidWrites;
    pPatch->patch.uPatchIdx = pPatchSSM->patch.uPatchIdx;
    pPatch->patch.bDirtyOpcode = pPatchSSM->patch.bDirtyOpcode;
    /* Not part of the saved state; rebuilt later as trampolines are created. */
    pPatch->patch.pTrampolinePatchesHead = NULL;
}
533
/**
 * Converts a memory patch record to the saved state version.
 *
 * Inverse of patmR3PatchConvertSSM2Mem; copies the full tree node cores
 * plus every PATCHINFO field that exists in the SSM layout
 * (pTrampolinePatchesHead is intentionally dropped).
 *
 * @returns nothing.
 * @param   pPatchSSM   The saved state record (output).
 * @param   pPatch      The memory version to save (input).
 */
static void patmR3PatchConvertMem2SSM(PPATMPATCHRECSSM pPatchSSM, PPATMPATCHREC pPatch)
{
    pPatchSSM->Core = pPatch->Core;
    pPatchSSM->CoreOffset = pPatch->CoreOffset;
    pPatchSSM->patch.uState = pPatch->patch.uState;
    pPatchSSM->patch.uOldState = pPatch->patch.uOldState;
    pPatchSSM->patch.uOpMode = pPatch->patch.uOpMode;
    pPatchSSM->patch.pPrivInstrGC = pPatch->patch.pPrivInstrGC;
    pPatchSSM->patch.unusedHC = pPatch->patch.unusedHC;
    memcpy(&pPatchSSM->patch.aPrivInstr[0], &pPatch->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatchSSM->patch.cbPrivInstr = pPatch->patch.cbPrivInstr;
    pPatchSSM->patch.opcode = pPatch->patch.opcode;
    pPatchSSM->patch.cbPatchJump = pPatch->patch.cbPatchJump;
    pPatchSSM->patch.pPatchJumpDestGC = pPatch->patch.pPatchJumpDestGC;
    pPatchSSM->patch.pPatchBlockOffset = pPatch->patch.pPatchBlockOffset;
    pPatchSSM->patch.cbPatchBlockSize = pPatch->patch.cbPatchBlockSize;
    pPatchSSM->patch.uCurPatchOffset = pPatch->patch.uCurPatchOffset;
    pPatchSSM->patch.flags = pPatch->patch.flags;
    pPatchSSM->patch.pInstrGCLowest = pPatch->patch.pInstrGCLowest;
    pPatchSSM->patch.pInstrGCHighest = pPatch->patch.pInstrGCHighest;
    pPatchSSM->patch.FixupTree = pPatch->patch.FixupTree;
    pPatchSSM->patch.nrFixups = pPatch->patch.nrFixups;
    pPatchSSM->patch.nrJumpRecs = pPatch->patch.nrJumpRecs;
    pPatchSSM->patch.JumpTree = pPatch->patch.JumpTree;
    pPatchSSM->patch.Patch2GuestAddrTree = pPatch->patch.Patch2GuestAddrTree;
    pPatchSSM->patch.Guest2PatchAddrTree = pPatch->patch.Guest2PatchAddrTree;
    pPatchSSM->patch.nrPatch2GuestRecs = pPatch->patch.nrPatch2GuestRecs;
    pPatchSSM->patch.unused = pPatch->patch.unused;
    pPatchSSM->patch.pTempInfo = pPatch->patch.pTempInfo;
    pPatchSSM->patch.cCodeWrites = pPatch->patch.cCodeWrites;
    pPatchSSM->patch.cTraps = pPatch->patch.cTraps;
    pPatchSSM->patch.cInvalidWrites = pPatch->patch.cInvalidWrites;
    pPatchSSM->patch.uPatchIdx = pPatch->patch.uPatchIdx;
    pPatchSSM->patch.bDirtyOpcode = pPatch->patch.bDirtyOpcode;
}
576
/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Saves the state of the patch that's being enumerated: first the patch
 * record itself, then its fixup records, then its patch-to-guest lookup
 * records (the load code must consume them in the same order).
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a PATMPATCHREC).
 * @param   pVM1    Pointer to the VM.
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHRECSSM patch;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    int rc;

    /* The global-functions record is saved/handled separately. */
    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    patmR3PatchConvertMem2SSM(&patch, pPatch);

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    /* Save the patch record itself */
    rc = SSMR3PutMem(pSSM, &patch, sizeof(patch));
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    /* Strict builds cross-check the cached count against the actual tree. */
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg(nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);

#ifdef VBOX_STRICT
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
    return VINF_SUCCESS;
}
630
/**
 * Execute state save operation.
 *
 * Writes, in order: the PATM structure (with HC pointers zeroed in a
 * local copy), the raw patch memory, the GC state, the PATM stack page,
 * and finally every patch record via patmSavePatchState.  patmR3Load
 * must read the units back in exactly this order.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PATM patmInfo = pVM->patm.s;    /* local copy so HC pointers can be scrubbed without touching live state */
    int rc;

    /* Stash the SSM handle where the tree-walk callbacks can reach it. */
    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC = NULL;
    patmInfo.pGCStateHC = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutMem(pSSM, &patmInfo, sizeof(patmInfo));
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
694
695/**
696 * Execute state load operation.
697 *
698 * @returns VBox status code.
699 * @param pVM Pointer to the VM.
700 * @param pSSM SSM operation handle.
701 * @param uVersion Data layout version.
702 * @param uPass The data pass.
703 */
704DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
705{
706 PATM patmInfo;
707 int rc;
708
709 if ( uVersion != PATM_SSM_VERSION
710 && uVersion != PATM_SSM_VERSION_FIXUP_HACK
711 && uVersion != PATM_SSM_VERSION_VER16
712 )
713 {
714 AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
715 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
716 }
717 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
718
719 pVM->patm.s.savedstate.pSSM = pSSM;
720
721 /*
722 * Restore PATM structure
723 */
724 RT_ZERO(patmInfo);
725 rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmFields[0], NULL);
726 AssertRCReturn(rc, rc);
727
728 /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
729 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
730 if ( (pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC)
731 || (pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC)
732 || (pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC)
733 || (pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) != (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC))
734 {
735 AssertMsgFailed(("Helper function ptrs don't match!!!\n"));
736 return VERR_SSM_INVALID_STATE;
737 }
738
739 if (pVM->patm.s.cbPatchMem != patmInfo.cbPatchMem)
740 {
741 AssertMsgFailed(("Patch memory ptrs and/or sizes don't match!!!\n"));
742 return VERR_SSM_INVALID_STATE;
743 }
744 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
745 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
746 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
747 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
748
749 /* Lowest and highest patched instruction */
750 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
751 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
752
753 /* Sysenter handlers */
754 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
755 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
756 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
757
758 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
759
760 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
761 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
762 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
763 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
764
765
766 /** @note patch statistics are not restored. */
767
768 /*
769 * Restore patch memory contents
770 */
771 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
772 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
773 AssertRCReturn(rc, rc);
774
775 /*
776 * Restore GC state memory
777 */
778 RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
779 rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmGCStateFields[0], NULL);
780 AssertRCReturn(rc, rc);
781
782 /*
783 * Restore PATM stack page
784 */
785 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
786 AssertRCReturn(rc, rc);
787
788 /*
789 * Load all patches
790 */
791 for (unsigned i = 0; i < patmInfo.savedstate.cPatches; i++)
792 {
793 PATMPATCHRECSSM patch;
794 PATMPATCHREC *pPatchRec;
795
796 RT_ZERO(patch);
797 rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmPatchRecFields[0], NULL);
798 AssertRCReturn(rc, rc);
799
800 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
801
802 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
803 if (RT_FAILURE(rc))
804 {
805 AssertMsgFailed(("Out of memory!!!!\n"));
806 return VERR_NO_MEMORY;
807 }
808
809 /* Convert SSM version to memory. */
810 patmR3PatchConvertSSM2Mem(pPatchRec, &patch);
811
812 Log(("Restoring patch %RRv -> %RRv state %x\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset, pPatchRec->patch.uState));
813 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
814 Assert(ret);
815 if (pPatchRec->patch.uState != PATCH_REFUSED)
816 {
817 if (pPatchRec->patch.pPatchBlockOffset)
818 {
819 /* We actually generated code for this patch. */
820 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
821 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
822 }
823 }
824 /* Set to zero as we don't need it anymore. */
825 pPatchRec->patch.pTempInfo = 0;
826
827 PATMP2GLOOKUPREC cacheRec;
828 RT_ZERO(cacheRec);
829 cacheRec.pPatch = &pPatchRec->patch;
830
831 uint8_t *pPrivInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pPatchRec->patch.pPrivInstrGC);
832 /* Can fail due to page or page table not present. */
833
834 /*
835 * Restore fixup records and correct HC pointers in fixup records
836 */
837 pPatchRec->patch.FixupTree = 0;
838 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
839 for (unsigned j = 0; j < patch.patch.nrFixups; j++)
840 {
841 RELOCREC rec;
842 int32_t offset;
843 RTRCPTR *pFixup;
844
845 RT_ZERO(rec);
846 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRelocRec[0], NULL);
847 AssertRCReturn(rc, rc);
848
849 if (pPrivInstrHC)
850 {
851 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
852 offset = (int32_t)(intptr_t)rec.pRelocPos;
853 /* Convert to HC pointer again. */
854 PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
855 pFixup = (RTRCPTR *)rec.pRelocPos;
856
857 if (pPatchRec->patch.uState != PATCH_REFUSED)
858 {
859 if ( rec.uType == FIXUP_REL_JMPTOPATCH
860 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
861 {
862 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
863 unsigned offset2 = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
864
865 rec.pRelocPos = pPrivInstrHC + offset2;
866 pFixup = (RTRCPTR *)rec.pRelocPos;
867 }
868
869 patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
870 }
871
872 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
873 AssertRCReturn(rc, rc);
874 }
875 }
876 /* Release previous lock if any. */
877 if (cacheRec.Lock.pvMap)
878 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
879
880 /* And all patch to guest lookup records */
881 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
882
883 pPatchRec->patch.Patch2GuestAddrTree = 0;
884 pPatchRec->patch.Guest2PatchAddrTree = 0;
885 if (pPatchRec->patch.nrPatch2GuestRecs)
886 {
887 RECPATCHTOGUEST rec;
888 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
889
890 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
891 for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
892 {
893 RT_ZERO(rec);
894 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aPatmRecPatchToGuest[0], NULL);
895 AssertRCReturn(rc, rc);
896
897 patmR3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
898 }
899 Assert(pPatchRec->patch.Patch2GuestAddrTree);
900 }
901
902 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
903 {
904 /* Insert the guest page lookup records (for detection self-modifying code) */
905 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
906 AssertRCReturn(rc, rc);
907 }
908
909#if 0 /* can fail def LOG_ENABLED */
910 if ( pPatchRec->patch.uState != PATCH_REFUSED
911 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
912 {
913 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
914 Log(("Patch code ----------------------------------------------------------\n"));
915 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
916 Log(("Patch code ends -----------------------------------------------------\n"));
917 MMR3HeapFree(pPatchRec->patch.pTempInfo);
918 pPatchRec->patch.pTempInfo = NULL;
919 }
920#endif
921 /* Remove the patch in case the gc mapping is not present. */
922 if ( !pPrivInstrHC
923 && pPatchRec->patch.uState == PATCH_ENABLED)
924 {
925 Log(("Remove patch %RGv due to failed HC address translation\n", pPatchRec->patch.pPrivInstrGC));
926 PATMR3RemovePatch(pVM, pPatchRec->patch.pPrivInstrGC);
927 }
928 }
929
930 /*
931 * Correct absolute fixups in the global patch. (helper functions)
932 * Bit of a mess. Uses the new patch record, but restored patch functions.
933 */
934 PRELOCREC pRec = 0;
935 AVLPVKEY key = 0;
936
937 Log(("Correct fixups in global helper functions\n"));
938 while (true)
939 {
940 int32_t offset;
941 RTRCPTR *pFixup;
942
943 /* Get the record that's closest from above */
944 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
945 if (pRec == 0)
946 break;
947
948 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
949
950 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
951 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
952 pFixup = (RTRCPTR *)pRec->pRelocPos;
953
954 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
955 patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
956 }
957
958#ifdef VBOX_WITH_STATISTICS
959 /*
960 * Restore relevant old statistics
961 */
962 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
963 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
964 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
965 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
966#endif
967
968 return VINF_SUCCESS;
969}
970
971/**
972 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
973 *
974 * @returns VBox status code.
975 * @param pVM Pointer to the VM.
976 * @param ulSSMVersion SSM version
977 * @param patmInfo Saved PATM structure
978 * @param pPatch Patch record
979 * @param pRec Relocation record
980 * @param offset Offset of referenced data/code
981 * @param pFixup Fixup address
982 */
983static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
984{
985 int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;
986
987 switch (pRec->uType)
988 {
989 case FIXUP_ABSOLUTE:
990 {
991 if (pRec->pSource && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource))
992 break;
993
994 if ( *pFixup >= patmInfo.pGCStateGC
995 && *pFixup < patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
996 {
997 LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
998 *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
999 }
1000 else
1001 if ( *pFixup >= patmInfo.pCPUMCtxGC
1002 && *pFixup < patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
1003 {
1004 LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));
1005
1006 /* The CPUMCTX structure has completely changed, so correct the offsets too. */
1007 if (ulSSMVersion == PATM_SSM_VERSION_VER16)
1008 {
1009 unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;
1010
1011 /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
1012 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
1013 * function is not available in older gcc versions, at least not in gcc-3.3 */
1014 if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
1015 {
1016 LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
1017 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
1018 }
1019 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
1020 {
1021 LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
1022 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
1023 }
1024 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
1025 {
1026 LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
1027 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
1028 }
1029 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
1030 {
1031 LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
1032 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
1033 }
1034 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
1035 {
1036 LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
1037 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
1038 }
1039 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
1040 {
1041 LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
1042 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
1043 }
1044 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
1045 {
1046 LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
1047 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
1048 }
1049 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
1050 {
1051 LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
1052 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
1053 }
1054 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
1055 {
1056 LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
1057 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
1058 }
1059 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
1060 {
1061 LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
1062 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
1063 }
1064 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
1065 {
1066 LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
1067 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
1068 }
1069 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
1070 {
1071 LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
1072 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
1073 }
1074 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
1075 {
1076 LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
1077 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1078 }
1079 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
1080 {
1081 LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
1082 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1083 }
1084 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
1085 {
1086 LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
1087 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
1088 }
1089 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
1090 {
1091 LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
1092 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
1093 }
1094 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
1095 {
1096 LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
1097 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
1098 }
1099 else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
1100 {
1101 LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
1102 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
1103 }
1104 else
1105 AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
1106 }
1107 else
1108 *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
1109 }
1110 else
1111 if ( *pFixup >= patmInfo.pStatsGC
1112 && *pFixup < patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
1113 {
1114 LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
1115 *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
1116 }
1117 else
1118 if ( *pFixup >= patmInfo.pGCStackGC
1119 && *pFixup < patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
1120 {
1121 LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
1122 *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
1123 }
1124 else
1125 if ( *pFixup >= patmInfo.pPatchMemGC
1126 && *pFixup < patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
1127 {
1128 LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
1129 *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
1130 }
1131 else
1132 /* Boldly ASSUMES:
1133 * 1. That pCPUMCtxGC is in the VM structure and that its location is
1134 * at the first page of the same 4 MB chunk.
1135 * 2. That the forced actions were in the first 32 bytes of the VM
1136 * structure.
1137 * 3. That the CPUM leafs are less than 8KB into the structure. */
1138 if ( ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
1139 && *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
1140 {
1141 LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
1142 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1143 }
1144 else
1145 if ( ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
1146 && *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(8192))
1147 {
1148 static int cCpuidFixup = 0;
1149#ifdef LOG_ENABLED
1150 RTRCPTR oldFixup = *pFixup;
1151#endif
1152 /* very dirty assumptions about the cpuid patch and cpuid ordering. */
1153 switch(cCpuidFixup & 3)
1154 {
1155 case 0:
1156 *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
1157 break;
1158 case 1:
1159 *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
1160 break;
1161 case 2:
1162 *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
1163 break;
1164 case 3:
1165 *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
1166 break;
1167 }
1168 LogFlow(("Changing cpuid fixup %d from %RRv to %RRv\n", cCpuidFixup, oldFixup, *pFixup));
1169 cCpuidFixup++;
1170 }
1171 else
1172 if (ulSSMVersion >= PATM_SSM_VERSION)
1173 {
1174#ifdef LOG_ENABLED
1175 RTRCPTR oldFixup = *pFixup;
1176#endif
1177 /* Core.Key abused to store the type of fixup */
1178 switch ((uintptr_t)pRec->Core.Key)
1179 {
1180 case PATM_FIXUP_CPU_FF_ACTION:
1181 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1182 LogFlow(("Changing cpu ff action fixup from %x to %x\n", oldFixup, *pFixup));
1183 break;
1184 case PATM_FIXUP_CPUID_DEFAULT:
1185 *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
1186 LogFlow(("Changing cpuid def fixup from %x to %x\n", oldFixup, *pFixup));
1187 break;
1188 case PATM_FIXUP_CPUID_STANDARD:
1189 *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
1190 LogFlow(("Changing cpuid std fixup from %x to %x\n", oldFixup, *pFixup));
1191 break;
1192 case PATM_FIXUP_CPUID_EXTENDED:
1193 *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
1194 LogFlow(("Changing cpuid ext fixup from %x to %x\n", oldFixup, *pFixup));
1195 break;
1196 case PATM_FIXUP_CPUID_CENTAUR:
1197 *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
1198 LogFlow(("Changing cpuid centaur fixup from %x to %x\n", oldFixup, *pFixup));
1199 break;
1200 default:
1201 AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
1202 break;
1203 }
1204 }
1205
1206#ifdef RT_OS_WINDOWS
1207 AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
1208#endif
1209 break;
1210 }
1211
1212 case FIXUP_REL_JMPTOPATCH:
1213 {
1214 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
1215
1216 if ( pPatch->uState == PATCH_ENABLED
1217 && (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
1218 {
1219 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
1220 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
1221 RTRCPTR pJumpOffGC;
1222 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
1223 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
1224
1225 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1226
1227 Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
1228#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
1229 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
1230 {
1231 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
1232
1233 pJumpOffGC = pPatch->pPrivInstrGC + 2; //two byte opcode
1234 oldJump[0] = pPatch->aPrivInstr[0];
1235 oldJump[1] = pPatch->aPrivInstr[1];
1236 *(RTRCUINTPTR *)&oldJump[2] = displOld;
1237 }
1238 else
1239#endif
1240 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
1241 {
1242 pJumpOffGC = pPatch->pPrivInstrGC + 1; //one byte opcode
1243 oldJump[0] = 0xE9;
1244 *(RTRCUINTPTR *)&oldJump[1] = displOld;
1245 }
1246 else
1247 {
1248 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
1249 break;
1250 }
1251 Assert(pPatch->cbPatchJump <= sizeof(temp));
1252
1253 /*
1254 * Read old patch jump and compare it to the one we previously installed
1255 */
1256 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
1257 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1258
1259 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1260 {
1261 RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;
1262
1263 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
1264 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
1265 }
1266 else
1267 if (memcmp(temp, oldJump, pPatch->cbPatchJump))
1268 {
1269 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
1270 /*
1271 * Disable patch; this is not a good solution
1272 */
1273 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
1274 pPatch->uState = PATCH_DISABLED;
1275 }
1276 else
1277 if (RT_SUCCESS(rc))
1278 {
1279 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
1280 AssertRC(rc);
1281 }
1282 else
1283 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
1284 }
1285 else
1286 Log(("Skip the guest jump to patch code for this disabled patch %08X\n", pRec->pRelocPos));
1287
1288 pRec->pDest = pTarget;
1289 break;
1290 }
1291
1292 case FIXUP_REL_JMPTOGUEST:
1293 {
1294 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
1295 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
1296
1297 Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
1298 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1299 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
1300 pRec->pSource = pSource;
1301 break;
1302
1303 }
1304}
1305}
1306
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette