VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp@ 61744

Last change on this file since 61744 was 58122, checked in by vboxsync, 9 years ago

VMM: Made @param pVM more uniform and to the point.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 71.7 KB
Line 
1/* $Id: PATMSSM.cpp 58122 2015-10-08 17:11:58Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2015 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20
21/*********************************************************************************************************************************
22* Header Files *
23*********************************************************************************************************************************/
24#define LOG_GROUP LOG_GROUP_PATM
25#include <VBox/vmm/patm.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumctx-v1_6.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/ssm.h>
31#include <VBox/param.h>
32#include <iprt/avl.h>
33#include "PATMInternal.h"
34#include "PATMPatch.h"
35#include "PATMA.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/csam.h>
38#include "internal/pgm.h"
39#include <VBox/dbg.h>
40#include <VBox/err.h>
41#include <VBox/log.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/dis.h>
46#include <VBox/disopcode.h>
47#include <VBox/version.h>
48
49/**
50 * Patch information - SSM version.
51 *
52 * the difference is the missing pTrampolinePatchesHead member
53 * to avoid changing the saved state version for now (will come later).
54 */
/**
 * Patch information - SSM version.
 *
 * Differs from PATCHINFO only by the missing pTrampolinePatchesHead member,
 * to avoid changing the saved state version for now (will come later).
 *
 * NOTE(review): the member order and the explicit alignment padding are part
 * of the saved state layout (see g_aPatmPatchRecFields) - do not reorder or
 * resize anything here.
 */
typedef struct PATCHINFOSSM
{
    uint32_t                    uState;
    uint32_t                    uOldState;
    DISCPUMODE                  uOpMode;

    /* GC pointer of privileged instruction */
    RCPTRTYPE(uint8_t *)        pPrivInstrGC;
    R3PTRTYPE(uint8_t *)        unusedHC;               /**< @todo Can't remove due to structure size dependencies in saved states. */
    uint8_t                     aPrivInstr[MAX_INSTR_SIZE];
    uint32_t                    cbPrivInstr;
    uint32_t                    opcode;                 /**< Opcode for the privileged instruction (OP_*). */
    uint32_t                    cbPatchJump;            /**< Patch jump size. */

    /* Only valid for PATMFL_JUMP_CONFLICT patches */
    RTRCPTR                     pPatchJumpDestGC;

    RTGCUINTPTR32               pPatchBlockOffset;
    uint32_t                    cbPatchBlockSize;
    uint32_t                    uCurPatchOffset;
#if HC_ARCH_BITS == 64
    uint32_t                    Alignment0;             /**< Align flags correctly. */
#endif

    uint64_t                    flags;

    /**
     * Lowest and highest patched GC instruction address. To optimize searches.
     */
    RTRCPTR                     pInstrGCLowest;
    RTRCPTR                     pInstrGCHighest;

    /* Tree of fixup records for the patch. */
    R3PTRTYPE(PAVLPVNODECORE)   FixupTree;
    uint32_t                    nrFixups;

    /* Tree of jumps inside the generated patch code. */
    uint32_t                    nrJumpRecs;
    R3PTRTYPE(PAVLPVNODECORE)   JumpTree;

    /**
     * Lookup trees for determining the corresponding guest address of an
     * instruction in the patch block.
     */
    R3PTRTYPE(PAVLU32NODECORE)  Patch2GuestAddrTree;
    R3PTRTYPE(PAVLU32NODECORE)  Guest2PatchAddrTree;
    uint32_t                    nrPatch2GuestRecs;
#if HC_ARCH_BITS == 64
    uint32_t                    Alignment1;
#endif

    /* Unused, but can't remove due to structure size dependencies in the saved state. */
    PATMP2GLOOKUPREC_OBSOLETE   unused;

    /* Temporary information during patch creation. Don't waste hypervisor memory for this. */
    R3PTRTYPE(PPATCHINFOTEMP)   pTempInfo;

    /* Count the number of writes to the corresponding guest code. */
    uint32_t                    cCodeWrites;

    /* Some statistics to determine if we should keep this patch activated. */
    uint32_t                    cTraps;

    /* Count the number of invalid writes to pages monitored for the patch. */
    uint32_t                    cInvalidWrites;

    /* Index into the uPatchRun and uPatchTrap arrays (0..MAX_PATCHES-1). */
    uint32_t                    uPatchIdx;

    /* First opcode byte, that's overwritten when a patch is marked dirty. */
    uint8_t                     bDirtyOpcode;
    uint8_t                     Alignment2[7];          /**< Align the structure size on a 8-byte boundary. */
} PATCHINFOSSM, *PPATCHINFOSSM;
128
129/**
130 * Lookup record for patches - SSM version.
131 */
/**
 * Lookup record for patches - SSM version.
 *
 * Mirrors PATMPATCHREC; only the two AVL keys and the PATCHINFOSSM part are
 * actually restored from the saved state (see g_aPatmPatchRecFields).
 */
typedef struct PATMPATCHRECSSM
{
    /** The key is a GC virtual address. */
    AVLOU32NODECORE             Core;
    /** The key is a patch offset. */
    AVLOU32NODECORE             CoreOffset;
    /** The patch data (SSM layout). */
    PATCHINFOSSM                patch;
} PATMPATCHRECSSM, *PPATMPATCHRECSSM;
141
142
143/**
144 * Callback arguments.
145 */
/**
 * Callback arguments passed (as pvUser) to the AVL tree enumeration callbacks
 * used while saving the PATM state.
 */
typedef struct PATMCALLBACKARGS
{
    /** The cross context VM structure. */
    PVM             pVM;
    /** The SSM handle to write to. */
    PSSMHANDLE      pSSM;
    /** The patch record currently being saved (set by patmSavePatchState). */
    PPATMPATCHREC   pPatchRec;
} PATMCALLBACKARGS;
typedef PATMCALLBACKARGS *PPATMCALLBACKARGS;
153
154
155/*********************************************************************************************************************************
156* Internal Functions *
157*********************************************************************************************************************************/
158static int patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec,
159 int32_t offset, RTRCPTR *pFixup);
160
161
162/*********************************************************************************************************************************
163* Global Variables *
164*********************************************************************************************************************************/
165/**
166 * SSM descriptor table for the PATM structure.
167 */
/**
 * SSM descriptor table for the PATM structure.
 *
 * Used with SSMR3PutStructEx/SSMR3GetStructEx; the entry order must match the
 * PATM structure layout of the corresponding saved state version.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* Statistics are not part of the restored state, only skipped over. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
261
262/**
263 * SSM descriptor table for the PATM structure starting with r86139.
264 */
/**
 * SSM descriptor table for the PATM structure starting with r86139.
 *
 * Identical to g_aPatmFields except for the hDbgModPatchMem member and the
 * associated 32-bit host padding inserted after the savedstate sub-structure.
 */
static SSMFIELD const g_aPatmFields86139[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* New in r86139: */
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, hDbgModPatchMem),
    SSMFIELD_ENTRY_PAD_HC32(        PATM, Alignment0, sizeof(uint32_t)),
    /* Statistics are not part of the restored state, only skipped over. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
360
361/**
362 * SSM descriptor table for the PATMGCSTATE structure.
363 */
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(                 PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(                 PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
385
386/**
387 * SSM descriptor table for the PATMPATCHREC structure.
388 */
/**
 * SSM descriptor table for the PATMPATCHREC structure (saved/loaded via the
 * PATMPATCHRECSSM intermediate layout).
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unusedHC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrJumpRecs),           // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocEndHC),   // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHRECSSM, patch.unused.pGuestLoc),        // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.unused.opsize),           // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
439
440/**
441 * SSM descriptor table for the RELOCREC structure.
442 */
/**
 * SSM descriptor table for the RELOCREC structure.
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key),        // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos),       // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
457
458/**
459 * SSM descriptor table for the RECPATCHTOGUEST structure.
460 */
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 5),
    SSMFIELD_ENTRY_TERM()
};
477
478#ifdef VBOX_STRICT
479
480/**
481 * Callback function for RTAvlPVDoWithAll
482 *
483 * Counts the number of patches in the tree
484 *
485 * @returns VBox status code.
486 * @param pNode Current node
487 * @param pcPatches Pointer to patch counter (uint32_t)
488 */
489static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
490{
491 NOREF(pNode);
492 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
493 return VINF_SUCCESS;
494}
495
496/**
497 * Callback function for RTAvlU32DoWithAll
498 *
499 * Counts the number of patches in the tree
500 *
501 * @returns VBox status code.
502 * @param pNode Current node
503 * @param pcPatches Pointer to patch counter (uint32_t)
504 */
505static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
506{
507 NOREF(pNode);
508 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
509 return VINF_SUCCESS;
510}
511
512#endif /* VBOX_STRICT */
513
514/**
515 * Callback function for RTAvloU32DoWithAll
516 *
517 * Counts the number of patches in the tree
518 *
519 * @returns VBox status code.
520 * @param pNode Current node
521 * @param pcPatches Pointer to patch counter
522 */
523static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
524{
525 NOREF(pNode);
526 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
527 return VINF_SUCCESS;
528}
529
530/**
531 * Callback function for RTAvlU32DoWithAll
532 *
533 * Saves all patch to guest lookup records.
534 *
535 * @returns VBox status code.
536 * @param pNode Current node
537 * @param pvUser Pointer to PATMCALLBACKARGS.
538 */
539static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pvUser)
540{
541 PPATMCALLBACKARGS pArgs = (PPATMCALLBACKARGS)pvUser;
542 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
543
544 /* Save the lookup record. */
545 int rc = SSMR3PutStructEx(pArgs->pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST), 0 /*fFlags*/,
546 &g_aPatmRecPatchToGuest[0], NULL);
547 AssertRCReturn(rc, rc);
548
549 return VINF_SUCCESS;
550}
551
552/**
553 * Callback function for RTAvlPVDoWithAll
554 *
555 * Saves all patch to guest lookup records.
556 *
557 * @returns VBox status code.
558 * @param pNode Current node
559 * @param pvUser Pointer to PATMCALLBACKARGS.
560 */
561static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pvUser)
562{
563 PPATMCALLBACKARGS pArgs = (PPATMCALLBACKARGS)pvUser;
564 RELOCREC rec = *(PRELOCREC)pNode;
565
566 /* Convert pointer to an offset into patch memory. May not be applicable
567 to all fixup types, thus the UINT32_MAX. */
568 AssertMsg( rec.pRelocPos
569 || ( rec.uType == FIXUP_REL_JMPTOPATCH
570 && !(pArgs->pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)),
571 ("uState=%#x uType=%#x flags=%#RX64\n", pArgs->pPatchRec->patch.uState, rec.uType, pArgs->pPatchRec->patch.flags));
572 uintptr_t offRelocPos = (uintptr_t)rec.pRelocPos - (uintptr_t)pArgs->pVM->patm.s.pPatchMemHC;
573 if (offRelocPos > pArgs->pVM->patm.s.cbPatchMem)
574 offRelocPos = UINT32_MAX;
575 rec.pRelocPos = (uint8_t *)offRelocPos;
576
577 /* Zero rec.Core.Key since it's unused and may trigger SSM check due to the hack below. */
578 rec.Core.Key = 0;
579
580 /* Save the lookup record. */
581 int rc = SSMR3PutStructEx(pArgs->pSSM, &rec, sizeof(rec), 0 /*fFlags*/, &g_aPatmRelocRec[0], NULL);
582 AssertRCReturn(rc, rc);
583
584 return VINF_SUCCESS;
585}
586
587/**
588 * Converts a saved state patch record to the memory record.
589 *
590 * @returns nothing.
591 * @param pPatch The memory record.
592 * @param pPatchSSM The SSM version of the patch record.
593 */
594static void patmR3PatchConvertSSM2Mem(PPATMPATCHREC pPatch, PPATMPATCHRECSSM pPatchSSM)
595{
596 /*
597 * Only restore the patch part of the tree record; not the internal data (except the key of course)
598 */
599 pPatch->Core.Key = pPatchSSM->Core.Key;
600 pPatch->CoreOffset.Key = pPatchSSM->CoreOffset.Key;
601 pPatch->patch.uState = pPatchSSM->patch.uState;
602 pPatch->patch.uOldState = pPatchSSM->patch.uOldState;
603 pPatch->patch.uOpMode = pPatchSSM->patch.uOpMode;
604 pPatch->patch.pPrivInstrGC = pPatchSSM->patch.pPrivInstrGC;
605 pPatch->patch.unusedHC = pPatchSSM->patch.unusedHC;
606 memcpy(&pPatch->patch.aPrivInstr[0], &pPatchSSM->patch.aPrivInstr[0], MAX_INSTR_SIZE);
607 pPatch->patch.cbPrivInstr = pPatchSSM->patch.cbPrivInstr;
608 pPatch->patch.opcode = pPatchSSM->patch.opcode;
609 pPatch->patch.cbPatchJump = pPatchSSM->patch.cbPatchJump;
610 pPatch->patch.pPatchJumpDestGC = pPatchSSM->patch.pPatchJumpDestGC;
611 pPatch->patch.pPatchBlockOffset = pPatchSSM->patch.pPatchBlockOffset;
612 pPatch->patch.cbPatchBlockSize = pPatchSSM->patch.cbPatchBlockSize;
613 pPatch->patch.uCurPatchOffset = pPatchSSM->patch.uCurPatchOffset;
614 pPatch->patch.flags = pPatchSSM->patch.flags;
615 pPatch->patch.pInstrGCLowest = pPatchSSM->patch.pInstrGCLowest;
616 pPatch->patch.pInstrGCHighest = pPatchSSM->patch.pInstrGCHighest;
617 pPatch->patch.FixupTree = pPatchSSM->patch.FixupTree;
618 pPatch->patch.nrFixups = pPatchSSM->patch.nrFixups;
619 pPatch->patch.nrJumpRecs = pPatchSSM->patch.nrJumpRecs;
620 pPatch->patch.JumpTree = pPatchSSM->patch.JumpTree;
621 pPatch->patch.Patch2GuestAddrTree = pPatchSSM->patch.Patch2GuestAddrTree;
622 pPatch->patch.Guest2PatchAddrTree = pPatchSSM->patch.Guest2PatchAddrTree;
623 pPatch->patch.nrPatch2GuestRecs = pPatchSSM->patch.nrPatch2GuestRecs;
624 pPatch->patch.unused = pPatchSSM->patch.unused;
625 pPatch->patch.pTempInfo = pPatchSSM->patch.pTempInfo;
626 pPatch->patch.cCodeWrites = pPatchSSM->patch.cCodeWrites;
627 pPatch->patch.cTraps = pPatchSSM->patch.cTraps;
628 pPatch->patch.cInvalidWrites = pPatchSSM->patch.cInvalidWrites;
629 pPatch->patch.uPatchIdx = pPatchSSM->patch.uPatchIdx;
630 pPatch->patch.bDirtyOpcode = pPatchSSM->patch.bDirtyOpcode;
631 pPatch->patch.pTrampolinePatchesHead = NULL;
632}
633
634/**
635 * Converts a memory patch record to the saved state version.
636 *
637 * @returns nothing.
638 * @param pPatchSSM The saved state record.
639 * @param pPatch The memory version to save.
640 */
641static void patmR3PatchConvertMem2SSM(PPATMPATCHRECSSM pPatchSSM, PPATMPATCHREC pPatch)
642{
643 pPatchSSM->Core = pPatch->Core;
644 pPatchSSM->CoreOffset = pPatch->CoreOffset;
645 pPatchSSM->patch.uState = pPatch->patch.uState;
646 pPatchSSM->patch.uOldState = pPatch->patch.uOldState;
647 pPatchSSM->patch.uOpMode = pPatch->patch.uOpMode;
648 pPatchSSM->patch.pPrivInstrGC = pPatch->patch.pPrivInstrGC;
649 pPatchSSM->patch.unusedHC = pPatch->patch.unusedHC;
650 memcpy(&pPatchSSM->patch.aPrivInstr[0], &pPatch->patch.aPrivInstr[0], MAX_INSTR_SIZE);
651 pPatchSSM->patch.cbPrivInstr = pPatch->patch.cbPrivInstr;
652 pPatchSSM->patch.opcode = pPatch->patch.opcode;
653 pPatchSSM->patch.cbPatchJump = pPatch->patch.cbPatchJump;
654 pPatchSSM->patch.pPatchJumpDestGC = pPatch->patch.pPatchJumpDestGC;
655 pPatchSSM->patch.pPatchBlockOffset = pPatch->patch.pPatchBlockOffset;
656 pPatchSSM->patch.cbPatchBlockSize = pPatch->patch.cbPatchBlockSize;
657 pPatchSSM->patch.uCurPatchOffset = pPatch->patch.uCurPatchOffset;
658 pPatchSSM->patch.flags = pPatch->patch.flags;
659 pPatchSSM->patch.pInstrGCLowest = pPatch->patch.pInstrGCLowest;
660 pPatchSSM->patch.pInstrGCHighest = pPatch->patch.pInstrGCHighest;
661 pPatchSSM->patch.FixupTree = pPatch->patch.FixupTree;
662 pPatchSSM->patch.nrFixups = pPatch->patch.nrFixups;
663 pPatchSSM->patch.nrJumpRecs = pPatch->patch.nrJumpRecs;
664 pPatchSSM->patch.JumpTree = pPatch->patch.JumpTree;
665 pPatchSSM->patch.Patch2GuestAddrTree = pPatch->patch.Patch2GuestAddrTree;
666 pPatchSSM->patch.Guest2PatchAddrTree = pPatch->patch.Guest2PatchAddrTree;
667 pPatchSSM->patch.nrPatch2GuestRecs = pPatch->patch.nrPatch2GuestRecs;
668 pPatchSSM->patch.unused = pPatch->patch.unused;
669 pPatchSSM->patch.pTempInfo = pPatch->patch.pTempInfo;
670 pPatchSSM->patch.cCodeWrites = pPatch->patch.cCodeWrites;
671 pPatchSSM->patch.cTraps = pPatch->patch.cTraps;
672 pPatchSSM->patch.cInvalidWrites = pPatch->patch.cInvalidWrites;
673 pPatchSSM->patch.uPatchIdx = pPatch->patch.uPatchIdx;
674 pPatchSSM->patch.bDirtyOpcode = pPatch->patch.bDirtyOpcode;
675}
676
677/**
678 * Callback function for RTAvloU32DoWithAll
679 *
680 * Saves the state of the patch that's being enumerated
681 *
682 * @returns VBox status code.
683 * @param pNode Current node
684 * @param pvUser Pointer to PATMCALLBACKARGS.
685 */
/**
 * Callback function for RTAvloU32DoWithAll.
 *
 * Saves the state of the patch that's being enumerated: first the patch
 * record itself, then its fixup records, then its patch-to-guest lookup
 * records.
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a PATMPATCHREC).
 * @param   pvUser  Pointer to PATMCALLBACKARGS.
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pvUser)
{
    PPATMCALLBACKARGS pArgs  = (PPATMCALLBACKARGS)pvUser;
    PPATMPATCHREC     pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHRECSSM   patch;
    int               rc;

    /* Make the record available to the fixup-record callback below. */
    pArgs->pPatchRec = pPatch;
    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    patmR3PatchConvertMem2SSM(&patch, pPatch);
    Log4(("patmSavePatchState: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
          patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
          patch.patch.nrFixups, patch.patch.nrJumpRecs));

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
#ifdef VBOX_STRICT
    PVM pVM = pArgs->pVM; /* For PATCHCODE_PTR_HC. */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
#endif
    /* These trees are only used while a patch is being created; they must be
       empty by the time we save. */
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    /* Save the patch record itself */
    rc = SSMR3PutStructEx(pArgs->pSSM, &patch, sizeof(patch), 0 /*fFlags*/, &g_aPatmPatchRecFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    /* Sanity check: the fixup counter must agree with the actual tree size. */
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg(nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    rc = RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pArgs);
    AssertRCReturn(rc, rc);

#ifdef VBOX_STRICT
    /* Sanity check: the lookup-record counter must agree with the tree size. */
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    rc = RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pArgs);
    AssertRCReturn(rc, rc);

    pArgs->pPatchRec = NULL;
    return VINF_SUCCESS;
}
740
741/**
742 * Execute state save operation.
743 *
744 * @returns VBox status code.
745 * @param pVM The cross context VM structure.
746 * @param pSSM SSM operation handle.
747 */
748DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
749{
750 PATM patmInfo = pVM->patm.s;
751 int rc;
752
753 pVM->patm.s.savedstate.pSSM = pSSM;
754
755 /*
756 * Reset HC pointers that need to be recalculated when loading the state
757 */
758 patmInfo.pPatchMemHC = NULL;
759 patmInfo.pGCStateHC = 0;
760 patmInfo.pvFaultMonitor = 0;
761
762 Assert(patmInfo.ulCallDepth == 0);
763
764 /*
765 * Count the number of patches in the tree (feeling lazy)
766 */
767 patmInfo.savedstate.cPatches = 0;
768 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);
769
770 /*
771 * Save PATM structure
772 */
773 rc = SSMR3PutStructEx(pSSM, &patmInfo, sizeof(patmInfo), 0 /*fFlags*/, &g_aPatmFields[0], NULL);
774 AssertRCReturn(rc, rc);
775
776 /*
777 * Save patch memory contents
778 */
779 rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
780 AssertRCReturn(rc, rc);
781
782 /*
783 * Save GC state memory
784 */
785 rc = SSMR3PutStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), 0 /*fFlags*/, &g_aPatmGCStateFields[0], NULL);
786 AssertRCReturn(rc, rc);
787
788 /*
789 * Save PATM stack page
790 */
791 SSMR3PutU32(pSSM, PATM_STACK_TOTAL_SIZE);
792 rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
793 AssertRCReturn(rc, rc);
794
795 /*
796 * Save all patches
797 */
798 PATMCALLBACKARGS Args;
799 Args.pVM = pVM;
800 Args.pSSM = pSSM;
801 rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, &Args);
802 AssertRCReturn(rc, rc);
803
804 /* Note! Patch statistics are not saved. */
805
806 return VINF_SUCCESS;
807}
808
809
/**
 * Execute state load operation.
 *
 * Restores the PATM instance data, the raw patch memory, the guest-context
 * state, the PATM stack page and all patch records, recalculating the
 * host-context pointers and correcting fixups that reference hypervisor
 * structures whose addresses may have moved since the state was saved.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    PATM patmInfo;
    int rc;

    /* Only the known saved state layouts are accepted. */
    if (    uVersion != PATM_SAVED_STATE_VERSION
        &&  uVersion != PATM_SAVED_STATE_VERSION_NO_RAW_MEM
        &&  uVersion != PATM_SAVED_STATE_VERSION_MEM
        &&  uVersion != PATM_SAVED_STATE_VERSION_FIXUP_HACK
        &&  uVersion != PATM_SAVED_STATE_VERSION_VER16
       )
    {
        AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    /* Older layouts were raw memory dumps; be lenient about structure sizes. */
    uint32_t const fStructRestoreFlags = uVersion <= PATM_SAVED_STATE_VERSION_MEM ? SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED : 0;
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Restore PATM structure.  A specific SVN revision / product version range
     * of the _MEM layout used a slightly different field set (g_aPatmFields86139).
     */
    RT_ZERO(patmInfo);
    if (    uVersion == PATM_SAVED_STATE_VERSION_MEM
        &&  SSMR3HandleRevision(pSSM) >= 86139
        &&  SSMR3HandleVersion(pSSM)  >= VBOX_FULL_VERSION_MAKE(4, 2, 51))
        rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
                              &g_aPatmFields86139[0], NULL);
    else
        rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), fStructRestoreFlags, &g_aPatmFields[0], NULL);
    AssertRCReturn(rc, rc);

    /* Relative calls are made to the helper functions. Therefore their relative location must not change! */
    /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
    AssertLogRelReturn((pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn(pVM->patm.s.cbPatchMem == patmInfo.cbPatchMem, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

    /* Carry over the fields that survive a load verbatim. */
    pVM->patm.s.offPatchMem         = patmInfo.offPatchMem;
    pVM->patm.s.deltaReloc          = patmInfo.deltaReloc;
    pVM->patm.s.uCurrentPatchIdx    = patmInfo.uCurrentPatchIdx;
    pVM->patm.s.fOutOfMemory        = patmInfo.fOutOfMemory;

    /* Lowest and highest patched instruction */
    pVM->patm.s.pPatchedInstrGCLowest    = patmInfo.pPatchedInstrGCLowest;
    pVM->patm.s.pPatchedInstrGCHighest   = patmInfo.pPatchedInstrGCHighest;

    /* Sysenter handlers */
    pVM->patm.s.pfnSysEnterGC            = patmInfo.pfnSysEnterGC;
    pVM->patm.s.pfnSysEnterPatchGC       = patmInfo.pfnSysEnterPatchGC;
    pVM->patm.s.uSysEnterPatchIdx        = patmInfo.uSysEnterPatchIdx;

    Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);

    Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
    Log(("pGCStateGC  %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
    Log(("pGCStackGC  %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
    Log(("pCPUMCtxGC  %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));


    /** @note patch statistics are not restored. */

    /*
     * Restore patch memory contents
     */
    Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Restore GC state memory
     */
    RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), fStructRestoreFlags, &g_aPatmGCStateFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Restore PATM stack page.  Newer versions prefix the stack data with its
     * size; validate it and zero-fill any shortfall.
     */
    uint32_t cbStack = PATM_STACK_TOTAL_SIZE;
    if (uVersion > PATM_SAVED_STATE_VERSION_MEM)
    {
        rc = SSMR3GetU32(pSSM, &cbStack);
        AssertRCReturn(rc, rc);
    }
    AssertCompile(!(PATM_STACK_TOTAL_SIZE & 31));
    AssertLogRelMsgReturn(cbStack > 0 && cbStack <= PATM_STACK_TOTAL_SIZE && !(cbStack & 31),
                          ("cbStack=%#x vs %#x", cbStack, PATM_STACK_TOTAL_SIZE),
                          VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, cbStack);
    AssertRCReturn(rc, rc);
    if (cbStack < PATM_STACK_TOTAL_SIZE)
        memset((uint8_t *)pVM->patm.s.pGCStackHC + cbStack, 0, PATM_STACK_TOTAL_SIZE - cbStack);

    /*
     * Load all patches
     */
    for (unsigned i = 0; i < patmInfo.savedstate.cPatches; i++)
    {
        PATMPATCHRECSSM patch;
        PATMPATCHREC *pPatchRec;

        RT_ZERO(patch);
        rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), fStructRestoreFlags, &g_aPatmPatchRecFields[0], NULL);
        AssertRCReturn(rc, rc);
        Log4(("patmR3Load: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
              patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
              patch.patch.nrFixups, patch.patch.nrJumpRecs));

        Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));

        rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("Out of memory!!!!\n"));
            return VERR_NO_MEMORY;
        }

        /* Convert SSM version to memory. */
        patmR3PatchConvertSSM2Mem(pPatchRec, &patch);

        Log(("Restoring patch %RRv -> %RRv state %x\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset, pPatchRec->patch.uState));
        bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
        Assert(ret);
        if (pPatchRec->patch.uState != PATCH_REFUSED)
        {
            if (pPatchRec->patch.pPatchBlockOffset)
            {
                /* We actually generated code for this patch. */
                ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
                AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
            }
        }
        /* Set to zero as we don't need it anymore. */
        pPatchRec->patch.pTempInfo = 0;

        PATMP2GLOOKUPREC cacheRec;
        RT_ZERO(cacheRec);
        cacheRec.pPatch = &pPatchRec->patch;

        uint8_t *pPrivInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pPatchRec->patch.pPrivInstrGC);
        /* Can fail due to page or page table not present. */

        /*
         * Restore fixup records and correct HC pointers in fixup records
         */
        pPatchRec->patch.FixupTree = 0;
        pPatchRec->patch.nrFixups  = 0;    /* increased by patmPatchAddReloc32 */
        for (unsigned j = 0; j < patch.patch.nrFixups; j++)
        {
            RELOCREC rec;
            int32_t offset;
            RTRCPTR *pFixup;

            RT_ZERO(rec);
            rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRelocRec[0], NULL);
            AssertRCReturn(rc, rc);

            /* Without a host mapping of the private instruction the fixup
               cannot be corrected or re-registered; it is simply dropped
               (the patch gets removed further down when it was enabled). */
            if (pPrivInstrHC)
            {
                /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
                offset = (int32_t)(intptr_t)rec.pRelocPos;
                /* Convert to HC pointer again. */
                if ((uintptr_t)rec.pRelocPos < pVM->patm.s.cbPatchMem)
                    rec.pRelocPos = pVM->patm.s.pPatchMemHC + (uintptr_t)rec.pRelocPos;
                else
                    rec.pRelocPos = NULL;
                pFixup = (RTRCPTR *)rec.pRelocPos;

                if (pPatchRec->patch.uState != PATCH_REFUSED)
                {
                    /* Jump-to-patch fixups live inside the overwritten guest
                       instruction, so point at the displacement byte(s) in the
                       freshly mapped guest code instead of the patch memory. */
                    if (    rec.uType == FIXUP_REL_JMPTOPATCH
                        && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
                    {
                        Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
                        unsigned offset2 = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;

                        rec.pRelocPos = pPrivInstrHC + offset2;
                        pFixup = (RTRCPTR *)rec.pRelocPos;
                    }

                    rc = patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
                    AssertRCReturn(rc, rc);
                }

                rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
                AssertRCReturn(rc, rc);
            }
        }
        /* Release previous lock if any. */
        if (cacheRec.Lock.pvMap)
            PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);

        /* And all patch to guest lookup records */
        Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));

        pPatchRec->patch.Patch2GuestAddrTree = 0;
        pPatchRec->patch.Guest2PatchAddrTree = 0;
        if (pPatchRec->patch.nrPatch2GuestRecs)
        {
            RECPATCHTOGUEST rec;
            uint32_t        nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;

            pPatchRec->patch.nrPatch2GuestRecs = 0;    /* incremented by patmr3AddP2GLookupRecord */
            for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
            {
                RT_ZERO(rec);
                rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRecPatchToGuest[0], NULL);
                AssertRCReturn(rc, rc);

                /* rec.Core.Key was saved as an offset into patch memory; rebase it to the new HC mapping. */
                patmR3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
            }
            Assert(pPatchRec->patch.Patch2GuestAddrTree);
        }

        if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
        {
            /* Insert the guest page lookup records (for detection self-modifying code) */
            rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
            AssertRCReturn(rc, rc);
        }

#if 0 /* can fail def LOG_ENABLED */
        if (    pPatchRec->patch.uState != PATCH_REFUSED
            &&  !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
        {
            pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
            Log(("Patch code ----------------------------------------------------------\n"));
            patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
            Log(("Patch code ends -----------------------------------------------------\n"));
            MMR3HeapFree(pPatchRec->patch.pTempInfo);
            pPatchRec->patch.pTempInfo = NULL;
        }
#endif
        /* Remove the patch in case the gc mapping is not present. */
        if (    !pPrivInstrHC
            &&  pPatchRec->patch.uState == PATCH_ENABLED)
        {
            Log(("Remove patch %RGv due to failed HC address translation\n", pPatchRec->patch.pPrivInstrGC));
            PATMR3RemovePatch(pVM, pPatchRec->patch.pPrivInstrGC);
        }
    }

    /*
     * Correct absolute fixups in the global patch. (helper functions)
     * Bit of a mess. Uses the new patch record, but restored patch functions.
     */
    PRELOCREC pRec = 0;
    AVLPVKEY  key  = 0;

    Log(("Correct fixups in global helper functions\n"));
    while (true)
    {
        int32_t offset;
        RTRCPTR *pFixup;

        /* Get the record that's closest from above */
        pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
        if (pRec == 0)
            break;

        key = (AVLPVKEY)(pRec->pRelocPos + 1);   /* search for the next record during the next round. */

        /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
        offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
        pFixup = (RTRCPTR *)pRec->pRelocPos;

        /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
        rc = patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
        AssertRCReturn(rc, rc);
    }

#ifdef VBOX_WITH_STATISTICS
    /*
     * Restore relevant old statistics
     */
    pVM->patm.s.StatDisabled  = patmInfo.StatDisabled;
    pVM->patm.s.StatUnusable  = patmInfo.StatUnusable;
    pVM->patm.s.StatEnabled   = patmInfo.StatEnabled;
    pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
#endif

    return VINF_SUCCESS;
}
1110
/**
 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
 *
 * Handles absolute fixups into the GC state, CPUM context, statistics, stack
 * and patch memory regions (including several legacy saved-state hacks), as
 * well as relative jump fixups between guest code and patch code.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   uVersion    Saved state version.
 * @param   patmInfo    Saved PATM structure
 * @param   pPatch      Patch record
 * @param   pRec        Relocation record
 * @param   offset      Offset of referenced data/code
 * @param   pFixup      Fixup address
 */
static int patmCorrectFixup(PVM pVM, unsigned uVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec,
                            int32_t offset, RTRCPTR *pFixup)
{
    /* Displacement of the patch memory between save and load. */
    int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;

    switch (pRec->uType)
    {
        case FIXUP_ABSOLUTE:
        case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
        {
            /* NOTE(review): comparing pRec->uType against a saved-state version
               constant looks suspicious -- presumably a fixup-type constant was
               intended; confirm against revision history before changing. */
            Assert(    pRec->uType != PATM_SAVED_STATE_VERSION_NO_RAW_MEM
                   || (pRec->pSource == pRec->pDest && PATM_IS_ASMFIX(pRec->pSource)) );

            /* bird: What is this for exactly? Only the MMIO fixups used to have pSource set. */
            if (    pRec->pSource
                && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource)
                &&  pRec->uType != FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
                break;

            /* Classify the old target address and rebase it onto the
               corresponding structure in the new VM instance. */
            RTRCPTR const uFixup = *pFixup;
            if (    uFixup >= patmInfo.pGCStateGC
                &&  uFixup <  patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
            {
                LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
                *pFixup = (uFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
            }
            else if (    uFixup >= patmInfo.pCPUMCtxGC
                     &&  uFixup <  patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
            {
                LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));

                /* The CPUMCTX structure has completely changed, so correct the offsets too. */
                if (uVersion == PATM_SAVED_STATE_VERSION_VER16)
                {
                    unsigned offCpumCtx = uFixup - patmInfo.pCPUMCtxGC;

                    /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
                     * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
                     * function is not available in older gcc versions, at least not in gcc-3.3 */
                    if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
                    {
                        LogFlow(("Changing dr[0] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[0])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
                    {
                        LogFlow(("Changing dr[1] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[1])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
                    {
                        LogFlow(("Changing dr[2] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[2])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
                    {
                        LogFlow(("Changing dr[3] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[3])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
                    {
                        LogFlow(("Changing dr[4] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[4])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
                    {
                        LogFlow(("Changing dr[5] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[5])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
                    {
                        LogFlow(("Changing dr[6] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[6])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
                    {
                        LogFlow(("Changing dr[7] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[7])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
                    {
                        LogFlow(("Changing cr0 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr0)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
                    {
                        LogFlow(("Changing cr2 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr2)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
                    {
                        LogFlow(("Changing cr3 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr3)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
                    {
                        LogFlow(("Changing cr4 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr4)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
                    {
                        LogFlow(("Changing tr offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, tr)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
                    {
                        LogFlow(("Changing ldtr offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, ldtr)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
                    {
                        LogFlow(("Changing pGdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
                    {
                        LogFlow(("Changing cbGdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
                    {
                        LogFlow(("Changing pIdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
                    {
                        LogFlow(("Changing cbIdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
                    }
                    else
                        AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", offCpumCtx));
                }
                else
                    *pFixup = (uFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
            }
            else if (    uFixup >= patmInfo.pStatsGC
                     &&  uFixup <  patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
            {
                LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
                *pFixup = (uFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
            }
            else if (    uFixup >= patmInfo.pGCStackGC
                     &&  uFixup <  patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
            {
                LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
                *pFixup = (uFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
            }
            else if (    uFixup >= patmInfo.pPatchMemGC
                     &&  uFixup <  patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
            {
                LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
                *pFixup = (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
            }
            /*
             * For PATM_SAVED_STATE_VERSION_FIXUP_HACK and earlier boldly ASSUME:
             *      1. That pCPUMCtxGC is in the VM structure and that its location is
             *         at the first page of the same 4 MB chunk.
             *      2. That the forced actions were in the first 32 bytes of the VM
             *         structure.
             *      3. That the CPUM leaves are less than 8KB into the structure.
             */
            else if (    uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
                     &&  uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
            {
                LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", uFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
                *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                pRec->pSource = pRec->pDest = PATM_ASMFIX_VM_FORCEDACTIONS;
                pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
            }
            else if (    uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
                     &&  uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(8192))
            {
                /* NOTE(review): function-level static -- assumes loads are serialized
                   and that cpuid fixups always appear in groups of four in this
                   exact order; verify before reuse elsewhere. */
                static int cCpuidFixup = 0;

                /* Very dirty assumptions about the cpuid patch and cpuid ordering. */
                switch (cCpuidFixup & 3)
                {
                    case 0:
                        *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_DEF_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        break;
                    case 1:
                        *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_STD_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        break;
                    case 2:
                        *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_EXT_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        break;
                    case 3:
                        *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_CENTAUR_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        break;
                }
                LogFlow(("Changing cpuid fixup %d from %RRv to %RRv\n", cCpuidFixup, uFixup, *pFixup));
                cCpuidFixup++;
            }
            /*
             * For PATM_SAVED_STATE_VERSION_MEM thru PATM_SAVED_STATE_VERSION_NO_RAW_MEM
             * we abused Core.Key to store the type for fixups needing correcting on load.
             */
            else if (    uVersion >= PATM_SAVED_STATE_VERSION_MEM
                     &&  uVersion <= PATM_SAVED_STATE_VERSION_NO_RAW_MEM)
            {
                /* Core.Key abused to store the type of fixup. */
                switch ((uintptr_t)pRec->Core.Key)
                {
                    case PATM_FIXUP_CPU_FF_ACTION:
                        *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_VM_FORCEDACTIONS;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpu ff action fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    case PATM_FIXUP_CPUID_DEFAULT:
                        *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_DEF_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpuid def fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    case PATM_FIXUP_CPUID_STANDARD:
                        *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_STD_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpuid std fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    case PATM_FIXUP_CPUID_EXTENDED:
                        *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_EXT_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpuid ext fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    case PATM_FIXUP_CPUID_CENTAUR:
                        *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_CENTAUR_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpuid centaur fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    default:
                        AssertMsgFailed(("Unexpected fixup value %p\n", (uintptr_t)pRec->Core.Key));
                        break;
                }
            }
            /*
             * After PATM_SAVED_STATE_VERSION_NO_RAW_MEM we changed the fixup type
             * and instead put the patch fixup code in the source and target addresses.
             */
            else if (    uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM
                     &&  pRec->uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
            {
                Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_ASMFIX(pRec->pSource));
                switch (pRec->pSource)
                {
                    case PATM_ASMFIX_VM_FORCEDACTIONS:
                        *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                        break;
                    case PATM_ASMFIX_CPUID_DEF_PTR:
                        *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_STD_PTR: /* Saved again patches only. */
                        *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_EXT_PTR: /* Saved again patches only. */
                        *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_CENTAUR_PTR: /* Saved again patches only. */
                        *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_REUSE_LATER_0: /* Was only used for a few days. Don't want to keep this legacy around.  */
                    case PATM_ASMFIX_REUSE_LATER_1:
                        AssertLogRelMsgFailedReturn(("Unsupported PATM fixup. You have to discard this saved state or snapshot."),
                                                    VERR_INTERNAL_ERROR);
                        break;
                }
            }
            /*
             * Constant that may change between VM version needs fixing up.
             */
            else if (pRec->uType == FIXUP_CONSTANT_IN_PATCH_ASM_TMPL)
            {
                AssertLogRelReturn(uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
                Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_ASMFIX(pRec->pSource));
                switch (pRec->pSource)
                {
                    case PATM_ASMFIX_REUSE_LATER_2: /* Was only used for a few days. Don't want to keep this legacy around.  */
                    case PATM_ASMFIX_REUSE_LATER_3:
                        AssertLogRelMsgFailedReturn(("Unsupported PATM fixup. You have to discard this saved state or snapshot."),
                                                    VERR_INTERNAL_ERROR);
                        break;
                    default:
                        AssertLogRelMsgFailed(("Unknown FIXUP_CONSTANT_IN_PATCH_ASM_TMPL fixup: %#x\n", pRec->pSource));
                        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
                }
            }
            /*
             * Relative fixups for calling or jumping to helper functions inside VMMRC.
             * (The distance between the helper function and the patch is subject to
             * new code being added to VMMRC as well as VM configurations influencing
             * heap allocations and so on and so forth.)
             */
            else if (pRec->uType == FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL)
            {
                AssertLogRelReturn(uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
                Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_ASMFIX(pRec->pSource));
                int rc;
                RTRCPTR uRCPtrDest;
                switch (pRec->pSource)
                {
                    case PATM_ASMFIX_HELPER_CPUM_CPUID:
                        rc = PDMR3LdrGetSymbolRC(pVM, NULL, "CPUMPatchHlpCpuId", &uRCPtrDest);
                        AssertLogRelRCReturn(rc, rc);
                        break;
                    default:
                        AssertLogRelMsgFailed(("Unknown FIXUP_REL_HLP_CALL_IN_PATCH_ASM_TMPL fixup: %#x\n", pRec->pSource));
                        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
                }
                /* Displacement is relative to the end of the 32-bit fixup field. */
                RTRCPTR uRCPtrAfter = pVM->patm.s.pPatchMemGC + ((uintptr_t)&pFixup[1] - (uintptr_t)pVM->patm.s.pPatchMemHC);
                *pFixup = uRCPtrDest - uRCPtrAfter;
            }

#ifdef RT_OS_WINDOWS
            AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
#endif
            break;
        }

        case FIXUP_REL_JMPTOPATCH:
        {
            /* Relative jump from guest code into the (possibly relocated) patch. */
            RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);

            if (    pPatch->uState == PATCH_ENABLED
                && (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
            {
                uint8_t    oldJump[SIZEOF_NEAR_COND_JUMP32];
                uint8_t    temp[SIZEOF_NEAR_COND_JUMP32];
                RTRCPTR    pJumpOffGC;
                RTRCINTPTR displ   = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
                RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;

                Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));

                Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
                if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
                {
                    Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);

                    pJumpOffGC = pPatch->pPrivInstrGC + 2;    //two byte opcode
                    oldJump[0] = pPatch->aPrivInstr[0];
                    oldJump[1] = pPatch->aPrivInstr[1];
                    *(RTRCUINTPTR *)&oldJump[2] = displOld;
                }
                else
#endif
                if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
                {
                    pJumpOffGC = pPatch->pPrivInstrGC + 1;    //one byte opcode
                    oldJump[0] = 0xE9;
                    *(RTRCUINTPTR *)&oldJump[1] = displOld;
                }
                else
                {
                    AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
                    break;
                }
                Assert(pPatch->cbPatchJump <= sizeof(temp));

                /*
                 * Read old patch jump and compare it to the one we previously installed
                 */
                int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);

                if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
                {
                    /* Page not mapped yet; install a monitor so we patch it when it appears. */
                    RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;
                    rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
                                                     pPage,
                                                     pPage + (PAGE_SIZE - 1) /* inclusive! */,
                                                     (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
                    Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
                }
                else
                if (memcmp(temp, oldJump, pPatch->cbPatchJump))
                {
                    Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
                    /*
                     * Disable patch; this is not a good solution
                     */
                    /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
                    pPatch->uState = PATCH_DISABLED;
                }
                else
                if (RT_SUCCESS(rc))
                {
                    /* Jump still in place; rewrite the displacement to the relocated patch. */
                    rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
                    AssertRC(rc);
                }
                else
                    AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
            }
            else
                Log(("Skip the guest jump to patch code for this disabled patch %08X\n", pRec->pRelocPos));

            pRec->pDest = pTarget;
            break;
        }

        case FIXUP_REL_JMPTOGUEST:
        {
            /* Relative jump from patch code back into (unmoved) guest code. */
            RTRCPTR    pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
            RTRCINTPTR displ   = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;

            Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
            Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
            *(RTRCUINTPTR *)pRec->pRelocPos = displ;
            pRec->pSource = pSource;
            break;

        }
    }
    return VINF_SUCCESS;
}
1549
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette