VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp@ 55705

Last change on this file since 55705 was 54764, checked in by vboxsync, 10 years ago

Added an infix 'ASMFIX' to the PATMA.h fixup types used in the patch template code in PATMA.asm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 70.4 KB
Line 
1/* $Id: PATMSSM.cpp 54764 2015-03-15 03:25:11Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2015 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/pdmapi.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/cpumctx-v1_6.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/ssm.h>
30#include <VBox/param.h>
31#include <iprt/avl.h>
32#include "PATMInternal.h"
33#include "PATMPatch.h"
34#include "PATMA.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/csam.h>
37#include "internal/pgm.h"
38#include <VBox/dbg.h>
39#include <VBox/err.h>
40#include <VBox/log.h>
41#include <iprt/assert.h>
42#include <iprt/asm.h>
43#include <iprt/string.h>
44#include <VBox/dis.h>
45#include <VBox/disopcode.h>
46#include <VBox/version.h>
47
/**
 * Patch information - SSM version.
 *
 * The difference from the live PATCHINFO structure is the missing
 * pTrampolinePatchesHead member, kept out to avoid changing the saved
 * state version for now (will come later).
 */
typedef struct PATCHINFOSSM
{
    uint32_t uState;            /**< Current patch state (PATCH_*). */
    uint32_t uOldState;         /**< Previous patch state. */
    DISCPUMODE uOpMode;         /**< Disassembler CPU mode of the patched code. */

    /* GC pointer of privileged instruction */
    RCPTRTYPE(uint8_t *) pPrivInstrGC;
    R3PTRTYPE(uint8_t *) unusedHC; /**< @todo Can't remove due to structure size dependencies in saved states. */
    uint8_t aPrivInstr[MAX_INSTR_SIZE]; /**< Copy of the original instruction bytes. */
    uint32_t cbPrivInstr;       /**< Size of the original instruction in bytes. */
    uint32_t opcode;            //opcode for priv instr (OP_*)
    uint32_t cbPatchJump;       //patch jump size

    /* Only valid for PATMFL_JUMP_CONFLICT patches */
    RTRCPTR pPatchJumpDestGC;

    RTGCUINTPTR32 pPatchBlockOffset;    /**< Offset of the patch code block into patch memory. */
    uint32_t cbPatchBlockSize;          /**< Size of the generated patch code block. */
    uint32_t uCurPatchOffset;           /**< Current write offset within the patch block. */
#if HC_ARCH_BITS == 64
    uint32_t Alignment0;        /**< Align flags correctly. */
#endif

    uint64_t flags;             /**< PATMFL_* flags. */

    /**
     * Lowest and highest patched GC instruction address. To optimize searches.
     */
    RTRCPTR pInstrGCLowest;
    RTRCPTR pInstrGCHighest;

    /* Tree of fixup records for the patch. */
    R3PTRTYPE(PAVLPVNODECORE) FixupTree;
    uint32_t nrFixups;          /**< Number of records in FixupTree. */

    /* Tree of jumps inside the generated patch code. */
    uint32_t nrJumpRecs;        /**< Number of records in JumpTree. */
    R3PTRTYPE(PAVLPVNODECORE) JumpTree;

    /**
     * Lookup trees for determining the corresponding guest address of an
     * instruction in the patch block.
     */
    R3PTRTYPE(PAVLU32NODECORE) Patch2GuestAddrTree;
    R3PTRTYPE(PAVLU32NODECORE) Guest2PatchAddrTree;
    uint32_t nrPatch2GuestRecs; /**< Number of records in Patch2GuestAddrTree. */
#if HC_ARCH_BITS == 64
    uint32_t Alignment1;
#endif

    /* Unused, but can't remove due to structure size dependencies in the saved state. */
    PATMP2GLOOKUPREC_OBSOLETE unused;

    /* Temporary information during patch creation. Don't waste hypervisor memory for this. */
    R3PTRTYPE(PPATCHINFOTEMP) pTempInfo;

    /* Count the number of writes to the corresponding guest code. */
    uint32_t cCodeWrites;

    /* Count the number of traps taken inside the patch; statistics used to
       decide whether this patch should be kept activated. */
    uint32_t cTraps;

    /* Count the number of invalid writes to pages monitored for the patch. */
    uint32_t cInvalidWrites;

    // Index into the uPatchRun and uPatchTrap arrays (0..MAX_PATCHES-1)
    uint32_t uPatchIdx;

    /* First opcode byte, that's overwritten when a patch is marked dirty. */
    uint8_t bDirtyOpcode;
    uint8_t Alignment2[7];      /**< Align the structure size on a 8-byte boundary. */
} PATCHINFOSSM, *PPATCHINFOSSM;
127
/**
 * Lookup record for patches - SSM version.
 *
 * Pairs the two AVL tree keys with the saved-state patch information.
 */
typedef struct PATMPATCHRECSSM
{
    /** The key is a GC virtual address. */
    AVLOU32NODECORE Core;
    /** The key is a patch offset. */
    AVLOU32NODECORE CoreOffset;

    /** The patch information (SSM layout). */
    PATCHINFOSSM patch;
} PATMPATCHRECSSM, *PPATMPATCHRECSSM;
140
141
142/*******************************************************************************
143* Internal Functions *
144*******************************************************************************/
145static int patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec,
146 int32_t offset, RTRCPTR *pFixup);
147
148
149/*******************************************************************************
150* Global Variables *
151*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * Order and entry kinds must match the saved-state layout exactly; host
 * pointers are ignored/recalculated on load, GC pointers are saved as RCPTRs.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* Statistics are not part of the meaningful saved state; all ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
248
/**
 * SSM descriptor table for the PATM structure starting with r86139.
 *
 * Identical to g_aPatmFields except for the hDbgModPatchMem handle (ignored)
 * and the extra 32-bit host padding entry that follow the savedstate members.
 */
static SSMFIELD const g_aPatmFields86139[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* New in r86139: debug module handle (host only) plus padding. */
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, hDbgModPatchMem),
    SSMFIELD_ENTRY_PAD_HC32(        PATM, Alignment0, sizeof(uint32_t)),
    /* Statistics are not part of the meaningful saved state; all ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
347
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 *
 * All fields are saved; the three GC pointers use RCPTR entries.
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(                 PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(                 PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
372
/**
 * SSM descriptor table for the PATMPATCHRECSSM structure.
 *
 * (The table describes the saved-state record, not the live PATMPATCHREC.)
 * Host pointers and tree-internal members are ignored; they are rebuilt
 * when the state is loaded.
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unusedHC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocEndHC), // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHRECSSM, patch.unused.pGuestLoc), // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.unused.opsize), // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
426
/**
 * SSM descriptor table for the RELOCREC structure.
 *
 * Host pointers are squeezed into 32-bit saved-state slots via the
 * HCPTR_HACK_U32 entries: Core.Key holds the relocation type and
 * pRelocPos is converted to a patch-memory offset before saving
 * (see patmSaveFixupRecords).
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key),        // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos),       // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
444
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 *
 * Tree-internal members are ignored; the lookup tree is rebuilt on load.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 5),
    SSMFIELD_ENTRY_TERM()
};
464
465#ifdef VBOX_STRICT
466
467/**
468 * Callback function for RTAvlPVDoWithAll
469 *
470 * Counts the number of patches in the tree
471 *
472 * @returns VBox status code.
473 * @param pNode Current node
474 * @param pcPatches Pointer to patch counter (uint32_t)
475 */
476static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
477{
478 NOREF(pNode);
479 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
480 return VINF_SUCCESS;
481}
482
483/**
484 * Callback function for RTAvlU32DoWithAll
485 *
486 * Counts the number of patches in the tree
487 *
488 * @returns VBox status code.
489 * @param pNode Current node
490 * @param pcPatches Pointer to patch counter (uint32_t)
491 */
492static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
493{
494 NOREF(pNode);
495 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
496 return VINF_SUCCESS;
497}
498
499#endif /* VBOX_STRICT */
500
501/**
502 * Callback function for RTAvloU32DoWithAll
503 *
504 * Counts the number of patches in the tree
505 *
506 * @returns VBox status code.
507 * @param pNode Current node
508 * @param pcPatches Pointer to patch counter
509 */
510static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
511{
512 NOREF(pNode);
513 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
514 return VINF_SUCCESS;
515}
516
517/**
518 * Callback function for RTAvlU32DoWithAll
519 *
520 * Saves all patch to guest lookup records.
521 *
522 * @returns VBox status code.
523 * @param pNode Current node
524 * @param pVM1 Pointer to the VM
525 */
526static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
527{
528 PVM pVM = (PVM)pVM1;
529 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
530 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
531
532 /* Save the lookup record. */
533 int rc = SSMR3PutStructEx(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST), 0 /*fFlags*/, &g_aPatmRecPatchToGuest[0], NULL);
534 AssertRCReturn(rc, rc);
535
536 return VINF_SUCCESS;
537}
538
/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Saves one fixup (relocation) record, converting its host pointer into a
 * patch-memory offset so the saved state is host-address independent.
 *
 * @returns VBox status code.
 * @param   pNode       Current node (a RELOCREC).
 * @param   pVM1        Pointer to the VM.
 */
static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    RELOCREC rec = *(PRELOCREC)pNode;  /* work on a copy; the tree node must stay intact */
    RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos;  /* NOTE(review): unused in this function body */

    /* Convert pointer to an offset into patch memory. May not be applicable
       to all fixup types, thus the UINT32_MAX. */
    Assert(rec.pRelocPos);
    /* Out-of-range (including underflow, which wraps to a huge value) becomes UINT32_MAX.
       NOTE(review): the check uses '>' so offRelocPos == cbPatchMem (one past the end)
       is accepted — presumably intentional; confirm against the load side. */
    uintptr_t offRelocPos = (uintptr_t)rec.pRelocPos - (uintptr_t)pVM->patm.s.pPatchMemHC;
    if (offRelocPos > pVM->patm.s.cbPatchMem)
        offRelocPos = UINT32_MAX;
    rec.pRelocPos = (uint8_t *)offRelocPos;

    /* Zero rec.Core.Key since it's unused and may trigger SSM check due to the hack below. */
    rec.Core.Key = 0;


    /* Save the lookup record. */
    int rc = SSMR3PutStructEx(pSSM, &rec, sizeof(rec), 0 /*fFlags*/, &g_aPatmRelocRec[0], NULL);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
573
/**
 * Converts a saved state patch record to the memory record.
 *
 * Field-by-field copy from the SSM layout to the live layout; the member
 * missing from the SSM layout (pTrampolinePatchesHead) is reset to NULL.
 *
 * @returns nothing.
 * @param   pPatch       The memory record (output).
 * @param   pPatchSSM    The SSM version of the patch record (input).
 */
static void patmR3PatchConvertSSM2Mem(PPATMPATCHREC pPatch, PPATMPATCHRECSSM pPatchSSM)
{
    /*
     * Only restore the patch part of the tree record; not the internal data (except the key of course)
     */
    pPatch->Core.Key = pPatchSSM->Core.Key;
    pPatch->CoreOffset.Key = pPatchSSM->CoreOffset.Key;
    pPatch->patch.uState = pPatchSSM->patch.uState;
    pPatch->patch.uOldState = pPatchSSM->patch.uOldState;
    pPatch->patch.uOpMode = pPatchSSM->patch.uOpMode;
    pPatch->patch.pPrivInstrGC = pPatchSSM->patch.pPrivInstrGC;
    pPatch->patch.unusedHC = pPatchSSM->patch.unusedHC;
    memcpy(&pPatch->patch.aPrivInstr[0], &pPatchSSM->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatch->patch.cbPrivInstr = pPatchSSM->patch.cbPrivInstr;
    pPatch->patch.opcode = pPatchSSM->patch.opcode;
    pPatch->patch.cbPatchJump = pPatchSSM->patch.cbPatchJump;
    pPatch->patch.pPatchJumpDestGC = pPatchSSM->patch.pPatchJumpDestGC;
    pPatch->patch.pPatchBlockOffset = pPatchSSM->patch.pPatchBlockOffset;
    pPatch->patch.cbPatchBlockSize = pPatchSSM->patch.cbPatchBlockSize;
    pPatch->patch.uCurPatchOffset = pPatchSSM->patch.uCurPatchOffset;
    pPatch->patch.flags = pPatchSSM->patch.flags;
    pPatch->patch.pInstrGCLowest = pPatchSSM->patch.pInstrGCLowest;
    pPatch->patch.pInstrGCHighest = pPatchSSM->patch.pInstrGCHighest;
    pPatch->patch.FixupTree = pPatchSSM->patch.FixupTree;
    pPatch->patch.nrFixups = pPatchSSM->patch.nrFixups;
    pPatch->patch.nrJumpRecs = pPatchSSM->patch.nrJumpRecs;
    pPatch->patch.JumpTree = pPatchSSM->patch.JumpTree;
    pPatch->patch.Patch2GuestAddrTree = pPatchSSM->patch.Patch2GuestAddrTree;
    pPatch->patch.Guest2PatchAddrTree = pPatchSSM->patch.Guest2PatchAddrTree;
    pPatch->patch.nrPatch2GuestRecs = pPatchSSM->patch.nrPatch2GuestRecs;
    pPatch->patch.unused = pPatchSSM->patch.unused;
    pPatch->patch.pTempInfo = pPatchSSM->patch.pTempInfo;
    pPatch->patch.cCodeWrites = pPatchSSM->patch.cCodeWrites;
    pPatch->patch.cTraps = pPatchSSM->patch.cTraps;
    pPatch->patch.cInvalidWrites = pPatchSSM->patch.cInvalidWrites;
    pPatch->patch.uPatchIdx = pPatchSSM->patch.uPatchIdx;
    pPatch->patch.bDirtyOpcode = pPatchSSM->patch.bDirtyOpcode;
    /* Not present in the saved state; rebuilt later. */
    pPatch->patch.pTrampolinePatchesHead = NULL;
}
620
/**
 * Converts a memory patch record to the saved state version.
 *
 * Field-by-field copy from the live layout to the SSM layout; this is the
 * inverse of patmR3PatchConvertSSM2Mem (minus pTrampolinePatchesHead, which
 * the SSM layout does not have).
 *
 * @returns nothing.
 * @param   pPatchSSM    The saved state record (output).
 * @param   pPatch       The memory version to save (input).
 */
static void patmR3PatchConvertMem2SSM(PPATMPATCHRECSSM pPatchSSM, PPATMPATCHREC pPatch)
{
    pPatchSSM->Core = pPatch->Core;
    pPatchSSM->CoreOffset = pPatch->CoreOffset;
    pPatchSSM->patch.uState = pPatch->patch.uState;
    pPatchSSM->patch.uOldState = pPatch->patch.uOldState;
    pPatchSSM->patch.uOpMode = pPatch->patch.uOpMode;
    pPatchSSM->patch.pPrivInstrGC = pPatch->patch.pPrivInstrGC;
    pPatchSSM->patch.unusedHC = pPatch->patch.unusedHC;
    memcpy(&pPatchSSM->patch.aPrivInstr[0], &pPatch->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatchSSM->patch.cbPrivInstr = pPatch->patch.cbPrivInstr;
    pPatchSSM->patch.opcode = pPatch->patch.opcode;
    pPatchSSM->patch.cbPatchJump = pPatch->patch.cbPatchJump;
    pPatchSSM->patch.pPatchJumpDestGC = pPatch->patch.pPatchJumpDestGC;
    pPatchSSM->patch.pPatchBlockOffset = pPatch->patch.pPatchBlockOffset;
    pPatchSSM->patch.cbPatchBlockSize = pPatch->patch.cbPatchBlockSize;
    pPatchSSM->patch.uCurPatchOffset = pPatch->patch.uCurPatchOffset;
    pPatchSSM->patch.flags = pPatch->patch.flags;
    pPatchSSM->patch.pInstrGCLowest = pPatch->patch.pInstrGCLowest;
    pPatchSSM->patch.pInstrGCHighest = pPatch->patch.pInstrGCHighest;
    pPatchSSM->patch.FixupTree = pPatch->patch.FixupTree;
    pPatchSSM->patch.nrFixups = pPatch->patch.nrFixups;
    pPatchSSM->patch.nrJumpRecs = pPatch->patch.nrJumpRecs;
    pPatchSSM->patch.JumpTree = pPatch->patch.JumpTree;
    pPatchSSM->patch.Patch2GuestAddrTree = pPatch->patch.Patch2GuestAddrTree;
    pPatchSSM->patch.Guest2PatchAddrTree = pPatch->patch.Guest2PatchAddrTree;
    pPatchSSM->patch.nrPatch2GuestRecs = pPatch->patch.nrPatch2GuestRecs;
    pPatchSSM->patch.unused = pPatch->patch.unused;
    pPatchSSM->patch.pTempInfo = pPatch->patch.pTempInfo;
    pPatchSSM->patch.cCodeWrites = pPatch->patch.cCodeWrites;
    pPatchSSM->patch.cTraps = pPatch->patch.cTraps;
    pPatchSSM->patch.cInvalidWrites = pPatch->patch.cInvalidWrites;
    pPatchSSM->patch.uPatchIdx = pPatch->patch.uPatchIdx;
    pPatchSSM->patch.bDirtyOpcode = pPatch->patch.bDirtyOpcode;
}
663
664/**
665 * Callback function for RTAvloU32DoWithAll
666 *
667 * Saves the state of the patch that's being enumerated
668 *
669 * @returns VBox status code.
670 * @param pNode Current node
671 * @param pVM1 Pointer to the VM
672 */
673static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
674{
675 PVM pVM = (PVM)pVM1;
676 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
677 PATMPATCHRECSSM patch;
678 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
679 int rc;
680
681 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
682
683 patmR3PatchConvertMem2SSM(&patch, pPatch);
684 Log4(("patmSavePatchState: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
685 patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
686 patch.patch.nrFixups, patch.patch.nrJumpRecs));
687
688 /*
689 * Reset HC pointers that need to be recalculated when loading the state
690 */
691 AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
692 ("State = %x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
693 Assert(pPatch->patch.JumpTree == 0);
694 Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
695 Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);
696
697 /* Save the patch record itself */
698 rc = SSMR3PutStructEx(pSSM, &patch, sizeof(patch), 0 /*fFlags*/, &g_aPatmPatchRecFields[0], NULL);
699 AssertRCReturn(rc, rc);
700
701 /*
702 * Reset HC pointers in fixup records and save them.
703 */
704#ifdef VBOX_STRICT
705 uint32_t nrFixupRecs = 0;
706 RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
707 AssertMsg(nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
708#endif
709 rc = RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);
710 AssertRCReturn(rc, rc);
711
712#ifdef VBOX_STRICT
713 uint32_t nrLookupRecords = 0;
714 RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
715 Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
716#endif
717
718 rc = RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
719 AssertRCReturn(rc, rc);
720
721 return VINF_SUCCESS;
722}
723
/**
 * Execute state save operation.
 *
 * Saves, in order: the PATM structure, the raw patch memory, the GC state,
 * the PATM stack page (prefixed by its size), and then every patch record
 * via patmSavePatchState.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /* Work on a local copy so host pointers can be cleared without touching the live state. */
    PATM patmInfo = pVM->patm.s;
    int rc;

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC = NULL;
    patmInfo.pGCStateHC = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutStructEx(pSSM, &patmInfo, sizeof(patmInfo), 0 /*fFlags*/, &g_aPatmFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), 0 /*fFlags*/, &g_aPatmGCStateFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    /* NOTE(review): return value of SSMR3PutU32 is ignored; an error here would
       be caught by the following SSMR3PutMem via the SSM handle status — verify. */
    SSMR3PutU32(pSSM, PATM_STACK_TOTAL_SIZE);
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
788
789
/**
 * Execute state load operation.
 *
 * Restores the PATM structure, patch memory, GC state, stack page and every
 * patch record (with fixup and patch-to-guest lookup trees), correcting
 * absolute/relative fixups for relocated hypervisor regions along the way.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pSSM SSM operation handle.
 * @param uVersion Data layout version.
 * @param uPass The data pass.
 */
DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    PATM patmInfo;
    int rc;

    /* Reject saved state layouts we don't know how to restore. */
    if (    uVersion != PATM_SAVED_STATE_VERSION
        &&  uVersion != PATM_SAVED_STATE_VERSION_MEM
        &&  uVersion != PATM_SAVED_STATE_VERSION_FIXUP_HACK
        &&  uVersion != PATM_SAVED_STATE_VERSION_VER16
       )
    {
        AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    /* Units up to the MEM version were saved as raw memory dumps; restore
       their structures with the relaxed band-aid field matching. */
    uint32_t const fStructRestoreFlags = uVersion <= PATM_SAVED_STATE_VERSION_MEM ? SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED : 0;
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Restore PATM structure
     */
    RT_ZERO(patmInfo);
    /* Revision 86139 changed the PATM field layout within the MEM version;
       pick the matching field descriptor table based on the saved revision. */
    if (   uVersion == PATM_SAVED_STATE_VERSION_MEM
        && SSMR3HandleRevision(pSSM) >= 86139
        && SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(4, 2, 51))
        rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
                              &g_aPatmFields86139[0], NULL);
    else
        rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), fStructRestoreFlags, &g_aPatmFields[0], NULL);
    AssertRCReturn(rc, rc);

    /* Relative calls are made to the helper functions. Therefore their relative location must not change! */
    /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
    AssertLogRelReturn((pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn(pVM->patm.s.cbPatchMem == patmInfo.cbPatchMem, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

    pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
    pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
    pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
    pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;

    /* Lowest and highest patched instruction */
    pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
    pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;

    /* Sysenter handlers */
    pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
    pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
    pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;

    Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);

    Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
    Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
    Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
    Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));


    /** @note patch statistics are not restored. */

    /*
     * Restore patch memory contents
     */
    Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Restore GC state memory
     */
    RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), fStructRestoreFlags, &g_aPatmGCStateFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Restore PATM stack page
     */
    /* Newer versions prefix the stack dump with its size; older ones always
       wrote the full PATM_STACK_TOTAL_SIZE. */
    uint32_t cbStack = PATM_STACK_TOTAL_SIZE;
    if (uVersion > PATM_SAVED_STATE_VERSION_MEM)
    {
        rc = SSMR3GetU32(pSSM, &cbStack);
        AssertRCReturn(rc, rc);
    }
    AssertCompile(!(PATM_STACK_TOTAL_SIZE & 31));
    AssertLogRelMsgReturn(cbStack > 0 && cbStack <= PATM_STACK_TOTAL_SIZE && !(cbStack & 31),
                          ("cbStack=%#x vs %#x", cbStack, PATM_STACK_TOTAL_SIZE),
                          VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, cbStack);
    AssertRCReturn(rc, rc);
    if (cbStack < PATM_STACK_TOTAL_SIZE)
        memset((uint8_t *)pVM->patm.s.pGCStackHC + cbStack, 0, PATM_STACK_TOTAL_SIZE - cbStack);

    /*
     * Load all patches
     */
    for (unsigned i = 0; i < patmInfo.savedstate.cPatches; i++)
    {
        PATMPATCHRECSSM patch;
        PATMPATCHREC *pPatchRec;

        RT_ZERO(patch);
        rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), fStructRestoreFlags, &g_aPatmPatchRecFields[0], NULL);
        AssertRCReturn(rc, rc);
        Log4(("patmR3Load: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
              patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
              patch.patch.nrFixups, patch.patch.nrJumpRecs));

        Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));

        rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("Out of memory!!!!\n"));
            return VERR_NO_MEMORY;
        }

        /* Convert SSM version to memory. */
        patmR3PatchConvertSSM2Mem(pPatchRec, &patch);

        Log(("Restoring patch %RRv -> %RRv state %x\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset, pPatchRec->patch.uState));
        bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
        Assert(ret);
        if (pPatchRec->patch.uState != PATCH_REFUSED)
        {
            if (pPatchRec->patch.pPatchBlockOffset)
            {
                /* We actually generated code for this patch. */
                ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
                AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
            }
        }
        /* Set to zero as we don't need it anymore. */
        pPatchRec->patch.pTempInfo = 0;

        PATMP2GLOOKUPREC cacheRec;
        RT_ZERO(cacheRec);
        cacheRec.pPatch = &pPatchRec->patch;

        uint8_t *pPrivInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pPatchRec->patch.pPrivInstrGC);
        /* Can fail due to page or page table not present. */

        /*
         * Restore fixup records and correct HC pointers in fixup records
         */
        pPatchRec->patch.FixupTree = 0;
        pPatchRec->patch.nrFixups = 0;    /* increased by patmPatchAddReloc32 */
        for (unsigned j = 0; j < patch.patch.nrFixups; j++)
        {
            RELOCREC rec;
            int32_t offset;
            RTRCPTR *pFixup;

            RT_ZERO(rec);
            rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRelocRec[0], NULL);
            AssertRCReturn(rc, rc);

            /* NOTE(review): when the private instruction page isn't mapped
               (pPrivInstrHC == NULL) the fixup record is read from the stream
               but silently dropped; the patch is removed further below. */
            if (pPrivInstrHC)
            {
                /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
                offset = (int32_t)(intptr_t)rec.pRelocPos;
                /* Convert to HC pointer again. */
                if ((uintptr_t)rec.pRelocPos < pVM->patm.s.cbPatchMem)
                    rec.pRelocPos = pVM->patm.s.pPatchMemHC + (uintptr_t)rec.pRelocPos;
                else
                    rec.pRelocPos = NULL;
                pFixup = (RTRCPTR *)rec.pRelocPos;

                if (pPatchRec->patch.uState != PATCH_REFUSED)
                {
                    if (    rec.uType == FIXUP_REL_JMPTOPATCH
                        &&  (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
                    {
                        /* Jump-to-patch fixups live in guest code, not patch
                           memory: point past the (1 or 2 byte) jump opcode. */
                        Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
                        unsigned offset2 = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;

                        rec.pRelocPos = pPrivInstrHC + offset2;
                        pFixup = (RTRCPTR *)rec.pRelocPos;
                    }

                    rc = patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
                    AssertRCReturn(rc, rc);
                }

                rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
                AssertRCReturn(rc, rc);
            }
        }
        /* Release previous lock if any. */
        if (cacheRec.Lock.pvMap)
            PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);

        /* And all patch to guest lookup records */
        Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));

        pPatchRec->patch.Patch2GuestAddrTree = 0;
        pPatchRec->patch.Guest2PatchAddrTree = 0;
        if (pPatchRec->patch.nrPatch2GuestRecs)
        {
            RECPATCHTOGUEST rec;
            uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;

            pPatchRec->patch.nrPatch2GuestRecs = 0;    /* incremented by patmr3AddP2GLookupRecord */
            for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
            {
                RT_ZERO(rec);
                rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRecPatchToGuest[0], NULL);
                AssertRCReturn(rc, rc);

                patmR3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
            }
            Assert(pPatchRec->patch.Patch2GuestAddrTree);
        }

        if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
        {
            /* Insert the guest page lookup records (for detection self-modifying code) */
            rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
            AssertRCReturn(rc, rc);
        }

#if 0 /* can fail def LOG_ENABLED */
        if (    pPatchRec->patch.uState != PATCH_REFUSED
            &&  !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
        {
            pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
            Log(("Patch code ----------------------------------------------------------\n"));
            patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
            Log(("Patch code ends -----------------------------------------------------\n"));
            MMR3HeapFree(pPatchRec->patch.pTempInfo);
            pPatchRec->patch.pTempInfo = NULL;
        }
#endif
        /* Remove the patch in case the gc mapping is not present. */
        if (    !pPrivInstrHC
            &&  pPatchRec->patch.uState == PATCH_ENABLED)
        {
            Log(("Remove patch %RGv due to failed HC address translation\n", pPatchRec->patch.pPrivInstrGC));
            PATMR3RemovePatch(pVM, pPatchRec->patch.pPrivInstrGC);
        }
    }

    /*
     * Correct absolute fixups in the global patch. (helper functions)
     * Bit of a mess. Uses the new patch record, but restored patch functions.
     */
    PRELOCREC pRec = 0;
    AVLPVKEY key = 0;

    Log(("Correct fixups in global helper functions\n"));
    while (true)
    {
        int32_t offset;
        RTRCPTR *pFixup;

        /* Get the record that's closest from above */
        pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
        if (pRec == 0)
            break;

        key = (AVLPVKEY)(pRec->pRelocPos + 1);    /* search for the next record during the next round. */

        /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
        offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
        pFixup = (RTRCPTR *)pRec->pRelocPos;

        /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
        rc = patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
        AssertRCReturn(rc, rc);
    }

#ifdef VBOX_WITH_STATISTICS
    /*
     * Restore relevant old statistics
     */
    pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
    pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
    pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
    pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
#endif

    return VINF_SUCCESS;
}
1089
/**
 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param uVersion Saved state version.
 * @param patmInfo Saved PATM structure
 * @param pPatch Patch record
 * @param pRec Relocation record
 * @param offset Offset of referenced data/code
 * @param pFixup Fixup address
 */
static int patmCorrectFixup(PVM pVM, unsigned uVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec,
                            int32_t offset, RTRCPTR *pFixup)
{
    /* Relocation delta between the patch memory of the saved state and this VM. */
    int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;

    switch (pRec->uType)
    {
        case FIXUP_ABSOLUTE:
        case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
        {
            /* NOTE(review): comparing the fixup *type* against the saved-state
               *version* constant PATM_SAVED_STATE_VERSION_NO_RAW_MEM looks like
               mixed-up enums; presumably FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL (or a
               uVersion comparison) was intended — confirm upstream. */
            Assert(   pRec->uType != PATM_SAVED_STATE_VERSION_NO_RAW_MEM
                   || (pRec->pSource == pRec->pDest && PATM_IS_ASMFIX(pRec->pSource)) );

            /* bird: What is this for exactly? Only the MMIO fixups used to have pSource set. */
            if (    pRec->pSource
                && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource)
                &&  pRec->uType != FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
                break;

            /* Classify the saved fixup target by which old hypervisor region it
               fell into and rebase it onto the corresponding new region. */
            RTRCPTR const uFixup = *pFixup;
            if (    uFixup >= patmInfo.pGCStateGC
                &&  uFixup <  patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
            {
                LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
                *pFixup = (uFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
            }
            else if (    uFixup >= patmInfo.pCPUMCtxGC
                     &&  uFixup <  patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
            {
                LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));

                /* The CPUMCTX structure has completely changed, so correct the offsets too. */
                if (uVersion == PATM_SAVED_STATE_VERSION_VER16)
                {
                    unsigned offCpumCtx = uFixup - patmInfo.pCPUMCtxGC;

                    /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
                     * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
                     * function is not available in older gcc versions, at least not in gcc-3.3 */
                    if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
                    {
                        LogFlow(("Changing dr[0] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[0])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
                    {
                        LogFlow(("Changing dr[1] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[1])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
                    {
                        LogFlow(("Changing dr[2] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[2])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
                    {
                        LogFlow(("Changing dr[3] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[3])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
                    {
                        LogFlow(("Changing dr[4] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[4])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
                    {
                        LogFlow(("Changing dr[5] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[5])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
                    {
                        LogFlow(("Changing dr[6] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[6])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
                    {
                        LogFlow(("Changing dr[7] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[7])));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
                    {
                        LogFlow(("Changing cr0 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr0)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
                    {
                        LogFlow(("Changing cr2 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr2)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
                    {
                        LogFlow(("Changing cr3 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr3)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
                    {
                        LogFlow(("Changing cr4 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr4)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
                    {
                        LogFlow(("Changing tr offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, tr)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
                    {
                        LogFlow(("Changing ldtr offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, ldtr)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
                    {
                        LogFlow(("Changing pGdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
                    {
                        LogFlow(("Changing cbGdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
                    {
                        LogFlow(("Changing pIdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
                    }
                    else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
                    {
                        LogFlow(("Changing cbIdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
                        *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
                    }
                    else
                        AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", offCpumCtx));
                }
                else
                    *pFixup = (uFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
            }
            else if (    uFixup >= patmInfo.pStatsGC
                     &&  uFixup <  patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
            {
                LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
                *pFixup = (uFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
            }
            else if (    uFixup >= patmInfo.pGCStackGC
                     &&  uFixup <  patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
            {
                LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
                *pFixup = (uFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
            }
            else if (    uFixup >= patmInfo.pPatchMemGC
                     &&  uFixup <  patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
            {
                LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
                *pFixup = (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
            }
            /*
             * For PATM_SAVED_STATE_VERSION_FIXUP_HACK and earlier boldly ASSUME:
             *      1. That pCPUMCtxGC is in the VM structure and that its location is
             *         at the first page of the same 4 MB chunk.
             *      2. That the forced actions were in the first 32 bytes of the VM
             *         structure.
             *      3. That the CPUM leaves are less than 8KB into the structure.
             */
            else if (   uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
                     && uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
            {
                LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", uFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
                *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                pRec->pSource = pRec->pDest = PATM_ASMFIX_VM_FORCEDACTIONS;
                pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
            }
            else if (   uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
                     && uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(8192))
            {
                /* NOTE(review): function-local static — this rotation counter
                   survives across patches and across subsequent state loads
                   (it is never reset), and it assumes cpuid fixups are always
                   encountered in def/std/ext/centaur order. TODO confirm. */
                static int cCpuidFixup = 0;

                /* Very dirty assumptions about the cpuid patch and cpuid ordering. */
                switch (cCpuidFixup & 3)
                {
                    case 0:
                        *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_DEF_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        break;
                    case 1:
                        *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_STD_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        break;
                    case 2:
                        *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_EXT_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        break;
                    case 3:
                        *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_CENTAUR_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        break;
                }
                LogFlow(("Changing cpuid fixup %d from %RRv to %RRv\n", cCpuidFixup, uFixup, *pFixup));
                cCpuidFixup++;
            }
            /*
             * For PATM_SAVED_STATE_VERSION_MEM thru PATM_SAVED_STATE_VERSION_NO_RAW_MEM
             * we abused Core.Key to store the type for fixups needing correcting on load.
             */
            else if (   uVersion >= PATM_SAVED_STATE_VERSION_MEM
                     && uVersion <= PATM_SAVED_STATE_VERSION_NO_RAW_MEM)
            {
                /* Core.Key abused to store the type of fixup. */
                switch ((uintptr_t)pRec->Core.Key)
                {
                    case PATM_FIXUP_CPU_FF_ACTION:
                        *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_VM_FORCEDACTIONS;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpu ff action fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    case PATM_FIXUP_CPUID_DEFAULT:
                        *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_DEF_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpuid def fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    case PATM_FIXUP_CPUID_STANDARD:
                        *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_STD_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpuid std fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    case PATM_FIXUP_CPUID_EXTENDED:
                        *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_EXT_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpuid ext fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    case PATM_FIXUP_CPUID_CENTAUR:
                        *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
                        pRec->pSource = pRec->pDest = PATM_ASMFIX_CPUID_CENTAUR_PTR;
                        pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                        LogFlow(("Changing cpuid centaur fixup from %x to %x\n", uFixup, *pFixup));
                        break;
                    default:
                        AssertMsgFailed(("Unexpected fixup value %p\n", (uintptr_t)pRec->Core.Key));
                        break;
                }
            }
            /*
             * After PATM_SAVED_STATE_VERSION_NO_RAW_MEM we changed the fixup type
             * and instead put the patch fixup code in the source and target addresses.
             */
            else if (   uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM
                     && pRec->uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
            {
                Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_ASMFIX(pRec->pSource));
                switch (pRec->pSource)
                {
                    case PATM_ASMFIX_VM_FORCEDACTIONS:
                        *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                        break;
                    case PATM_ASMFIX_CPUID_DEF_PTR:
                        *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_STD_PTR: /* Saved again patches only. */
                        *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_EXT_PTR: /* Saved again patches only. */
                        *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_CENTAUR_PTR: /* Saved again patches only. */
                        *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_REUSE_LATER_0: /* Was only used for a few days. Don't want to keep this legacy around. */
                    case PATM_ASMFIX_REUSE_LATER_1:
                        AssertLogRelMsgFailedReturn(("Unsupported PATM fixup. You have to discard this saved state or snapshot."),
                                                    VERR_INTERNAL_ERROR);
                        break;
                }
            }
            /*
             * Constant that may change between VM version needs fixing up.
             */
            else if (pRec->uType == FIXUP_CONSTANT_IN_PATCH_ASM_TMPL)
            {
                AssertLogRelReturn(uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
                Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_ASMFIX(pRec->pSource));
                switch (pRec->pSource)
                {
                    case PATM_ASMFIX_REUSE_LATER_2: /* Was only used for a few days. Don't want to keep this legacy around. */
                    case PATM_ASMFIX_REUSE_LATER_3:
                        AssertLogRelMsgFailedReturn(("Unsupported PATM fixup. You have to discard this saved state or snapshot."),
                                                    VERR_INTERNAL_ERROR);
                        break;
                    default:
                        AssertLogRelMsgFailed(("Unknown FIXUP_CONSTANT_IN_PATCH_ASM_TMPL fixup: %#x\n", pRec->pSource));
                        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
                }
            }
            /*
             * Relative fixups for calling or jumping to helper functions inside VMMRC.
             * (The distance between the helper function and the patch is subject to
             * new code being added to VMMRC as well as VM configurations influencing
             * heap allocations and so on and so forth.)
             */
            else if (pRec->uType == FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL)
            {
                AssertLogRelReturn(uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
                Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_ASMFIX(pRec->pSource));
                int rc;
                RTRCPTR uRCPtrDest;
                switch (pRec->pSource)
                {
                    case PATM_ASMFIX_HELPER_CPUM_CPUID:
                        rc = PDMR3LdrGetSymbolRC(pVM, NULL, "CPUMPatchHlpCpuId", &uRCPtrDest);
                        AssertLogRelRCReturn(rc, rc);
                        break;
                    default:
                        AssertLogRelMsgFailed(("Unknown FIXUP_REL_HLP_CALL_IN_PATCH_ASM_TMPL fixup: %#x\n", pRec->pSource));
                        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
                }
                /* Recompute the call displacement relative to the instruction end. */
                RTRCPTR uRCPtrAfter = pVM->patm.s.pPatchMemGC + ((uintptr_t)&pFixup[1] - (uintptr_t)pVM->patm.s.pPatchMemHC);
                *pFixup = uRCPtrDest - uRCPtrAfter;
            }

#ifdef RT_OS_WINDOWS
            AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
#endif
            break;
        }

        case FIXUP_REL_JMPTOPATCH:
        {
            RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);

            if (    pPatch->uState == PATCH_ENABLED
                &&  (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
            {
                uint8_t    oldJump[SIZEOF_NEAR_COND_JUMP32];
                uint8_t    temp[SIZEOF_NEAR_COND_JUMP32];
                RTRCPTR    pJumpOffGC;
                RTRCINTPTR displ   = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
                RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;

                Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));

                Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
                if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
                {
                    Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);

                    pJumpOffGC = pPatch->pPrivInstrGC + 2;    //two byte opcode
                    oldJump[0] = pPatch->aPrivInstr[0];
                    oldJump[1] = pPatch->aPrivInstr[1];
                    *(RTRCUINTPTR *)&oldJump[2] = displOld;
                }
                else
#endif
                if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
                {
                    pJumpOffGC = pPatch->pPrivInstrGC + 1;    //one byte opcode
                    oldJump[0] = 0xE9;
                    *(RTRCUINTPTR *)&oldJump[1] = displOld;
                }
                else
                {
                    AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
                    break;
                }
                Assert(pPatch->cbPatchJump <= sizeof(temp));

                /*
                 * Read old patch jump and compare it to the one we previously installed
                 */
                int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);

                if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
                {
                    /* Page not mapped yet: monitor it so the jump can be
                       checked/rewritten when the guest maps it back in. */
                    RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;

                    rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
                    Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
                }
                else
                if (memcmp(temp, oldJump, pPatch->cbPatchJump))
                {
                    Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
                    /*
                     * Disable patch; this is not a good solution
                     */
                    /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
                    pPatch->uState = PATCH_DISABLED;
                }
                else
                if (RT_SUCCESS(rc))
                {
                    /* Jump still intact: rewrite its displacement for the new patch location. */
                    rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
                    AssertRC(rc);
                }
                else
                    AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
            }
            else
                Log(("Skip the guest jump to patch code for this disabled patch %08X\n", pRec->pRelocPos));

            pRec->pDest = pTarget;
            break;
        }

        case FIXUP_REL_JMPTOGUEST:
        {
            RTRCPTR    pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
            RTRCINTPTR displ   = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;

            Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
            Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
            *(RTRCUINTPTR *)pRec->pRelocPos = displ;
            pRec->pSource = pSource;
            break;

        }
    }
    /* Fixup types not handled by the switch above fall through and are
       reported as success without modification. */
    return VINF_SUCCESS;
}
1526
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette