VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp@ 49799

Last change on this file since 49799 was 49799, checked in by vboxsync, 11 years ago

Missed the else in the FIXUP_ABSOLUTE case.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 65.2 KB
Line 
1/* $Id: PATMSSM.cpp 49799 2013-12-05 23:41:15Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/cpumctx-v1_6.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/ssm.h>
29#include <VBox/param.h>
30#include <iprt/avl.h>
31#include "PATMInternal.h"
32#include "PATMPatch.h"
33#include "PATMA.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/vmm/csam.h>
36#include "internal/pgm.h"
37#include <VBox/dbg.h>
38#include <VBox/err.h>
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/version.h>
46
/**
 * Patch information - SSM version.
 *
 * The difference to the live PATCHINFO structure is the missing
 * pTrampolinePatchesHead member; it is left out here to avoid changing the
 * saved state version for now (will come later).
 *
 * NOTE: This is a saved-state layout - member order, sizes and the
 * HC_ARCH_BITS padding must not change.
 */
typedef struct PATCHINFOSSM
{
    /** Current patch state (PATCH_* value, see patmSavePatchState). */
    uint32_t uState;
    /** Previous patch state. */
    uint32_t uOldState;
    /** Disassembler CPU mode (DISCPUMODE) of the patched code. */
    DISCPUMODE uOpMode;

    /* GC pointer of privileged instruction */
    RCPTRTYPE(uint8_t *) pPrivInstrGC;
    /** Unused HC pointer. @todo Can't remove due to structure size dependencies in saved states. */
    R3PTRTYPE(uint8_t *) unusedHC;
    /** Copy of the original privileged instruction bytes. */
    uint8_t aPrivInstr[MAX_INSTR_SIZE];
    /** Size of the privileged instruction in bytes. */
    uint32_t cbPrivInstr;
    /** Opcode for the privileged instruction (OP_*). */
    uint32_t opcode;
    /** Size of the patch jump. */
    uint32_t cbPatchJump;

    /* Only valid for PATMFL_JUMP_CONFLICT patches */
    RTRCPTR pPatchJumpDestGC;

    /** Offset of the patch code inside the patch memory block. */
    RTGCUINTPTR32 pPatchBlockOffset;
    /** Size of the generated patch code. */
    uint32_t cbPatchBlockSize;
    /** Current offset while emitting patch code. */
    uint32_t uCurPatchOffset;
#if HC_ARCH_BITS == 64
    uint32_t Alignment0; /**< Align flags correctly. */
#endif

    /** PATMFL_* flags. */
    uint64_t flags;

    /**
     * Lowest and highest patched GC instruction address. To optimize searches.
     */
    RTRCPTR pInstrGCLowest;
    RTRCPTR pInstrGCHighest;

    /* Tree of fixup records for the patch. */
    R3PTRTYPE(PAVLPVNODECORE) FixupTree;
    uint32_t nrFixups;

    /* Tree of jumps inside the generated patch code. */
    uint32_t nrJumpRecs;
    R3PTRTYPE(PAVLPVNODECORE) JumpTree;

    /**
     * Lookup trees for determining the corresponding guest address of an
     * instruction in the patch block.
     */
    R3PTRTYPE(PAVLU32NODECORE) Patch2GuestAddrTree;
    R3PTRTYPE(PAVLU32NODECORE) Guest2PatchAddrTree;
    uint32_t nrPatch2GuestRecs;
#if HC_ARCH_BITS == 64
    uint32_t Alignment1;
#endif

    /* Unused, but can't remove due to structure size dependencies in the saved state. */
    PATMP2GLOOKUPREC_OBSOLETE unused;

    /* Temporary information during patch creation. Don't waste hypervisor memory for this. */
    R3PTRTYPE(PPATCHINFOTEMP) pTempInfo;

    /* Count the number of writes to the corresponding guest code. */
    uint32_t cCodeWrites;

    /* Count the number of traps taken; some statistics to determine if we should keep this patch activated. */
    uint32_t cTraps;

    /* Count the number of invalid writes to pages monitored for the patch. */
    uint32_t cInvalidWrites;

    /* Index into the uPatchRun and uPatchTrap arrays (0..MAX_PATCHES-1) */
    uint32_t uPatchIdx;

    /* First opcode byte, that's overwritten when a patch is marked dirty. */
    uint8_t bDirtyOpcode;
    uint8_t Alignment2[7]; /**< Align the structure size on a 8-byte boundary. */
} PATCHINFOSSM, *PPATCHINFOSSM;
126
/**
 * Lookup record for patches - SSM version.
 *
 * Mirrors PATMPATCHREC with the SSM flavor of the patch info embedded.
 */
typedef struct PATMPATCHRECSSM
{
    /** The key is a GC virtual address. */
    AVLOU32NODECORE Core;
    /** The key is a patch offset. */
    AVLOU32NODECORE CoreOffset;

    /** The patch information (saved-state layout). */
    PATCHINFOSSM patch;
} PATMPATCHRECSSM, *PPATMPATCHRECSSM;
139
140/*******************************************************************************
141* Defined Constants And Macros *
142*******************************************************************************/
/** Rebases pointer @a a by subtracting base @a b in place (pointer -> offset). */
#define PATM_SUBTRACT_PTR(a, b) *(uintptr_t *)&(a) = (uintptr_t)(a) - (uintptr_t)(b)
/** Rebases pointer @a a by adding base @a b in place (offset -> pointer). */
#define PATM_ADD_PTR(a, b)      *(uintptr_t *)&(a) = (uintptr_t)(a) + (uintptr_t)(b)
145
146/*******************************************************************************
147* Internal Functions *
148*******************************************************************************/
149static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup);
150
151/*******************************************************************************
152* Global Variables *
153*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * NOTE: Entry order defines the saved-state wire format - do not reorder.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* Statistics are not part of the saved state proper; all ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
250
/**
 * SSM descriptor table for the PATM structure starting with r86139.
 *
 * Differs from g_aPatmFields only by the hDbgModPatchMem member (and its
 * HC32 padding) inserted after the savedstate fields.
 * NOTE: Entry order defines the saved-state wire format - do not reorder.
 */
static SSMFIELD const g_aPatmFields86139[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* New in r86139: debug module handle for the patch memory. */
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, hDbgModPatchMem),
    SSMFIELD_ENTRY_PAD_HC32(        PATM, Alignment0, sizeof(uint32_t)),
    /* Statistics are not part of the saved state proper; all ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
349
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 *
 * NOTE: Entry order defines the saved-state wire format - do not reorder.
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(                 PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(                 PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
374
/**
 * SSM descriptor table for the PATMPATCHREC structure (saved via the
 * PATMPATCHRECSSM conversion type, see patmR3PatchConvertMem2SSM).
 *
 * NOTE: Entry order defines the saved-state wire format - do not reorder.
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unusedHC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocEndHC),   // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHRECSSM, patch.unused.pGuestLoc),        // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.unused.opsize),           // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
428
/**
 * SSM descriptor table for the RELOCREC structure.
 *
 * See patmSaveFixupRecords for how Core.Key and pRelocPos are massaged
 * before saving. NOTE: Entry order defines the wire format - do not reorder.
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key),        // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos),       // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
446
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 *
 * NOTE: Entry order defines the saved-state wire format - do not reorder.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 5),
    SSMFIELD_ENTRY_TERM()
};
466
467#ifdef VBOX_STRICT
468
469/**
470 * Callback function for RTAvlPVDoWithAll
471 *
472 * Counts the number of patches in the tree
473 *
474 * @returns VBox status code.
475 * @param pNode Current node
476 * @param pcPatches Pointer to patch counter (uint32_t)
477 */
478static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
479{
480 NOREF(pNode);
481 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
482 return VINF_SUCCESS;
483}
484
485/**
486 * Callback function for RTAvlU32DoWithAll
487 *
488 * Counts the number of patches in the tree
489 *
490 * @returns VBox status code.
491 * @param pNode Current node
492 * @param pcPatches Pointer to patch counter (uint32_t)
493 */
494static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
495{
496 NOREF(pNode);
497 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
498 return VINF_SUCCESS;
499}
500
501#endif /* VBOX_STRICT */
502
503/**
504 * Callback function for RTAvloU32DoWithAll
505 *
506 * Counts the number of patches in the tree
507 *
508 * @returns VBox status code.
509 * @param pNode Current node
510 * @param pcPatches Pointer to patch counter
511 */
512static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
513{
514 NOREF(pNode);
515 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
516 return VINF_SUCCESS;
517}
518
519/**
520 * Callback function for RTAvlU32DoWithAll
521 *
522 * Saves all patch to guest lookup records.
523 *
524 * @returns VBox status code.
525 * @param pNode Current node
526 * @param pVM1 Pointer to the VM
527 */
528static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
529{
530 PVM pVM = (PVM)pVM1;
531 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
532 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
533
534 /* Save the lookup record. */
535 int rc = SSMR3PutStructEx(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST), 0 /*fFlags*/, &g_aPatmRecPatchToGuest[0], NULL);
536 AssertRCReturn(rc, rc);
537
538 return VINF_SUCCESS;
539}
540
/**
 * Callback function for RTAvlPVDoWithAll
 *
 * Saves one fixup (relocation) record to the saved state, converting
 * HC pointers into position-independent values first.
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a RELOCREC record; saved via a local copy,
 *                  the tree node itself is not modified).
 * @param   pVM1    Pointer to the VM (opaque user argument).
 */
static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
{
    PVM                 pVM    = (PVM)pVM1;
    PSSMHANDLE          pSSM   = pVM->patm.s.savedstate.pSSM;
    RELOCREC            rec    = *(PRELOCREC)pNode;
    /* Note: pFixup is captured BEFORE pRelocPos is rebased below, so it still
       dereferences the live HC address of the fixup target. */
    RTRCPTR            *pFixup = (RTRCPTR *)rec.pRelocPos;

    Assert(rec.pRelocPos);
    /* Convert pointer to an offset into patch memory. */
    PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);

    /* Zero rec.Core.Key since it's unused and may trigger SSM check due to the hack below. */
    rec.Core.Key = 0;

    if (rec.uType == FIXUP_ABSOLUTE)
    {
        /* Core.Key abused to store the fixup type, so the load code can
           re-resolve well-known targets instead of relocating blindly. */
        if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdDefRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdStdRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdExtRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
        else
        if (*pFixup == CPUMR3GetGuestCpuIdCentaurRCPtr(pVM))
            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
    }

    /* Save the lookup record. */
    int rc = SSMR3PutStructEx(pSSM, &rec, sizeof(rec), 0 /*fFlags*/, &g_aPatmRelocRec[0], NULL);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
589
/**
 * Converts a saved state patch record to the memory record.
 *
 * @returns nothing.
 * @param   pPatch      The memory record (destination).
 * @param   pPatchSSM   The SSM version of the patch record (source).
 */
static void patmR3PatchConvertSSM2Mem(PPATMPATCHREC pPatch, PPATMPATCHRECSSM pPatchSSM)
{
    /*
     * Only restore the patch part of the tree record; not the internal data (except the key of course)
     */
    pPatch->Core.Key                  = pPatchSSM->Core.Key;
    pPatch->CoreOffset.Key            = pPatchSSM->CoreOffset.Key;
    pPatch->patch.uState              = pPatchSSM->patch.uState;
    pPatch->patch.uOldState           = pPatchSSM->patch.uOldState;
    pPatch->patch.uOpMode             = pPatchSSM->patch.uOpMode;
    pPatch->patch.pPrivInstrGC        = pPatchSSM->patch.pPrivInstrGC;
    pPatch->patch.unusedHC            = pPatchSSM->patch.unusedHC;
    memcpy(&pPatch->patch.aPrivInstr[0], &pPatchSSM->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatch->patch.cbPrivInstr         = pPatchSSM->patch.cbPrivInstr;
    pPatch->patch.opcode              = pPatchSSM->patch.opcode;
    pPatch->patch.cbPatchJump         = pPatchSSM->patch.cbPatchJump;
    pPatch->patch.pPatchJumpDestGC    = pPatchSSM->patch.pPatchJumpDestGC;
    pPatch->patch.pPatchBlockOffset   = pPatchSSM->patch.pPatchBlockOffset;
    pPatch->patch.cbPatchBlockSize    = pPatchSSM->patch.cbPatchBlockSize;
    pPatch->patch.uCurPatchOffset     = pPatchSSM->patch.uCurPatchOffset;
    pPatch->patch.flags               = pPatchSSM->patch.flags;
    pPatch->patch.pInstrGCLowest      = pPatchSSM->patch.pInstrGCLowest;
    pPatch->patch.pInstrGCHighest     = pPatchSSM->patch.pInstrGCHighest;
    pPatch->patch.FixupTree           = pPatchSSM->patch.FixupTree;
    pPatch->patch.nrFixups            = pPatchSSM->patch.nrFixups;
    pPatch->patch.nrJumpRecs          = pPatchSSM->patch.nrJumpRecs;
    pPatch->patch.JumpTree            = pPatchSSM->patch.JumpTree;
    pPatch->patch.Patch2GuestAddrTree = pPatchSSM->patch.Patch2GuestAddrTree;
    pPatch->patch.Guest2PatchAddrTree = pPatchSSM->patch.Guest2PatchAddrTree;
    pPatch->patch.nrPatch2GuestRecs   = pPatchSSM->patch.nrPatch2GuestRecs;
    pPatch->patch.unused              = pPatchSSM->patch.unused;
    pPatch->patch.pTempInfo           = pPatchSSM->patch.pTempInfo;
    pPatch->patch.cCodeWrites         = pPatchSSM->patch.cCodeWrites;
    pPatch->patch.cTraps              = pPatchSSM->patch.cTraps;
    pPatch->patch.cInvalidWrites      = pPatchSSM->patch.cInvalidWrites;
    pPatch->patch.uPatchIdx           = pPatchSSM->patch.uPatchIdx;
    pPatch->patch.bDirtyOpcode        = pPatchSSM->patch.bDirtyOpcode;
    /* Member missing from the SSM layout; start with an empty list. */
    pPatch->patch.pTrampolinePatchesHead = NULL;
}
636
/**
 * Converts a memory patch record to the saved state version.
 *
 * Field-by-field copy; pTrampolinePatchesHead has no SSM counterpart and is
 * intentionally dropped.
 *
 * @returns nothing.
 * @param   pPatchSSM   The saved state record (destination).
 * @param   pPatch      The memory version to save (source).
 */
static void patmR3PatchConvertMem2SSM(PPATMPATCHRECSSM pPatchSSM, PPATMPATCHREC pPatch)
{
    pPatchSSM->Core                      = pPatch->Core;
    pPatchSSM->CoreOffset                = pPatch->CoreOffset;
    pPatchSSM->patch.uState              = pPatch->patch.uState;
    pPatchSSM->patch.uOldState           = pPatch->patch.uOldState;
    pPatchSSM->patch.uOpMode             = pPatch->patch.uOpMode;
    pPatchSSM->patch.pPrivInstrGC        = pPatch->patch.pPrivInstrGC;
    pPatchSSM->patch.unusedHC            = pPatch->patch.unusedHC;
    memcpy(&pPatchSSM->patch.aPrivInstr[0], &pPatch->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatchSSM->patch.cbPrivInstr         = pPatch->patch.cbPrivInstr;
    pPatchSSM->patch.opcode              = pPatch->patch.opcode;
    pPatchSSM->patch.cbPatchJump         = pPatch->patch.cbPatchJump;
    pPatchSSM->patch.pPatchJumpDestGC    = pPatch->patch.pPatchJumpDestGC;
    pPatchSSM->patch.pPatchBlockOffset   = pPatch->patch.pPatchBlockOffset;
    pPatchSSM->patch.cbPatchBlockSize    = pPatch->patch.cbPatchBlockSize;
    pPatchSSM->patch.uCurPatchOffset     = pPatch->patch.uCurPatchOffset;
    pPatchSSM->patch.flags               = pPatch->patch.flags;
    pPatchSSM->patch.pInstrGCLowest      = pPatch->patch.pInstrGCLowest;
    pPatchSSM->patch.pInstrGCHighest     = pPatch->patch.pInstrGCHighest;
    pPatchSSM->patch.FixupTree           = pPatch->patch.FixupTree;
    pPatchSSM->patch.nrFixups            = pPatch->patch.nrFixups;
    pPatchSSM->patch.nrJumpRecs          = pPatch->patch.nrJumpRecs;
    pPatchSSM->patch.JumpTree            = pPatch->patch.JumpTree;
    pPatchSSM->patch.Patch2GuestAddrTree = pPatch->patch.Patch2GuestAddrTree;
    pPatchSSM->patch.Guest2PatchAddrTree = pPatch->patch.Guest2PatchAddrTree;
    pPatchSSM->patch.nrPatch2GuestRecs   = pPatch->patch.nrPatch2GuestRecs;
    pPatchSSM->patch.unused              = pPatch->patch.unused;
    pPatchSSM->patch.pTempInfo           = pPatch->patch.pTempInfo;
    pPatchSSM->patch.cCodeWrites         = pPatch->patch.cCodeWrites;
    pPatchSSM->patch.cTraps              = pPatch->patch.cTraps;
    pPatchSSM->patch.cInvalidWrites      = pPatch->patch.cInvalidWrites;
    pPatchSSM->patch.uPatchIdx           = pPatch->patch.uPatchIdx;
    pPatchSSM->patch.bDirtyOpcode        = pPatch->patch.bDirtyOpcode;
}
679
/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Saves the state of the patch that's being enumerated: the patch record
 * itself, then its fixup records, then its patch-to-guest lookup records.
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a PATMPATCHREC).
 * @param   pVM1    Pointer to the VM (opaque user argument).
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
{
    PVM             pVM   = (PVM)pVM1;
    PPATMPATCHREC   pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHRECSSM patch;
    PSSMHANDLE      pSSM  = pVM->patm.s.savedstate.pSSM;
    int             rc;

    /* Global-function patches are saved separately, never through this path. */
    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    /* Work on a saved-state flavored copy; the live record is untouched. */
    patmR3PatchConvertMem2SSM(&patch, pPatch);
    Log4(("patmSavePatchState: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
          patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
          patch.patch.nrFixups, patch.patch.nrJumpRecs));

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    /* Save the patch record itself */
    rc = SSMR3PutStructEx(pSSM, &patch, sizeof(patch), 0 /*fFlags*/, &g_aPatmPatchRecFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    /* Cross-check the saved count against the actual tree population. */
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg(nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    rc = RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);
    AssertRCReturn(rc, rc);

#ifdef VBOX_STRICT
    /* Same cross-check for the patch-to-guest lookup records. */
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    rc = RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
739
/**
 * Execute state save operation.
 *
 * Unit layout: PATM structure, raw patch memory, GC state, stack size +
 * stack page, then one record per patch (see patmSavePatchState).
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /* Work on a local copy so the HC pointer scrubbing below doesn't touch
       the live PATM instance data. */
    PATM patmInfo = pVM->patm.s;
    int  rc;

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC    = NULL;
    patmInfo.pGCStateHC     = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutStructEx(pSSM, &patmInfo, sizeof(patmInfo), 0 /*fFlags*/, &g_aPatmFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), 0 /*fFlags*/, &g_aPatmGCStateFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page (size first so the loader can validate it)
     */
    SSMR3PutU32(pSSM, PATM_STACK_TOTAL_SIZE);
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
804
805
806/**
807 * @callback_method_impl{FNSSMINTLOADEXEC, Dummy load function for HM mode.}
808 */
809DECLCALLBACK(int) patmR3LoadDummy(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
810{
811 return SSMR3SkipToEndOfUnit(pSSM);
812}
813
814
/**
 * Execute state load operation.
 *
 * Restores the PATM instance structure, validates that the global helper
 * functions still live at the same offsets inside patch memory, restores the
 * patch memory / GC state / stack pages, then rebuilds every patch record
 * including its fixup and patch-to-guest lookup trees.  Finally the absolute
 * fixups of the global helper patch are corrected for relocation.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    PATM patmInfo;
    int rc;

    /* Only the four known layout versions are accepted. */
    if (    uVersion != PATM_SSM_VERSION
        &&  uVersion != PATM_SSM_VERSION_MEM
        &&  uVersion != PATM_SSM_VERSION_FIXUP_HACK
        &&  uVersion != PATM_SSM_VERSION_VER16
       )
    {
        AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    /* Older saved states were raw memory dumps; restore them leniently. */
    uint32_t const fStructRestoreFlags = uVersion <= PATM_SSM_VERSION_MEM ? SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED : 0;
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Restore PATM structure
     */
    RT_ZERO(patmInfo);
    /* Revision 86139 changed the field layout within the _MEM version; pick the matching descriptor. */
    if (   uVersion == PATM_SSM_VERSION_MEM
        && SSMR3HandleRevision(pSSM) >= 86139
        && SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(4, 2, 51))
        rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
                              &g_aPatmFields86139[0], NULL);
    else
        rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), fStructRestoreFlags, &g_aPatmFields[0], NULL);
    AssertRCReturn(rc, rc);

    /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
    /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
    AssertLogRelReturn((pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn((pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC),
                       VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    AssertLogRelReturn(pVM->patm.s.cbPatchMem == patmInfo.cbPatchMem, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

    pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
    pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
    pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
    pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;

    /* Lowest and highest patched instruction */
    pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
    pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;

    /* Sysenter handlers */
    pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
    pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
    pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;

    Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);

    Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
    Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
    Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
    Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));


    /** @note patch statistics are not restored. */

    /*
     * Restore patch memory contents
     */
    Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Restore GC state memory
     */
    RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
    rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), fStructRestoreFlags, &g_aPatmGCStateFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Restore PATM stack page
     */
    /* Newer saved states prefix the stack with its size; older ones imply PATM_STACK_TOTAL_SIZE. */
    uint32_t cbStack = PATM_STACK_TOTAL_SIZE;
    if (uVersion > PATM_SSM_VERSION_MEM)
    {
        rc = SSMR3GetU32(pSSM, &cbStack);
        AssertRCReturn(rc, rc);
    }
    AssertCompile(!(PATM_STACK_TOTAL_SIZE & 31));
    AssertLogRelMsgReturn(cbStack > 0 && cbStack <= PATM_STACK_TOTAL_SIZE && !(cbStack & 31),
                          ("cbStack=%#x vs %#x", cbStack, PATM_STACK_TOTAL_SIZE),
                          VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, cbStack);
    AssertRCReturn(rc, rc);
    /* Zero the tail when the saved stack was smaller than the current one. */
    if (cbStack < PATM_STACK_TOTAL_SIZE)
        memset((uint8_t *)pVM->patm.s.pGCStackHC + cbStack, 0, PATM_STACK_TOTAL_SIZE - cbStack);

    /*
     * Load all patches
     */
    for (unsigned i = 0; i < patmInfo.savedstate.cPatches; i++)
    {
        PATMPATCHRECSSM patch;
        PATMPATCHREC *pPatchRec;

        RT_ZERO(patch);
        rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), fStructRestoreFlags, &g_aPatmPatchRecFields[0], NULL);
        AssertRCReturn(rc, rc);
        Log4(("patmR3Load: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
              patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
              patch.patch.nrFixups, patch.patch.nrJumpRecs));

        /* The global functions patch is handled separately below, not via the per-patch stream. */
        Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));

        rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("Out of memory!!!!\n"));
            return VERR_NO_MEMORY;
        }

        /* Convert SSM version to memory. */
        patmR3PatchConvertSSM2Mem(pPatchRec, &patch);

        Log(("Restoring patch %RRv -> %RRv state %x\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset, pPatchRec->patch.uState));
        bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
        Assert(ret);
        if (pPatchRec->patch.uState != PATCH_REFUSED)
        {
            if (pPatchRec->patch.pPatchBlockOffset)
            {
                /* We actually generated code for this patch. */
                ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
                AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
            }
        }
        /* Set to zero as we don't need it anymore. */
        pPatchRec->patch.pTempInfo = 0;

        PATMP2GLOOKUPREC cacheRec;
        RT_ZERO(cacheRec);
        cacheRec.pPatch = &pPatchRec->patch;

        uint8_t *pPrivInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pPatchRec->patch.pPrivInstrGC);
        /* Can fail due to page or page table not present. */

        /*
         * Restore fixup records and correct HC pointers in fixup records
         */
        pPatchRec->patch.FixupTree = 0;
        pPatchRec->patch.nrFixups = 0;    /* increased by patmPatchAddReloc32 */
        for (unsigned j = 0; j < patch.patch.nrFixups; j++)
        {
            RELOCREC rec;
            int32_t offset;
            RTRCPTR *pFixup;

            RT_ZERO(rec);
            rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRelocRec[0], NULL);
            AssertRCReturn(rc, rc);

            /* NOTE(review): when pPrivInstrHC is NULL the record is read from the stream but
               silently dropped; the patch itself is removed further down when it was enabled. */
            if (pPrivInstrHC)
            {
                /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
                offset = (int32_t)(intptr_t)rec.pRelocPos;
                /* Convert to HC pointer again. */
                PATM_ADD_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
                pFixup = (RTRCPTR *)rec.pRelocPos;

                if (pPatchRec->patch.uState != PATCH_REFUSED)
                {
                    if (    rec.uType == FIXUP_REL_JMPTOPATCH
                        &&  (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
                    {
                        /* Patched guest jump: the relocation position lives in guest code,
                           right after the (conditional) jump opcode byte(s). */
                        Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
                        unsigned offset2 = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;

                        rec.pRelocPos = pPrivInstrHC + offset2;
                        pFixup = (RTRCPTR *)rec.pRelocPos;
                    }

                    patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
                }

                rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
                AssertRCReturn(rc, rc);
            }
        }
        /* Release previous lock if any. */
        if (cacheRec.Lock.pvMap)
            PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);

        /* And all patch to guest lookup records */
        Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));

        pPatchRec->patch.Patch2GuestAddrTree = 0;
        pPatchRec->patch.Guest2PatchAddrTree = 0;
        if (pPatchRec->patch.nrPatch2GuestRecs)
        {
            RECPATCHTOGUEST rec;
            uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;

            pPatchRec->patch.nrPatch2GuestRecs = 0;  /* incremented by patmr3AddP2GLookupRecord */
            for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
            {
                RT_ZERO(rec);
                rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRecPatchToGuest[0], NULL);
                AssertRCReturn(rc, rc);

                /* rec.Core.Key is the offset into patch memory; rebase it onto the current HC mapping. */
                patmR3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
            }
            Assert(pPatchRec->patch.Patch2GuestAddrTree);
        }

        if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
        {
            /* Insert the guest page lookup records (for detection self-modifying code) */
            rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
            AssertRCReturn(rc, rc);
        }

#if 0 /* can fail def LOG_ENABLED */
        if (    pPatchRec->patch.uState != PATCH_REFUSED
            &&  !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
        {
            pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
            Log(("Patch code ----------------------------------------------------------\n"));
            patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
            Log(("Patch code ends -----------------------------------------------------\n"));
            MMR3HeapFree(pPatchRec->patch.pTempInfo);
            pPatchRec->patch.pTempInfo = NULL;
        }
#endif
        /* Remove the patch in case the gc mapping is not present. */
        if (    !pPrivInstrHC
            &&  pPatchRec->patch.uState == PATCH_ENABLED)
        {
            Log(("Remove patch %RGv due to failed HC address translation\n", pPatchRec->patch.pPrivInstrGC));
            PATMR3RemovePatch(pVM, pPatchRec->patch.pPrivInstrGC);
        }
    }

    /*
     * Correct absolute fixups in the global patch. (helper functions)
     * Bit of a mess. Uses the new patch record, but restored patch functions.
     */
    PRELOCREC pRec = 0;
    AVLPVKEY  key  = 0;

    Log(("Correct fixups in global helper functions\n"));
    /* Walk the fixup tree in key order by repeatedly asking for the best fit above the last key. */
    while (true)
    {
        int32_t offset;
        RTRCPTR *pFixup;

        /* Get the record that's closest from above */
        pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
        if (pRec == 0)
            break;

        key = (AVLPVKEY)(pRec->pRelocPos + 1);   /* search for the next record during the next round. */

        /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
        offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
        pFixup = (RTRCPTR *)pRec->pRelocPos;

        /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
        patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
    }

#ifdef VBOX_WITH_STATISTICS
    /*
     * Restore relevant old statistics
     */
    pVM->patm.s.StatDisabled  = patmInfo.StatDisabled;
    pVM->patm.s.StatUnusable  = patmInfo.StatUnusable;
    pVM->patm.s.StatEnabled   = patmInfo.StatEnabled;
    pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
#endif

    return VINF_SUCCESS;
}
1109
/**
 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
 *
 * Absolute fixups that point into relocated PATM structures (GC state, CPUM
 * context, statistics, stack, patch memory) are rebased onto the current
 * addresses; relative jump fixups between guest code and patch code are
 * recomputed and, for guest-side jumps, patched back into guest memory.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   ulSSMVersion    SSM version
 * @param   patmInfo        Saved PATM structure
 * @param   pPatch          Patch record
 * @param   pRec            Relocation record
 * @param   offset          Offset of referenced data/code
 * @param   pFixup          Fixup address
 */
static void patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec, int32_t offset, RTRCPTR *pFixup)
{
    /* Displacement of the whole patch memory block between save and restore. */
    int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;

    switch (pRec->uType)
    {
    case FIXUP_ABSOLUTE:
    {
        /* Only fixups whose source lies inside patch memory (or has no source) are corrected. */
        if (pRec->pSource && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource))
            break;

        /* Classify the target by the saved-state address range it fell into and rebase it. */
        if (    *pFixup >= patmInfo.pGCStateGC
            &&  *pFixup <  patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
        {
            LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
            *pFixup = (*pFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
        }
        else
        if (    *pFixup >= patmInfo.pCPUMCtxGC
            &&  *pFixup <  patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
        {
            LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));

            /* The CPUMCTX structure has completely changed, so correct the offsets too. */
            if (ulSSMVersion == PATM_SSM_VERSION_VER16)
            {
                unsigned uCPUMOffset = *pFixup - patmInfo.pCPUMCtxGC;

                /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
                 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
                 * function is not available in older gcc versions, at least not in gcc-3.3 */
                if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
                {
                    LogFlow(("Changing dr[0] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[0])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
                {
                    LogFlow(("Changing dr[1] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[1])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
                {
                    LogFlow(("Changing dr[2] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[2])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
                {
                    LogFlow(("Changing dr[3] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[3])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
                {
                    LogFlow(("Changing dr[4] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[4])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
                {
                    LogFlow(("Changing dr[5] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[5])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
                {
                    LogFlow(("Changing dr[6] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[6])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
                {
                    LogFlow(("Changing dr[7] offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, dr[7])));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
                {
                    LogFlow(("Changing cr0 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr0)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
                {
                    LogFlow(("Changing cr2 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr2)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
                {
                    LogFlow(("Changing cr3 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr3)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
                {
                    LogFlow(("Changing cr4 offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, cr4)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
                {
                    LogFlow(("Changing tr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, tr)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
                {
                    LogFlow(("Changing ldtr offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, ldtr)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
                {
                    LogFlow(("Changing pGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
                {
                    LogFlow(("Changing cbGdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
                {
                    LogFlow(("Changing pIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
                }
                else if (uCPUMOffset == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
                {
                    LogFlow(("Changing cbIdt offset from %x to %x\n", uCPUMOffset, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
                    *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
                }
                else
                    AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", uCPUMOffset));
            }
            else
                /* Same layout; a plain rebase suffices. */
                *pFixup = (*pFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
        }
        else
        if (    *pFixup >= patmInfo.pStatsGC
            &&  *pFixup <  patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
        {
            LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
            *pFixup = (*pFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
        }
        else
        if (    *pFixup >= patmInfo.pGCStackGC
            &&  *pFixup <  patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
        {
            LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
            *pFixup = (*pFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
        }
        else
        if (    *pFixup >= patmInfo.pPatchMemGC
            &&  *pFixup <  patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
        {
            LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, *pFixup, (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
            *pFixup = (*pFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
        }
        else
        /* Boldly ASSUMES:
         * 1. That pCPUMCtxGC is in the VM structure and that its location is
         *    at the first page of the same 4 MB chunk.
         * 2. That the forced actions were in the first 32 bytes of the VM
         *    structure.
         * 3. That the CPUM leafs are less than 8KB into the structure. */
        if (    ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
            &&  *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
        {
            LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
            *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
        }
        else
        if (    ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
            &&  *pFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(8192))
        {
            static int cCpuidFixup = 0;
#ifdef LOG_ENABLED
            RTRCPTR oldFixup = *pFixup;
#endif
            /* very dirty assumptions about the cpuid patch and cpuid ordering. */
            /* NOTE(review): the static counter also assumes exactly one VM is restored per
               process with these old saved states; a second restore would start mid-cycle. */
            switch(cCpuidFixup & 3)
            {
            case 0:
                *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                break;
            case 1:
                *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                break;
            case 2:
                *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                break;
            case 3:
                *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                break;
            }
            LogFlow(("Changing cpuid fixup %d from %RRv to %RRv\n", cCpuidFixup, oldFixup, *pFixup));
            cCpuidFixup++;
        }
        else
        if (ulSSMVersion >= PATM_SSM_VERSION_MEM)
        {
#ifdef LOG_ENABLED
            RTRCPTR oldFixup = *pFixup;
#endif
            /* Core.Key abused to store the type of fixup */
            switch ((uintptr_t)pRec->Core.Key)
            {
            case PATM_FIXUP_CPU_FF_ACTION:
                *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                LogFlow(("Changing cpu ff action fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_DEFAULT:
                *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                LogFlow(("Changing cpuid def fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_STANDARD:
                *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                LogFlow(("Changing cpuid std fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_EXTENDED:
                *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                LogFlow(("Changing cpuid ext fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            case PATM_FIXUP_CPUID_CENTAUR:
                *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                LogFlow(("Changing cpuid centaur fixup from %x to %x\n", oldFixup, *pFixup));
                break;
            default:
                AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
                break;
            }
        }

#ifdef RT_OS_WINDOWS
        AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
#endif
        break;
    }

    case FIXUP_REL_JMPTOPATCH:
    {
        /* Jump from guest code into the (relocated) patch. */
        RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);

        if (    pPatch->uState == PATCH_ENABLED
            &&  (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
        {
            uint8_t    oldJump[SIZEOF_NEAR_COND_JUMP32];
            uint8_t    temp[SIZEOF_NEAR_COND_JUMP32];
            RTRCPTR    pJumpOffGC;
            RTRCINTPTR displ   = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
            RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;

            Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));

            Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
            if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
            {
                Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);

                pJumpOffGC = pPatch->pPrivInstrGC + 2;    //two byte opcode
                oldJump[0] = pPatch->aPrivInstr[0];
                oldJump[1] = pPatch->aPrivInstr[1];
                *(RTRCUINTPTR *)&oldJump[2] = displOld;
            }
            else
#endif
            if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
            {
                pJumpOffGC = pPatch->pPrivInstrGC + 1;    //one byte opcode
                oldJump[0] = 0xE9;
                *(RTRCUINTPTR *)&oldJump[1] = displOld;
            }
            else
            {
                AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
                break;
            }
            Assert(pPatch->cbPatchJump <= sizeof(temp));

            /*
             * Read old patch jump and compare it to the one we previously installed
             */
            int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
            Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);

            if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
            {
                /* Page not mapped yet; monitor it so the jump can be validated when it appears. */
                RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;

                rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
                Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
            }
            else
            if (memcmp(temp, oldJump, pPatch->cbPatchJump))
            {
                Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
                /*
                 * Disable patch; this is not a good solution
                 */
                /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
                pPatch->uState = PATCH_DISABLED;
            }
            else
            if (RT_SUCCESS(rc))
            {
                /* Jump intact; write the recomputed displacement into guest code. */
                rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
                AssertRC(rc);
            }
            else
                AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
        }
        else
            Log(("Skip the guest jump to patch code for this disabled patch %08X\n", pRec->pRelocPos));

        pRec->pDest = pTarget;
        break;
    }

    case FIXUP_REL_JMPTOGUEST:
    {
        /* Jump from (relocated) patch code back into guest code; only the source moved. */
        RTRCPTR    pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
        RTRCINTPTR displ   = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;

        Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
        Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
        *(RTRCUINTPTR *)pRec->pRelocPos = displ;
        pRec->pSource = pSource;
        break;

    }
}
}
1445
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette