VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMPatch.cpp@21994

Last change on this file since 21994 was 19141, checked in by vboxsync, 16 years ago

Action flags breakup.
Fixed PGM saved state loading of 2.2.2 images.
Reduced hacks in PATM state loading (fixups).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 52.4 KB
/* $Id: PATMPatch.cpp 19141 2009-04-23 13:52:18Z vboxsync $ */
/** @file
 * PATMPatch - Dynamic Guest OS Instruction patches
 *
 * NOTE: CSAM assumes patch memory is never reused!!
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/patm.h>
#include <VBox/stam.h>
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/ssm.h>
#include <VBox/pdm.h>
#include <VBox/trpm.h>
#include <VBox/param.h>
#include <iprt/avl.h>
#include "PATMInternal.h"
#include <VBox/vm.h>
#include <VBox/csam.h>

#include <VBox/dbg.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>

#include <stdlib.h>
#include <stdio.h>
#include "PATMA.h"
#include "PATMPatch.h"

/* Internal structure for passing more information about call fixups to patmPatchGenCode. */
typedef struct
{
    RTRCPTR pTargetGC;
    RTRCPTR pCurInstrGC;
    RTRCPTR pNextInstrGC;
    RTRCPTR pReturnGC;
} PATMCALLINFO, *PPATMCALLINFO;

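/**
 * Add a fixup record for a patch instruction that needs relocation.
 *
 * @returns VBox status code.
 * @param   pVM      The VM to operate on.
 * @param   pPatch   Patch record
 * @param   pRelocHC Host context pointer to the fixup location in patch memory
 * @param   uType    Fixup type (FIXUP_ABSOLUTE, FIXUP_REL_JMPTOPATCH or FIXUP_REL_JMPTOGUEST)
 * @param   pSource  Fixup source address (relative fixups only)
 * @param   pDest    Fixup destination address (relative fixups only)
 */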
int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType, RTRCPTR pSource, RTRCPTR pDest)
{
    PRELOCREC pRec;

    Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));

    LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemHC + pVM->patm.s.pPatchMemGC, pSource, pDest));

    pRec = (PRELOCREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
    Assert(pRec);
    pRec->Core.Key  = (AVLPVKEY)pRelocHC;
    pRec->pRelocPos = pRelocHC; /** @todo redundant. */
    pRec->pSource   = pSource;
    pRec->pDest     = pDest;
    pRec->uType     = uType;

    bool ret = RTAvlPVInsert(&pPatch->FixupTree, &pRec->Core);
    Assert(ret); NOREF(ret);
    pPatch->nrFixups++;

    return VINF_SUCCESS;
}

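/**
 * Add a jump record for a branch in patch code whose target is resolved later
 * (see patmr3SetBranchTargets).
 *
 * @returns VBox status code.
 * @param   pVM       The VM to operate on.
 * @param   pPatch    Patch record
 * @param   pJumpHC   Host context pointer to the jump instruction in patch memory
 * @param   offset    Offset of the jump displacement within the instruction
 * @param   pTargetGC Guest context jump target
 * @param   opcode    Original branch opcode (e.g. OP_CALL, OP_JMP)
 */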
int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTRCPTR pTargetGC, uint32_t opcode)
{
    PJUMPREC pRec;

    pRec = (PJUMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
    Assert(pRec);

    pRec->Core.Key  = (AVLPVKEY)pJumpHC;
    pRec->pJumpHC   = pJumpHC; /** @todo redundant. */
    pRec->offDispl  = offset;
    pRec->pTargetGC = pTargetGC;
    pRec->opcode    = opcode;

    bool ret = RTAvlPVInsert(&pPatch->JumpTree, &pRec->Core);
    Assert(ret); NOREF(ret);
    pPatch->nrJumpRecs++;

    return VINF_SUCCESS;
}

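/*
 * All generator functions below follow the same pattern: PATCHGEN_PROLOG points pPB
 * at the current write position in patch memory and fails with VERR_NO_MEMORY when
 * fewer than 256 bytes remain; PATCHGEN_EPILOG advances the patch offset by the
 * number of bytes actually emitted. PATCHGEN_PROLOG_NODEF is the variant for
 * functions that have already declared pPB.
 */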
#define PATCHGEN_PROLOG_NODEF(pVM, pPatch) \
    pPB = PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset; \
    \
    if (pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) \
    { \
        pVM->patm.s.fOutOfMemory = true; \
        Assert(pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem); \
        return VERR_NO_MEMORY; \
    }

#define PATCHGEN_PROLOG(pVM, pPatch) \
    uint8_t *pPB; \
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);


#define PATCHGEN_EPILOG(pPatch, size) \
    Assert(size <= 640); \
    pPatch->uCurPatchOffset += size;


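/**
 * Copy a pre-assembled code block (see PATMA.asm) into the patch buffer and resolve
 * all fixups recorded in its relocation table.
 *
 * @returns Size of the generated code block; when fGenJump is false the exit jump
 *          contained in the template (if any) is not counted.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pPB           Host context write position in the patch buffer
 * @param   pAsmRecord    Assembly template record (code, size and relocation table)
 * @param   pReturnAddrGC Guest address to jump back to (only when fGenJump is true)
 * @param   fGenJump      Generate the jump back to guest code
 * @param   pCallInfo     Optional call information for call/jump related fixups
 */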
static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PPATCHASMRECORD pAsmRecord, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fGenJump,
                                 PPATMCALLINFO pCallInfo = 0)
{
    uint32_t i, j;

    Assert(fGenJump == false || pReturnAddrGC);
    Assert(fGenJump == false || pAsmRecord->offJump);
    Assert(pAsmRecord && pAsmRecord->size > sizeof(pAsmRecord->uReloc[0]));

    // Copy the code block
    memcpy(pPB, pAsmRecord->pFunction, pAsmRecord->size);

    // Process all fixups
    for (j = 0, i = 0; i < pAsmRecord->nrRelocs * 2; i += 2)
    {
        for (; j < pAsmRecord->size; j++)
        {
            if (*(uint32_t*)&pPB[j] == pAsmRecord->uReloc[i])
            {
                RCPTRTYPE(uint32_t *) dest;

#ifdef VBOX_STRICT
                if (pAsmRecord->uReloc[i] == PATM_FIXUP)
                    Assert(pAsmRecord->uReloc[i+1] != 0);
                else
                    Assert(pAsmRecord->uReloc[i+1] == 0);
#endif

                /*
                 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN
                 * RESTORING A SAVED STATE WITH A DIFFERENT HYPERVISOR LAYOUT.
                 */
                switch (pAsmRecord->uReloc[i])
                {
                    case PATM_VMFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
                        break;

                    case PATM_PENDINGACTION:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
                        break;

                    case PATM_FIXUP:
                        /* Offset in uReloc[i+1] is from the base of the function. */
                        dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->uReloc[i+1] + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
                        break;
#ifdef VBOX_WITH_STATISTICS
                    case PATM_ALLPATCHCALLS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
                        break;

                    case PATM_IRETEFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
                        break;

                    case PATM_IRETCS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
                        break;

                    case PATM_IRETEIP:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
                        break;

                    case PATM_PERPATCHCALLS:
                        dest = patmPatchQueryStatAddress(pVM, pPatch);
                        break;
#endif
                    case PATM_STACKPTR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
                        break;

                    /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
                     * part to store the original return addresses.
                     */
                    case PATM_STACKBASE:
                        dest = pVM->patm.s.pGCStackGC;
                        break;

                    case PATM_STACKBASE_GUEST:
                        dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
                        break;

                    case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
                        Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);
                        dest = pCallInfo->pReturnGC;
                        break;

                    case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
                        Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                        /** @note hardcoded assumption that we must return to the instruction following this block */
                        dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->size;
                        break;

                    case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
                        Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                        /* Address must be filled in later. (see patmr3SetBranchTargets) */
                        patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
                        dest = PATM_ILLEGAL_DESTINATION;
                        break;

                    case PATM_PATCHBASE:    /* Patch GC base address */
                        dest = pVM->patm.s.pPatchMemGC;
                        break;

                    case PATM_CPUID_STD_PTR:
                        /** @todo dirty hack when correcting this fixup (state restore) */
                        dest = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                        break;

                    case PATM_CPUID_EXT_PTR:
                        /** @todo dirty hack when correcting this fixup (state restore) */
                        dest = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                        break;

                    case PATM_CPUID_CENTAUR_PTR:
                        /** @todo dirty hack when correcting this fixup (state restore) */
                        dest = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                        break;

                    case PATM_CPUID_DEF_PTR:
                        /** @todo dirty hack when correcting this fixup (state restore) */
                        dest = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                        break;

                    case PATM_CPUID_STD_MAX:
                        dest = CPUMGetGuestCpuIdStdMax(pVM);
                        break;

                    case PATM_CPUID_EXT_MAX:
                        dest = CPUMGetGuestCpuIdExtMax(pVM);
                        break;

                    case PATM_CPUID_CENTAUR_MAX:
                        dest = CPUMGetGuestCpuIdCentaurMax(pVM);
                        break;

                    case PATM_INTERRUPTFLAG:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
                        break;

                    case PATM_INHIBITIRQADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
                        break;

                    case PATM_NEXTINSTRADDR:
                        Assert(pCallInfo);
                        /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
                        dest = pCallInfo->pNextInstrGC;
                        break;

                    case PATM_CURINSTRADDR:
                        Assert(pCallInfo);
                        dest = pCallInfo->pCurInstrGC;
                        break;

                    case PATM_VM_FORCEDACTIONS:
                        /** @todo dirty assumptions when correcting this fixup during saved state loading. */
                        dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                        break;

                    case PATM_TEMP_EAX:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
                        break;
                    case PATM_TEMP_ECX:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
                        break;
                    case PATM_TEMP_EDI:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
                        break;
                    case PATM_TEMP_EFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
                        break;
                    case PATM_TEMP_RESTORE_FLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
                        break;
                    case PATM_CALL_PATCH_TARGET_ADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
                        break;
                    case PATM_CALL_RETURN_ADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
                        break;

                    /* Relative address of global patm lookup and call function. */
                    case PATM_LOOKUP_AND_CALL_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperCallGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_RETURN_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperRetGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_IRET_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperIretGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_LOOKUP_AND_JUMP_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperJumpGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
                        break;
                    }

                    default:
                        dest = PATM_ILLEGAL_DESTINATION;
                        AssertRelease(0);
                        break;
                }

                *(RTRCPTR *)&pPB[j] = dest;
                if (pAsmRecord->uReloc[i] < PATM_NO_FIXUP)
                {
                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE);
                }
                break;
            }
        }
        Assert(j < pAsmRecord->size);
    }
    Assert(pAsmRecord->uReloc[i] == 0xffffffff);

    /* Add the jump back to guest code (if required) */
    if (fGenJump)
    {
        int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);

        /* Add lookup record for patch to guest address translation */
        Assert(pPB[pAsmRecord->offJump - 1] == 0xE9);
        patmr3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

        *(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
        patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
                            PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32,
                            pReturnAddrGC);
    }

    // Calculate the right size of this patch block
    if ((fGenJump && pAsmRecord->offJump) || (!fGenJump && !pAsmRecord->offJump))
    {
        return pAsmRecord->size;
    }
    else
    {
        // If a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32.
        return pAsmRecord->size - SIZEOF_NEARJUMP32;
    }
}

/* Read bytes and check for overwritten instructions. */
static int patmPatchReadBytes(PVM pVM, uint8_t *pDest, RTRCPTR pSrc, uint32_t cb)
{
    int rc = PGMPhysSimpleReadGCPtr(&pVM->aCpus[0], pDest, pSrc, cb);
    AssertRCReturn(rc, rc);
    /*
     * Could be patched already; make sure this is checked!
     */
    for (uint32_t i = 0; i < cb; i++)
    {
        uint8_t temp;

        int rc2 = PATMR3QueryOpcode(pVM, pSrc+i, &temp);
        if (RT_SUCCESS(rc2))
        {
            pDest[i] = temp;
        }
        else
            break; /* no more */
    }
    return VINF_SUCCESS;
}

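/**
 * Duplicate the original guest instruction verbatim in the patch block.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state of the instruction
 * @param   pCurInstrGC Guest context pointer to the current instruction
 */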
int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    PATCHGEN_PROLOG(pVM, pPatch);

    rc = patmPatchReadBytes(pVM, pPB, pCurInstrGC, pCpu->opsize);
    AssertRC(rc);
    PATCHGEN_EPILOG(pPatch, pCpu->opsize);
    return rc;
}

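/**
 * Generate the patch replacement code for an iret instruction.
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pCurInstrGC   Guest context pointer to the iret instruction
 * @param   fSizeOverride Operand size override prefix present (asserted to be false)
 */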
int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, bool fSizeOverride)
{
    uint32_t size;
    PATMCALLINFO callInfo;

    PATCHGEN_PROLOG(pVM, pPatch);

    AssertMsg(fSizeOverride == false, ("operand size override!!\n"));

    callInfo.pCurInstrGC = pCurInstrGC;

    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

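/**
 * Generate the patch replacement code for a cli instruction.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch record
 */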
int patmPatchGenCli(PVM pVM, PPATCHINFO pPatch)
{
    uint32_t size;
    PATCHGEN_PROLOG(pVM, pPatch);

    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCliRecord, 0, false);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/*
 * Generate an STI patch
 */
int patmPatchGenSti(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RTRCPTR pNextInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t     size;

    Log(("patmPatchGenSti at %RRv; next %RRv\n", pCurInstrGC, pNextInstrGC));
    PATCHGEN_PROLOG(pVM, pPatch);
    callInfo.pNextInstrGC = pNextInstrGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStiRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}


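/**
 * Generate the patch replacement code for a popf instruction.
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pReturnAddrGC Guest address of the instruction following the popf
 * @param   fSizeOverride Operand size override prefix present (16 bits operand)
 * @param   fGenJumpBack  Generate the jump back to guest code
 */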
int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
{
    uint32_t size;
    PATMCALLINFO callInfo;

    PATCHGEN_PROLOG(pVM, pPatch);

    callInfo.pNextInstrGC = pReturnAddrGC;

    Log(("patmPatchGenPopf at %RRv\n", pReturnAddrGC));

    /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
    if (fSizeOverride == true)
    {
        Log(("operand size override!!\n"));
        size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf16Record : &PATMPopf16Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
    }
    else
    {
        size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf32Record : &PATMPopf32Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
    }

    PATCHGEN_EPILOG(pPatch, size);
    STAM_COUNTER_INC(&pVM->patm.s.StatGenPopf);
    return VINF_SUCCESS;
}

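/**
 * Generate the patch replacement code for a pushf instruction.
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   fSizeOverride Operand size override prefix present (16 bits operand)
 */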
int patmPatchGenPushf(PVM pVM, PPATCHINFO pPatch, bool fSizeOverride)
{
    uint32_t size;
    PATCHGEN_PROLOG(pVM, pPatch);

    if (fSizeOverride == true)
    {
        Log(("operand size override!!\n"));
        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf16Record, 0, false);
    }
    else
    {
        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf32Record, 0, false);
    }

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

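/**
 * Generate the patch replacement code for a push cs instruction.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch record
 */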
int patmPatchGenPushCS(PVM pVM, PPATCHINFO pPatch)
{
    uint32_t size;
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushCSRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

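/**
 * Generate the patch replacement code for loop, loopz, loopnz and jecxz; the branch
 * target is filled in later via the jump record added here.
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pTargetGC     Guest context jump target
 * @param   opcode        Instruction opcode (OP_LOOP, OP_LOOPNE, OP_LOOPE or OP_JECXZ)
 * @param   fSizeOverride Size override prefix present (ecx vs cx counter)
 */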
int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
{
    uint32_t size = 0;
    PPATCHASMRECORD pPatchAsmRec;

    PATCHGEN_PROLOG(pVM, pPatch);

    switch (opcode)
    {
        case OP_LOOP:
            pPatchAsmRec = &PATMLoopRecord;
            break;
        case OP_LOOPNE:
            pPatchAsmRec = &PATMLoopNZRecord;
            break;
        case OP_LOOPE:
            pPatchAsmRec = &PATMLoopZRecord;
            break;
        case OP_JECXZ:
            pPatchAsmRec = &PATMJEcxRecord;
            break;
        default:
            AssertMsgFailed(("PatchGenLoop: invalid opcode %d\n", opcode));
            return VERR_INVALID_PARAMETER;
    }
    Assert(pPatchAsmRec->offSizeOverride && pPatchAsmRec->offRelJump);

    Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));

    // Generate the patch code
    size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);

    if (fSizeOverride)
    {
        pPB[pPatchAsmRec->offSizeOverride] = 0x66;  // ecx -> cx or vice versa
    }

    *(RTRCPTR *)&pPB[pPatchAsmRec->offRelJump] = 0xDEADBEEF;

    patmPatchAddJump(pVM, pPatch, &pPB[pPatchAsmRec->offRelJump - 1], 1, pTargetGC, opcode);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

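/**
 * Generate a conditional or unconditional 32 bits jump in patch code; the
 * displacement is filled in later via the jump record added here.
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pTargetGC     Guest context jump target
 * @param   opcode        Branch opcode (OP_JMP, conditional jumps or loop/jecxz variants)
 * @param   fSizeOverride Size override prefix present
 */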
int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
{
    uint32_t offset = 0;
    PATCHGEN_PROLOG(pVM, pPatch);

    // Internal relative jumps from patch code to patch code; no relocation record required.

    Assert(PATMIsPatchGCAddr(pVM, pTargetGC) == false);

    switch (opcode)
    {
        case OP_JO:
            pPB[1] = 0x80;
            break;
        case OP_JNO:
            pPB[1] = 0x81;
            break;
        case OP_JC:
            pPB[1] = 0x82;
            break;
        case OP_JNC:
            pPB[1] = 0x83;
            break;
        case OP_JE:
            pPB[1] = 0x84;
            break;
        case OP_JNE:
            pPB[1] = 0x85;
            break;
        case OP_JBE:
            pPB[1] = 0x86;
            break;
        case OP_JNBE:
            pPB[1] = 0x87;
            break;
        case OP_JS:
            pPB[1] = 0x88;
            break;
        case OP_JNS:
            pPB[1] = 0x89;
            break;
        case OP_JP:
            pPB[1] = 0x8A;
            break;
        case OP_JNP:
            pPB[1] = 0x8B;
            break;
        case OP_JL:
            pPB[1] = 0x8C;
            break;
        case OP_JNL:
            pPB[1] = 0x8D;
            break;
        case OP_JLE:
            pPB[1] = 0x8E;
            break;
        case OP_JNLE:
            pPB[1] = 0x8F;
            break;

        case OP_JMP:
            /* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
            /* Add lookup record for patch to guest address translation */
            patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);

            pPB[0] = 0xE9;
            break;

        case OP_JECXZ:
        case OP_LOOP:
        case OP_LOOPNE:
        case OP_LOOPE:
            return patmPatchGenLoop(pVM, pPatch, pTargetGC, opcode, fSizeOverride);

        default:
            AssertMsg(0, ("Invalid jump opcode %d\n", opcode));
            return VERR_PATCHING_REFUSED;
    }
    if (opcode != OP_JMP)
    {
        pPB[0] = 0x0F;
        offset += 2;
    }
    else
        offset++;

    *(RTRCPTR *)&pPB[offset] = 0xDEADBEEF;

    patmPatchAddJump(pVM, pPatch, pPB, offset, pTargetGC, opcode);

    offset += sizeof(RTRCPTR);

    PATCHGEN_EPILOG(pPatch, offset);
    return VINF_SUCCESS;
}

/*
 * Rewrite call to dynamic or currently unknown function (on-demand patching of function)
 */
int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC, RTRCPTR pTargetGC, bool fIndirect)
{
    PATMCALLINFO callInfo;
    uint32_t     offset;
    uint32_t     i, size;
    int          rc;

    /** @note Don't check for IF=1 here. The ret instruction will do this. */
    /** @note It's dangerous to do this for 'normal' patches; the jump target might be inside the generated patch jump. (seen this!) */

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    if (fIndirect)
    {
        Log(("patmPatchGenIndirectCall\n"));
        Assert(pCpu->param1.size == 4);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J);

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        /* Include prefix byte to make sure we don't use the incorrect selector register. */
        if (pCpu->prefix & PREFIX_SEG)
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        pPB[offset++] = 0xFF;   // push r/m32
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
        i = 2;  /* standard offset of modrm bytes */
        if (pCpu->prefix & PREFIX_OPSIZE)
            i++;    // skip operand prefix
        if (pCpu->prefix & PREFIX_SEG)
            i++;    // skip segment prefix

        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->opsize - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->opsize - i);
    }
    else
    {
        AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%RRv)?!?\n", pTargetGC));
        Assert(pTargetGC);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J);

        /** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */

        /* Relative call to patch code (patch to patch -> no fixup). */
        Log(("PatchGenCall from %RRv (next=%RRv) to %RRv\n", pCurInstrGC, pCurInstrGC + pCpu->opsize, pTargetGC));

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        pPB[offset++] = 0x68;   // push %Iv
        *(RTRCPTR *)&pPB[offset] = pTargetGC;
        offset += sizeof(RTRCPTR);
    }

    /* Align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i = 0; i < size; i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->opsize;
    callInfo.pTargetGC = (fIndirect) ? 0xDEADBEEF : pTargetGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, (fIndirect) ? &PATMCallIndirectRecord : &PATMCallRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    /* Need to set PATM_INTERRUPTFLAG after the patched ret returns here. */
    rc = patmPatchGenSetPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenCall);
    return VINF_SUCCESS;
}

/**
 * Generate indirect jump to unknown destination
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Current instruction address
 */
int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t     offset;
    uint32_t     i, size;
    int          rc;

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    Log(("patmPatchGenIndirectJump\n"));
    Assert(pCpu->param1.size == 4);
    Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J);

    /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
     * a page fault. The assembly code restores the stack afterwards.
     */
    offset = 0;
    /* Include prefix byte to make sure we don't use the incorrect selector register. */
    if (pCpu->prefix & PREFIX_SEG)
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);

    pPB[offset++] = 0xFF;   // push r/m32
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
    i = 2;  /* standard offset of modrm bytes */
    if (pCpu->prefix & PREFIX_OPSIZE)
        i++;    // skip operand prefix
    if (pCpu->prefix & PREFIX_SEG)
        i++;    // skip segment prefix

    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->opsize - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->opsize - i);

    /* Align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i = 0; i < size; i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->opsize;
    callInfo.pTargetGC = 0xDEADBEEF;
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpIndirectRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenJump);
    return VINF_SUCCESS;
}

/**
 * Generate return instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch structure
 * @param   pCpu        Disassembly struct
 * @param   pCurInstrGC Current instruction pointer
 *
 */
int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int size = 0, rc;
    RTRCPTR pPatchRetInstrGC;

    /* Remember start of this patch for below. */
    pPatchRetInstrGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;

    Log(("patmPatchGenRet %RRv\n", pCurInstrGC));

    /** @note optimization: multiple identical ret instructions in a single patch can share a single patched ret. */
    if (    pPatch->pTempInfo->pPatchRetInstrGC
        &&  pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->param1.parval) /* nr of bytes popped off the stack should be identical of course! */
    {
        Assert(pCpu->pCurInstr->opcode == OP_RETN);
        STAM_COUNTER_INC(&pVM->patm.s.StatGenRetReused);

        return patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, pPatch->pTempInfo->pPatchRetInstrGC);
    }

    /* Jump back to the original instruction if IF is set again. */
    Assert(!PATMFindActivePatchByEntrypoint(pVM, pCurInstrGC));
    rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
    AssertRCReturn(rc, rc);

    /* Align this block properly to make sure the jump table will not be misaligned. */
    PATCHGEN_PROLOG(pVM, pPatch);
    size = (RTHCUINTPTR)pPB & 3;
    if (size)
        size = 4 - size;

    for (int i = 0; i < size; i++)
        pPB[i] = 0x90;   /* nop */
    PATCHGEN_EPILOG(pPatch, size);

    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenRet);
    /* Duplicate the ret or ret n instruction; it will use the PATM return address */
    rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);

    if (rc == VINF_SUCCESS)
    {
        pPatch->pTempInfo->pPatchRetInstrGC = pPatchRetInstrGC;
        pPatch->pTempInfo->uPatchRetParam1  = pCpu->param1.parval;
    }
    return rc;
}

/**
 * Generate all global patm functions
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch structure
 *
 */
int patmPatchGenGlobalFunctions(PVM pVM, PPATCHINFO pPatch)
{
    int size = 0;

    pVM->patm.s.pfnHelperCallGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndCallRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperRetGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperJumpGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndJumpRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperIretGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    Log(("pfnHelperCallGC %RRv\n", pVM->patm.s.pfnHelperCallGC));
    Log(("pfnHelperRetGC  %RRv\n", pVM->patm.s.pfnHelperRetGC));
    Log(("pfnHelperJumpGC %RRv\n", pVM->patm.s.pfnHelperJumpGC));
    Log(("pfnHelperIretGC %RRv\n", pVM->patm.s.pfnHelperIretGC));

    return VINF_SUCCESS;
}

/**
 * Generate illegal instruction (int 3)
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch structure
 *
 */
int patmPatchGenIllegalInstr(PVM pVM, PPATCHINFO pPatch)
{
    PATCHGEN_PROLOG(pVM, pPatch);

    pPB[0] = 0xCC;

    PATCHGEN_EPILOG(pPatch, 1);
    return VINF_SUCCESS;
}

/**
 * Check virtual IF flag and jump back to original guest code if set
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch structure
 * @param   pCurInstrGC Guest context pointer to the current instruction
 *
 */
int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
{
    uint32_t size;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate code to check for IF=1 before executing the call to the duplicated function. */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCheckIFRecord, pCurInstrGC, true);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/**
 * Set PATM interrupt flag
 *
 * @returns VBox status code.
 * @param   pVM      The VM to operate on.
 * @param   pPatch   Patch structure
 * @param   pInstrGC Corresponding guest instruction
 *
 */
int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);

    int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/**
 * Clear PATM interrupt flag
 *
 * @returns VBox status code.
 * @param   pVM      The VM to operate on.
 * @param   pPatch   Patch structure
 * @param   pInstrGC Corresponding guest instruction
 *
 */
int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);

    int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}


/**
 * Clear PATM inhibit irq flag
 *
 * @returns VBox status code.
 * @param   pVM          The VM to operate on.
 * @param   pPatch       Patch structure
 * @param   pNextInstrGC Next guest instruction
 */
int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTRCPTR pNextInstrGC)
{
    int size;
    PATMCALLINFO callInfo;

    PATCHGEN_PROLOG(pVM, pPatch);

    Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);

    callInfo.pNextInstrGC = pNextInstrGC;

    if (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQContIF0Record, 0, false, &callInfo);
    else
        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQFaultIF0Record, 0, false, &callInfo);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/**
 * Generate an interrupt handler entrypoint
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pIntHandlerGC IDT handler address
 *
 ** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
 */
int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
{
    uint32_t size;
    int rc = VINF_SUCCESS;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
    size = patmPatchGenCode(pVM, pPatch, pPB,
                            (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
                            0, false);

    PATCHGEN_EPILOG(pPatch, size);

    // Interrupt gates set IF to 0
    rc = patmPatchGenCli(pVM, pPatch);
    AssertRCReturn(rc, rc);

    return rc;
}

/**
 * Generate a trap handler entrypoint
 *
 * @returns VBox status code.
 * @param   pVM            The VM to operate on.
 * @param   pPatch         Patch record
 * @param   pTrapHandlerGC IDT handler address
 */
int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTrapHandlerGC)
{
    uint32_t size;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
    size = patmPatchGenCode(pVM, pPatch, pPB,
                            (pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE) ? &PATMTrapEntryRecordErrorCode : &PATMTrapEntryRecord,
                            pTrapHandlerGC, true);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}

#ifdef VBOX_WITH_STATISTICS
int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    uint32_t size;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for stats code -> guest handler. */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate code to keep calling statistics for this patch */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStatsRecord, pInstrGC, false);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}
#endif

/**
 * Debug register moves to or from general purpose registers
 *   mov GPR, DRx
 *   mov DRx, GPR
 *
 * @todo if we ever want to support hardware debug registers natively, then
 *       this will need to be changed!
 */
int patmPatchGenMovDebug(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    int reg, mod, rm, dbgreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch);

    mod = 0;    // effective address (only)
    rm  = 5;    // disp32
    if (pCpu->pCurInstr->param1 == OP_PARM_Dd)
    {
        Assert(0);  // We should never get here; this path is illegal!

        // mov DRx, GPR
        pPB[0] = 0x89;  // mov disp32, GPR
        Assert(pCpu->param1.flags & USE_REG_DBG);
        Assert(pCpu->param2.flags & USE_REG_GEN32);

        dbgreg = pCpu->param1.base.reg_dbg;
        reg    = pCpu->param2.base.reg_gen;
    }
    else
    {
        // mov GPR, DRx
        Assert(pCpu->param1.flags & USE_REG_GEN32);
        Assert(pCpu->param2.flags & USE_REG_DBG);

        pPB[0] = 0x8B;  // mov GPR, disp32
        reg    = pCpu->param1.base.reg_gen;
        dbgreg = pCpu->param2.base.reg_dbg;
    }

    pPB[1] = MAKE_MODRM(mod, reg, rm);

    AssertReturn(dbgreg <= USE_REG_DR7, VERR_INVALID_PARAMETER);
    offset = RT_OFFSETOF(CPUMCTX, dr[dbgreg]);

    *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
    return rc;
}

/*
 * Control register moves to or from general purpose registers
 *   mov GPR, CRx
 *   mov CRx, GPR
 */
int patmPatchGenMovControl(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    int reg, mod, rm, ctrlreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch);

    mod = 0;    // effective address (only)
    rm  = 5;    // disp32
    if (pCpu->pCurInstr->param1 == OP_PARM_Cd)
    {
        Assert(0);  // We should never get here; this path is illegal!

        // mov CRx, GPR
        pPB[0] = 0x89;  // mov disp32, GPR
        ctrlreg = pCpu->param1.base.reg_ctrl;
        reg     = pCpu->param2.base.reg_gen;
        Assert(pCpu->param1.flags & USE_REG_CR);
        Assert(pCpu->param2.flags & USE_REG_GEN32);
    }
    else
    {
        // mov GPR, CRx
        Assert(pCpu->param1.flags & USE_REG_GEN32);
        Assert(pCpu->param2.flags & USE_REG_CR);

        pPB[0] = 0x8B;  // mov GPR, disp32
        reg     = pCpu->param1.base.reg_gen;
        ctrlreg = pCpu->param2.base.reg_ctrl;
    }

    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /// @todo make this an array in the context structure
    switch (ctrlreg)
    {
        case USE_REG_CR0:
            offset = RT_OFFSETOF(CPUMCTX, cr0);
            break;
        case USE_REG_CR2:
            offset = RT_OFFSETOF(CPUMCTX, cr2);
            break;
        case USE_REG_CR3:
            offset = RT_OFFSETOF(CPUMCTX, cr3);
            break;
        case USE_REG_CR4:
            offset = RT_OFFSETOF(CPUMCTX, cr4);
            break;
        default: /* Shut up compiler warning. */
            AssertFailed();
            offset = 0;
            break;
    }
    *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
    return rc;
}

/*
 * mov GPR, SS
 */
int patmPatchGenMovFromSS(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    uint32_t size, offset;

    Log(("patmPatchGenMovFromSS %RRv\n", pCurInstrGC));

    Assert(pPatch->flags & PATMFL_CODE32);

    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* push ss */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    offset = 0;
    if (pCpu->prefix & PREFIX_OPSIZE)
        pPB[offset++] = 0x66;   /* size override -> 16 bits push */
    pPB[offset++] = 0x16;
    PATCHGEN_EPILOG(pPatch, offset);

    /* checks and corrects RPL of pushed ss */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMMovFromSSRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* pop general purpose register */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    offset = 0;
    if (pCpu->prefix & PREFIX_OPSIZE)
        pPB[offset++] = 0x66;   /* size override -> 16 bits pop */
    pPB[offset++] = 0x58 + pCpu->param1.base.reg_gen;
    PATCHGEN_EPILOG(pPatch, offset);


    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}


/**
 * Generate an sldt or str patch instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSldtStr(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    // sldt %Ew
    int rc = VINF_SUCCESS;
    uint32_t offset = 0;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->prefix == PREFIX_NONE || pCpu->prefix == PREFIX_OPSIZE);

    PATCHGEN_PROLOG(pVM, pPatch);

    if (pCpu->param1.flags == USE_REG_GEN32 || pCpu->param1.flags == USE_REG_GEN16)
    {
        /* Register operand */
        // 8B 15 [32 bits addr]   mov edx, CPUMCTX.tr/ldtr

        if (pCpu->prefix == PREFIX_OPSIZE)
            pPB[offset++] = 0x66;

        pPB[offset++] = 0x8B;   // mov destreg, CPUMCTX.tr/ldtr
        /* Modify REG part according to destination of original instruction */
        pPB[offset++] = MAKE_MODRM(0, pCpu->param1.base.reg_gen, 5);
        if (pCpu->pCurInstr->opcode == OP_STR)
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTRCPTR);
    }
    else
    {
        /* Memory operand */
        //50                   push  eax
        //52                   push  edx
        //8D 15 48 7C 42 00    lea   edx, dword ptr [dest]
        //66 A1 48 7C 42 00    mov   ax, CPUMCTX.tr/ldtr
        //66 89 02             mov   word ptr [edx], ax
        //5A                   pop   edx
        //58                   pop   eax

        pPB[offset++] = 0x50;   // push eax
        pPB[offset++] = 0x52;   // push edx

        if (pCpu->prefix == PREFIX_SEG)
        {
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        }
        pPB[offset++] = 0x8D;   // lea edx, dword ptr [dest]
        // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, USE_REG_EDX, pCpu->ModRM.Bits.Rm);

        i = 3;  /* standard offset of modrm bytes */
        if (pCpu->prefix == PREFIX_OPSIZE)
            i++;    // skip operand prefix
        if (pCpu->prefix == PREFIX_SEG)
            i++;    // skip segment prefix

        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->opsize - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->opsize - i);

        pPB[offset++] = 0x66;   // mov ax, CPUMCTX.tr/ldtr
        pPB[offset++] = 0xA1;
        if (pCpu->pCurInstr->opcode == OP_STR)
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTRCPTR);

        pPB[offset++] = 0x66;   // mov word ptr [edx], ax
        pPB[offset++] = 0x89;
        pPB[offset++] = 0x02;

        pPB[offset++] = 0x5A;   // pop edx
        pPB[offset++] = 0x58;   // pop eax
    }

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}

/**
 * Generate an sgdt or sidt patch instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSxDT(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    uint32_t offset = 0, offset_base, offset_limit;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->prefix == PREFIX_NONE);

    // sgdt %Ms
    // sidt %Ms

    switch (pCpu->pCurInstr->opcode)
    {
        case OP_SGDT:
            offset_base  = RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
            offset_limit = RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
            break;

        case OP_SIDT:
            offset_base  = RT_OFFSETOF(CPUMCTX, idtr.pIdt);
            offset_limit = RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }

    //50                   push  eax
    //52                   push  edx
    //8D 15 48 7C 42 00    lea   edx, dword ptr [dest]
    //66 A1 48 7C 42 00    mov   ax, CPUMCTX.gdtr.limit
    //66 89 02             mov   word ptr [edx], ax
    //A1 48 7C 42 00       mov   eax, CPUMCTX.gdtr.base
    //89 42 02             mov   dword ptr [edx+2], eax
    //5A                   pop   edx
    //58                   pop   eax

    PATCHGEN_PROLOG(pVM, pPatch);
    pPB[offset++] = 0x50;   // push eax
    pPB[offset++] = 0x52;   // push edx

    if (pCpu->prefix == PREFIX_SEG)
    {
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);
    }
    pPB[offset++] = 0x8D;   // lea edx, dword ptr [dest]
    // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, USE_REG_EDX, pCpu->ModRM.Bits.Rm);

    i = 3;  /* standard offset of modrm bytes */
    if (pCpu->prefix == PREFIX_OPSIZE)
        i++;    // skip operand prefix
    if (pCpu->prefix == PREFIX_SEG)
        i++;    // skip segment prefix
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->opsize - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->opsize - i);

    pPB[offset++] = 0x66;   // mov ax, CPUMCTX.gdtr.limit
    pPB[offset++] = 0xA1;
    *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_limit;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTRCPTR);

    pPB[offset++] = 0x66;   // mov word ptr [edx], ax
    pPB[offset++] = 0x89;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0xA1;   // mov eax, CPUMCTX.gdtr.base
    *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_base;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTRCPTR);

    pPB[offset++] = 0x89;   // mov dword ptr [edx+2], eax
    pPB[offset++] = 0x42;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0x5A;   // pop edx
    pPB[offset++] = 0x58;   // pop eax

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}

/**
 * Generate a cpuid patch instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenCpuid(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
{
    uint32_t size;
    PATCHGEN_PROLOG(pVM, pPatch);

    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCpuidRecord, 0, false);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/**
 * Generate the jump from guest to patch code
 *
 * @returns VBox status code.
 * @param   pVM               The VM to operate on.
 * @param   pPatch            Patch record
 * @param   pReturnAddrGC     Guest code target of the jump
 * @param   fClearInhibitIRQs Clear inhibit irq flag
 */
int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fClearInhibitIRQs)
{
    int rc = VINF_SUCCESS;
    uint32_t size;

    if (fClearInhibitIRQs)
    {
        rc = patmPatchGenClearInhibitIRQ(pVM, pPatch, pReturnAddrGC);
        if (rc == VERR_NO_MEMORY)
            return rc;
        AssertRCReturn(rc, rc);
    }

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate code to jump to guest code if IF=1, else fault. */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
    PATCHGEN_EPILOG(pPatch, size);

    return rc;
}

/*
 * Relative jump from patch code to patch code (no fixup required)
 */
int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RCPTRTYPE(uint8_t *) pPatchAddrGC, bool fAddLookupRecord)
{
    int32_t displ;
    int rc = VINF_SUCCESS;

    Assert(PATMIsPatchGCAddr(pVM, pPatchAddrGC));
    PATCHGEN_PROLOG(pVM, pPatch);

    if (fAddLookupRecord)
    {
        /* Add lookup record for patch to guest address translation */
        patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
    }

    pPB[0] = 0xE9;  // jmp rel32

    displ = pPatchAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + SIZEOF_NEARJUMP32);

    *(uint32_t *)&pPB[1] = displ;

    PATCHGEN_EPILOG(pPatch, SIZEOF_NEARJUMP32);

    return rc;
}