VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMPatch.cpp@ 6738

Last change on this file since 6738 was 5999, checked in by vboxsync, 17 years ago

The Giant CDDL Dual-License Header Change.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 52.2 KB
Line 
1/* $Id: PATMPatch.cpp 5999 2007-12-07 15:05:06Z vboxsync $ */
2/** @file
3 * PATMPatch - Dynamic Guest OS Instruction patches
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 innotek GmbH
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/patm.h>
25#include <VBox/stam.h>
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/iom.h>
29#include <VBox/sup.h>
30#include <VBox/mm.h>
31#include <VBox/ssm.h>
32#include <VBox/pdm.h>
33#include <VBox/trpm.h>
34#include <VBox/param.h>
35#include <iprt/avl.h>
36#include "PATMInternal.h"
37#include <VBox/vm.h>
38#include <VBox/csam.h>
39
40#include <VBox/dbg.h>
41#include <VBox/err.h>
42#include <VBox/log.h>
43#include <iprt/assert.h>
44#include <iprt/asm.h>
45#include <iprt/string.h>
46#include <VBox/dis.h>
47#include <VBox/disopcode.h>
48
49#include <stdlib.h>
50#include <stdio.h>
51#include "PATMA.h"
52#include "PATMPatch.h"
53
/* Internal structure for passing more information about call fixups to patmPatchGenCode. */
typedef struct
{
    RTGCPTR     pTargetGC;      /* Guest address of the call target (0xDEADBEEF placeholder for indirect calls). */
    RTGCPTR     pCurInstrGC;    /* Guest address of the instruction being patched. */
    RTGCPTR     pNextInstrGC;   /* Guest address of the instruction following the patched one. */
    RTGCPTR     pReturnGC;      /* Guest return address pushed for generated call code. */
} PATMCALLINFO, *PPATMCALLINFO;
62
63int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType, RTGCPTR pSource, RTGCPTR pDest)
64{
65 PRELOCREC pRec;
66
67 Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
68
69 LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%VGv source=%VGv dest=%VGv\n", uType, pRelocHC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemGC , pSource, pDest));
70
71 pRec = (PRELOCREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
72 Assert(pRec);
73 pRec->Core.Key = (AVLPVKEY)pRelocHC;
74 pRec->pRelocPos = pRelocHC; /* @todo redundant. */
75 pRec->pSource = pSource;
76 pRec->pDest = pDest;
77 pRec->uType = uType;
78
79 bool ret = RTAvlPVInsert(&pPatch->FixupTree, &pRec->Core);
80 Assert(ret); NOREF(ret);
81 pPatch->nrFixups++;
82
83 return VINF_SUCCESS;
84}
85
86int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTGCPTR pTargetGC, uint32_t opcode)
87{
88 PJUMPREC pRec;
89
90 pRec = (PJUMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
91 Assert(pRec);
92
93 pRec->Core.Key = (AVLPVKEY)pJumpHC;
94 pRec->pJumpHC = pJumpHC; /* @todo redundant. */
95 pRec->offDispl = offset;
96 pRec->pTargetGC = pTargetGC;
97 pRec->opcode = opcode;
98
99 bool ret = RTAvlPVInsert(&pPatch->JumpTree, &pRec->Core);
100 Assert(ret); NOREF(ret);
101 pPatch->nrJumpRecs++;
102
103 return VINF_SUCCESS;
104}
105
/* Common prologue for the patch generators: point pPB at the current write
 * position in patch memory and bail out with VERR_NO_MEMORY when fewer than
 * 256 bytes of patch memory remain (generators emit bounded-size code). */
#define PATCHGEN_PROLOG_NODEF(pVM, pPatch) \
    pPB = PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset; \
    \
    if (pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) \
    { \
        pVM->patm.s.fOutOfMemory = true; \
        Assert(pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem); \
        return VERR_NO_MEMORY; \
    }

/* Same as PATCHGEN_PROLOG_NODEF, but also declares the pPB write pointer. */
#define PATCHGEN_PROLOG(pVM, pPatch) \
    uint8_t *pPB; \
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);


/* Common epilogue: account for the bytes emitted into the patch block. */
#define PATCHGEN_EPILOG(pPatch, size) \
    Assert(size <= 640); \
    pPatch->uCurPatchOffset += size;
125
/**
 * Copy a pre-assembled patch template (PATMA.asm record) into patch memory and
 * resolve every fixup marker embedded in its code.
 *
 * @returns Size in bytes of the code emitted at pPB.
 * @param   pVM             The VM handle.
 * @param   pPatch          Patch record (receives fixup/jump records).
 * @param   pPB             Host write position inside patch memory.
 * @param   pAsmRecord      Assembly template describing code bytes and relocations.
 * @param   pReturnAddrGC   Guest address to jump back to when fGenJump is set.
 * @param   fGenJump        Whether to generate the jump back to guest code.
 * @param   pCallInfo       Optional extra call information; required by markers
 *                          such as PATM_RETURNADDR and PATM_NEXTINSTRADDR.
 */
static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PPATCHASMRECORD pAsmRecord, GCPTRTYPE(uint8_t *) pReturnAddrGC, bool fGenJump,
                                 PPATMCALLINFO pCallInfo = 0)
{
    uint32_t i, j;

    Assert(fGenJump == false || pReturnAddrGC);
    Assert(fGenJump == false || pAsmRecord->offJump);
    Assert(pAsmRecord && pAsmRecord->size > sizeof(pAsmRecord->uReloc[0]));

    // Copy the code block
    memcpy(pPB, pAsmRecord->pFunction, pAsmRecord->size);

    // Process all fixups
    /* uReloc[] holds (marker, info) pairs; markers appear in the code in the
     * same order as in uReloc[], so j scans forward without resetting. */
    for (j=0,i=0;i<pAsmRecord->nrRelocs*2; i+=2)
    {
        for (;j<pAsmRecord->size;j++)
        {
            if (*(uint32_t*)&pPB[j] == pAsmRecord->uReloc[i])
            {
                GCPTRTYPE(uint32_t *) dest;

#ifdef VBOX_STRICT
                /* Only PATM_FIXUP carries a payload in the second slot. */
                if (pAsmRecord->uReloc[i] == PATM_FIXUP)
                    Assert(pAsmRecord->uReloc[i+1] != 0);
                else
                    Assert(pAsmRecord->uReloc[i+1] == 0);
#endif

                switch (pAsmRecord->uReloc[i])
                {
                case PATM_VMFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
                    break;

                case PATM_PENDINGACTION:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
                    break;

                case PATM_FIXUP:
                    /* Offset in uReloc[i+1] is from the base of the function. */
                    dest = (RTGCUINTPTR)pVM->patm.s.pPatchMemGC + pAsmRecord->uReloc[i+1] + (RTGCUINTPTR)(pPB - pVM->patm.s.pPatchMemHC);
                    break;
#ifdef VBOX_WITH_STATISTICS
                case PATM_ALLPATCHCALLS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
                    break;

                case PATM_IRETEFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
                    break;

                case PATM_IRETCS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
                    break;

                case PATM_IRETEIP:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
                    break;

                case PATM_PERPATCHCALLS:
                    dest = patmPatchQueryStatAddress(pVM, pPatch);
                    break;
#endif
                case PATM_STACKPTR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
                    break;

                /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
                 * part to store the original return addresses.
                 */
                case PATM_STACKBASE:
                    dest = pVM->patm.s.pGCStackGC;
                    break;

                case PATM_STACKBASE_GUEST:
                    dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
                    break;

                case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);
                    dest = pCallInfo->pReturnGC;
                    break;

                case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                    /** @note hardcoded assumption that we must return to the instruction following this block */
                    dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->size;
                    break;

                case PATM_CALLTARGET:   /* relative to patch address; no fixup requird */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                    /* Address must be filled in later. (see patmr3SetBranchTargets)  */
                    patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
                    dest = PATM_ILLEGAL_DESTINATION;
                    break;

                case PATM_PATCHBASE:    /* Patch GC base address */
                    dest = pVM->patm.s.pPatchMemGC;
                    break;

                case PATM_CPUID_STD_PTR:
                    dest = CPUMGetGuestCpuIdStdGCPtr(pVM);
                    break;

                case PATM_CPUID_EXT_PTR:
                    dest = CPUMGetGuestCpuIdExtGCPtr(pVM);
                    break;

                case PATM_CPUID_CENTAUR_PTR:
                    dest = CPUMGetGuestCpuIdCentaurGCPtr(pVM);
                    break;

                case PATM_CPUID_DEF_PTR:
                    dest = CPUMGetGuestCpuIdDefGCPtr(pVM);
                    break;

                case PATM_CPUID_STD_MAX:
                    dest = CPUMGetGuestCpuIdStdMax(pVM);
                    break;

                case PATM_CPUID_EXT_MAX:
                    dest = CPUMGetGuestCpuIdExtMax(pVM);
                    break;

                case PATM_CPUID_CENTAUR_MAX:
                    dest = CPUMGetGuestCpuIdCentaurMax(pVM);
                    break;

                case PATM_INTERRUPTFLAG:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
                    break;

                case PATM_INHIBITIRQADDR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
                    break;

                case PATM_NEXTINSTRADDR:
                    Assert(pCallInfo);
                    /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
                    dest = pCallInfo->pNextInstrGC;
                    break;

                case PATM_CURINSTRADDR:
                    Assert(pCallInfo);
                    dest = pCallInfo->pCurInstrGC;
                    break;

                case PATM_VM_FORCEDACTIONS:
                    dest = pVM->pVMGC + RT_OFFSETOF(VM, fForcedActions);
                    break;

                case PATM_TEMP_EAX:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
                    break;
                case PATM_TEMP_ECX:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
                    break;
                case PATM_TEMP_EDI:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
                    break;
                case PATM_TEMP_EFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
                    break;
                case PATM_TEMP_RESTORE_FLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
                    break;
                case PATM_CALL_PATCH_TARGET_ADDR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
                    break;
                case PATM_CALL_RETURN_ADDR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
                    break;

                /* Relative address of global patm lookup and call function. */
                case PATM_LOOKUP_AND_CALL_FUNCTION:
                {
                    RTGCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR)(&pPB[j] + sizeof(RTGCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperCallGC);
                    Assert(sizeof(uint32_t) == sizeof(RTGCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
                    break;
                }

                case PATM_RETURN_FUNCTION:
                {
                    RTGCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR)(&pPB[j] + sizeof(RTGCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperRetGC);
                    Assert(sizeof(uint32_t) == sizeof(RTGCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
                    break;
                }

                case PATM_IRET_FUNCTION:
                {
                    RTGCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR)(&pPB[j] + sizeof(RTGCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperIretGC);
                    Assert(sizeof(uint32_t) == sizeof(RTGCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
                    break;
                }

                case PATM_LOOKUP_AND_JUMP_FUNCTION:
                {
                    RTGCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR)(&pPB[j] + sizeof(RTGCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperJumpGC);
                    Assert(sizeof(uint32_t) == sizeof(RTGCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
                    break;
                }

                default:
                    dest = PATM_ILLEGAL_DESTINATION;
                    AssertRelease(0);
                    break;
                }

                /* Patch the resolved value over the marker. */
                *(RTGCPTR *)&pPB[j] = dest;
                if (pAsmRecord->uReloc[i] < PATM_NO_FIXUP)
                {
                    /* Absolute addresses must be relocated when patch memory moves. */
                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE);
                }
                break;
            }
        }
        Assert(j < pAsmRecord->size); /* every marker must be found in the code */
    }
    Assert(pAsmRecord->uReloc[i] == 0xffffffff); /* end-of-relocations sentinel */

    /* Add the jump back to guest code (if required) */
    if (fGenJump)
    {
        int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);

        /* Add lookup record for patch to guest address translation */
        Assert(pPB[pAsmRecord->offJump - 1] == 0xE9); /* near jmp rel32 opcode */
        patmr3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

        *(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
        patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
                            PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32,
                            pReturnAddrGC);
    }

    // Calculate the right size of this patch block
    if ((fGenJump && pAsmRecord->offJump) || (!fGenJump && !pAsmRecord->offJump))
    {
        return pAsmRecord->size;
    }
    else {
        // if a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32
        return pAsmRecord->size - SIZEOF_NEARJUMP32;
    }
}
389
390/* Read bytes and check for overwritten instructions. */
391static int patmPatchReadBytes(PVM pVM, uint8_t *pDest, RTGCPTR pSrc, uint32_t cb)
392{
393 int rc = PGMPhysReadGCPtr(pVM, pDest, pSrc, cb);
394 AssertRCReturn(rc, rc);
395 /*
396 * Could be patched already; make sure this is checked!
397 */
398 for (uint32_t i=0;i<cb;i++)
399 {
400 uint8_t temp;
401
402 int rc2 = PATMR3QueryOpcode(pVM, pSrc+i, &temp);
403 if (VBOX_SUCCESS(rc2))
404 {
405 pDest[i] = temp;
406 }
407 else
408 break; /* no more */
409 }
410 return VINF_SUCCESS;
411}
412
/**
 * Duplicate the current guest instruction verbatim into the patch block.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pPatch          Patch record.
 * @param   pCpu            Disassembly state of the instruction (opsize used).
 * @param   pCurInstrGC     Guest address of the instruction to copy.
 */
int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, GCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    PATCHGEN_PROLOG(pVM, pPatch);

    /* Fetches original opcodes if the source range was itself patched. */
    rc = patmPatchReadBytes(pVM, pPB, pCurInstrGC, pCpu->opsize);
    AssertRC(rc);
    PATCHGEN_EPILOG(pPatch, pCpu->opsize);
    return rc;
}
423
/**
 * Generate an IRET replacement patch.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pPatch          Patch record.
 * @param   pCurInstrGC     Guest address of the iret instruction.
 * @param   fSizeOverride   Operand size override; asserted to be false here.
 */
int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC, bool fSizeOverride)
{
    uint32_t size;
    PATMCALLINFO callInfo;

    PATCHGEN_PROLOG(pVM, pPatch);

    AssertMsg(fSizeOverride == false, ("operand size override!!\n"));

    /* Only pCurInstrGC is consumed by the iret template. */
    callInfo.pCurInstrGC = pCurInstrGC;

    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}
440
441int patmPatchGenCli(PVM pVM, PPATCHINFO pPatch)
442{
443 uint32_t size;
444 PATCHGEN_PROLOG(pVM, pPatch);
445
446 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCliRecord, 0, false);
447
448 PATCHGEN_EPILOG(pPatch, size);
449 return VINF_SUCCESS;
450}
451
/*
 * Generate an STI patch
 */
int patmPatchGenSti(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC, RTGCPTR pNextInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t size;

    Log(("patmPatchGenSti at %VGv; next %VGv\n", pCurInstrGC, pNextInstrGC));
    PATCHGEN_PROLOG(pVM, pPatch);
    /* The sti template only consumes the next-instruction address. */
    callInfo.pNextInstrGC = pNextInstrGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStiRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}
468
469
/**
 * Generate a POPF replacement patch (16 or 32-bit flavour).
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pPatch          Patch record.
 * @param   pReturnAddrGC   Guest address to return to after the popf.
 * @param   fSizeOverride   True for the 16-bit (operand size override) variant.
 * @param   fGenJumpBack    Whether to generate the jump back to guest code.
 */
int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, GCPTRTYPE(uint8_t *) pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
{
    uint32_t size;
    PATMCALLINFO callInfo;

    PATCHGEN_PROLOG(pVM, pPatch);

    callInfo.pNextInstrGC = pReturnAddrGC;

    Log(("patmPatchGenPopf at %VGv\n", pReturnAddrGC));

    /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
    if (fSizeOverride == true)
    {
        Log(("operand size override!!\n"));
        size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf16Record : &PATMPopf16Record_NoExit , pReturnAddrGC, fGenJumpBack, &callInfo);
    }
    else
    {
        size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf32Record : &PATMPopf32Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
    }

    PATCHGEN_EPILOG(pPatch, size);
    STAM_COUNTER_INC(&pVM->patm.s.StatGenPopf);
    return VINF_SUCCESS;
}
496
497int patmPatchGenPushf(PVM pVM, PPATCHINFO pPatch, bool fSizeOverride)
498{
499 uint32_t size;
500 PATCHGEN_PROLOG(pVM, pPatch);
501
502 if (fSizeOverride == true)
503 {
504 Log(("operand size override!!\n"));
505 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf16Record, 0, false);
506 }
507 else
508 {
509 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf32Record, 0, false);
510 }
511
512 PATCHGEN_EPILOG(pPatch, size);
513 return VINF_SUCCESS;
514}
515
516int patmPatchGenPushCS(PVM pVM, PPATCHINFO pPatch)
517{
518 uint32_t size;
519 PATCHGEN_PROLOG(pVM, pPatch);
520 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushCSRecord, 0, false);
521 PATCHGEN_EPILOG(pPatch, size);
522 return VINF_SUCCESS;
523}
524
/**
 * Generate a replacement for loop/loopz/loopnz/jecxz with a 32-bit displacement.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pPatch          Patch record.
 * @param   pTargetGC       Guest address of the branch target.
 * @param   opcode          OP_LOOP, OP_LOOPNE, OP_LOOPE or OP_JECXZ.
 * @param   fSizeOverride   True to operate on cx instead of ecx.
 */
int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, GCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
{
    uint32_t size = 0;
    PPATCHASMRECORD pPatchAsmRec;

    PATCHGEN_PROLOG(pVM, pPatch);

    switch (opcode)
    {
    case OP_LOOP:
        pPatchAsmRec = &PATMLoopRecord;
        break;
    case OP_LOOPNE:
        pPatchAsmRec = &PATMLoopNZRecord;
        break;
    case OP_LOOPE:
        pPatchAsmRec = &PATMLoopZRecord;
        break;
    case OP_JECXZ:
        pPatchAsmRec = &PATMJEcxRecord;
        break;
    default:
        AssertMsgFailed(("PatchGenLoop: invalid opcode %d\n", opcode));
        return VERR_INVALID_PARAMETER;
    }
    Assert(pPatchAsmRec->offSizeOverride && pPatchAsmRec->offRelJump);

    Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));

    // Generate the patch code
    size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);

    if (fSizeOverride)
    {
        pPB[pPatchAsmRec->offSizeOverride] = 0x66;  // ecx -> cx or vice versa
    }

    /* Placeholder displacement; patched in patmr3SetBranchTargets. */
    *(RTGCPTR *)&pPB[pPatchAsmRec->offRelJump] = 0xDEADBEEF;

    /* Record the jump so the real target can be filled in later. */
    patmPatchAddJump(pVM, pPatch, &pPB[pPatchAsmRec->offRelJump - 1], 1, pTargetGC, opcode);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}
569
/**
 * Generate a relative jump from patch code to guest code (displacement filled
 * in later via the jump record).
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pPatch          Patch record.
 * @param   pTargetGC       Guest address of the jump target.
 * @param   opcode          Jump opcode (OP_JMP, OP_JE, ... or loop variants).
 * @param   fSizeOverride   Operand size override (only relevant for loop variants).
 */
int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, GCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
{
    uint32_t offset = 0;
    PATCHGEN_PROLOG(pVM, pPatch);

    // internal relative jumps from patch code to patch code; no relocation record required

    Assert(PATMIsPatchGCAddr(pVM, pTargetGC) == false);

    /* For conditional jumps pPB[1] gets the second byte of the two-byte
     * 0F 8x near Jcc encoding; pPB[0] (0x0F) is written after the switch. */
    switch (opcode)
    {
    case OP_JO:
        pPB[1] = 0x80;
        break;
    case OP_JNO:
        pPB[1] = 0x81;
        break;
    case OP_JC:
        pPB[1] = 0x82;
        break;
    case OP_JNC:
        pPB[1] = 0x83;
        break;
    case OP_JE:
        pPB[1] = 0x84;
        break;
    case OP_JNE:
        pPB[1] = 0x85;
        break;
    case OP_JBE:
        pPB[1] = 0x86;
        break;
    case OP_JNBE:
        pPB[1] = 0x87;
        break;
    case OP_JS:
        pPB[1] = 0x88;
        break;
    case OP_JNS:
        pPB[1] = 0x89;
        break;
    case OP_JP:
        pPB[1] = 0x8A;
        break;
    case OP_JNP:
        pPB[1] = 0x8B;
        break;
    case OP_JL:
        pPB[1] = 0x8C;
        break;
    case OP_JNL:
        pPB[1] = 0x8D;
        break;
    case OP_JLE:
        pPB[1] = 0x8E;
        break;
    case OP_JNLE:
        pPB[1] = 0x8F;
        break;

    case OP_JMP:
        /* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
        /* Add lookup record for patch to guest address translation */
        patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);

        pPB[0] = 0xE9;  /* near jmp rel32 */
        break;

    case OP_JECXZ:
    case OP_LOOP:
    case OP_LOOPNE:
    case OP_LOOPE:
        /* These have no near 32-bit displacement form; use the loop replacement templates. */
        return patmPatchGenLoop(pVM, pPatch, pTargetGC, opcode, fSizeOverride);

    default:
        AssertMsg(0, ("Invalid jump opcode %d\n", opcode));
        return VERR_PATCHING_REFUSED;
    }
    if (opcode != OP_JMP)
    {
        pPB[0] = 0xF;   /* two-byte Jcc: 0F 8x */
        offset += 2;
    }
    else offset++;

    /* Placeholder displacement; real value filled in via the jump record. */
    *(RTGCPTR *)&pPB[offset] = 0xDEADBEEF;

    patmPatchAddJump(pVM, pPatch, pPB, offset, pTargetGC, opcode);

    offset += sizeof(RTGCPTR);

    PATCHGEN_EPILOG(pPatch, offset);
    return VINF_SUCCESS;
}
664
/*
 * Rewrite call to dynamic or currently unknown function (on-demand patching of function)
 */
/**
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pPatch          Patch record.
 * @param   pCpu            Disassembly state of the call instruction.
 * @param   pCurInstrGC     Guest address of the call instruction.
 * @param   pTargetGC       Guest address of the call target (direct calls only).
 * @param   fIndirect       True for indirect calls (target unknown at patch time).
 */
int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC, RTGCPTR pTargetGC, bool fIndirect)
{
    PATMCALLINFO        callInfo;
    uint32_t            offset;
    uint32_t            i, size;
    int                 rc;

    /** @note Don't check for IF=1 here. The ret instruction will do this. */
    /** @note It's dangerous to do this for 'normal' patches. the jump target might be inside the generated patch jump. (seen this!) */

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    if (fIndirect)
    {
        Log(("patmPatchGenIndirectCall\n"));
        Assert(pCpu->param1.size == 4);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J);

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        /* include prefix byte to make sure we don't use the incorrect selector register. */
        if (pCpu->prefix & PREFIX_SEG)
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        pPB[offset++] = 0xFF;              // push r/m32
        pPB[offset++] = MAKE_MODRM(MODRM_MOD(pCpu->ModRM), 6 /* group 5 */, MODRM_RM(pCpu->ModRM));
        i = 2;  /* standard offset of modrm bytes */
        if (pCpu->prefix & PREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->prefix & PREFIX_SEG)
            i++;    //skip segment prefix

        /* Copy the remaining operand bytes (displacement/SIB) from the guest instruction. */
        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTGCPTR)((RTGCUINTPTR)pCurInstrGC + i), pCpu->opsize - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->opsize - i);
    }
    else
    {
        AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%VGv)?!?\n", pTargetGC));
        Assert(pTargetGC);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J);

        /** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */

        /* Relative call to patch code (patch to patch -> no fixup). */
        Log(("PatchGenCall from %VGv (next=%VGv) to %VGv\n", pCurInstrGC, pCurInstrGC + pCpu->opsize, pTargetGC));

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        pPB[offset++] = 0x68;              // push %Iv
        *(RTGCPTR *)&pPB[offset] = pTargetGC;
        offset += sizeof(RTGCPTR);
    }

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->opsize;
    callInfo.pTargetGC = (fIndirect) ? 0xDEADBEEF : pTargetGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, (fIndirect) ? &PATMCallIndirectRecord : &PATMCallRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    /* Need to set PATM_INTERRUPTFLAG after the patched ret returns here. */
    rc = patmPatchGenSetPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenCall);
    return VINF_SUCCESS;
}
759
/**
 * Generate indirect jump to unknown destination
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Current instruction address
 */
int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC)
{
    PATMCALLINFO        callInfo;
    uint32_t            offset;
    uint32_t            i, size;
    int                 rc;

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    Log(("patmPatchGenIndirectJump\n"));
    Assert(pCpu->param1.size == 4);
    Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J);

    /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
     * a page fault. The assembly code restores the stack afterwards.
     */
    offset = 0;
    /* include prefix byte to make sure we don't use the incorrect selector register. */
    if (pCpu->prefix & PREFIX_SEG)
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);

    pPB[offset++] = 0xFF;              // push r/m32
    pPB[offset++] = MAKE_MODRM(MODRM_MOD(pCpu->ModRM), 6 /* group 5 */, MODRM_RM(pCpu->ModRM));
    i = 2;  /* standard offset of modrm bytes */
    if (pCpu->prefix & PREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->prefix & PREFIX_SEG)
        i++;    //skip segment prefix

    /* Copy the remaining operand bytes (displacement/SIB) from the guest instruction. */
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTGCPTR)((RTGCUINTPTR)pCurInstrGC + i), pCpu->opsize - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->opsize - i);

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->opsize;
    callInfo.pTargetGC = 0xDEADBEEF;    /* target is unknown for indirect jumps */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpIndirectRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenJump);
    return VINF_SUCCESS;
}
830
/**
 * Generate return instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch structure
 * @param   pCpu        Disassembly struct
 * @param   pCurInstrGC Current instruction pointer
 *
 */
int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, GCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int size = 0, rc;
    RTGCPTR pPatchRetInstrGC;

    /* Remember start of this patch for below. */
    pPatchRetInstrGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;

    Log(("patmPatchGenRet %VGv\n", pCurInstrGC));

    /** @note optimization: multiple identical ret instruction in a single patch can share a single patched ret. */
    if (    pPatch->pTempInfo->pPatchRetInstrGC
        &&  pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->param1.parval) /* nr of bytes popped off the stack should be identical of course! */
    {
        Assert(pCpu->pCurInstr->opcode == OP_RETN);
        STAM_COUNTER_INC(&pVM->patm.s.StatGenRetReused);

        /* Jump to the previously generated ret block instead of emitting another one. */
        return patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, pPatch->pTempInfo->pPatchRetInstrGC);
    }

    /* Jump back to the original instruction if IF is set again. */
    Assert(!PATMFindActivePatchByEntrypoint(pVM, pCurInstrGC));
    rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
    AssertRCReturn(rc, rc);

    /* align this block properly to make sure the jump table will not be misaligned. */
    PATCHGEN_PROLOG(pVM, pPatch);
    size = (RTHCUINTPTR)pPB & 3;
    if (size)
        size = 4 - size;

    for (int i=0;i<size;i++)
        pPB[i] = 0x90;   /* nop */
    PATCHGEN_EPILOG(pPatch, size);

    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenRet);
    /* Duplicate the ret or ret n instruction; it will use the PATM return address */
    rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);

    if (rc == VINF_SUCCESS)
    {
        /* Remember this ret block so identical rets in the same patch can reuse it. */
        pPatch->pTempInfo->pPatchRetInstrGC = pPatchRetInstrGC;
        pPatch->pTempInfo->uPatchRetParam1  = pCpu->param1.parval;
    }
    return rc;
}
891
/**
 * Generate all global patm functions
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch structure
 *
 */
int patmPatchGenGlobalFunctions(PVM pVM, PPATCHINFO pPatch)
{
    int size = 0;

    /* Lookup-and-call helper; its GC entry point is saved for later relative-call fixups. */
    pVM->patm.s.pfnHelperCallGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndCallRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Return helper. */
    pVM->patm.s.pfnHelperRetGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Lookup-and-jump helper. */
    pVM->patm.s.pfnHelperJumpGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndJumpRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Iret helper. */
    pVM->patm.s.pfnHelperIretGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    Log(("pfnHelperCallGC %VGv\n", pVM->patm.s.pfnHelperCallGC));
    Log(("pfnHelperRetGC  %VGv\n", pVM->patm.s.pfnHelperRetGC));
    Log(("pfnHelperJumpGC %VGv\n", pVM->patm.s.pfnHelperJumpGC));
    Log(("pfnHelperIretGC %VGv\n", pVM->patm.s.pfnHelperIretGC));

    return VINF_SUCCESS;
}
940
941/**
942 * Generate illegal instruction (int 3)
943 *
944 * @returns VBox status code.
945 * @param pVM The VM to operate on.
946 * @param pPatch Patch structure
947 *
948 */
949int patmPatchGenIllegalInstr(PVM pVM, PPATCHINFO pPatch)
950{
951 PATCHGEN_PROLOG(pVM, pPatch);
952
953 pPB[0] = 0xCC;
954
955 PATCHGEN_EPILOG(pPatch, 1);
956 return VINF_SUCCESS;
957}
958
/**
 * Check virtual IF flag and jump back to original guest code if set
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch structure
 * @param   pCurInstrGC Guest context pointer to the current instruction
 *
 */
int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC)
{
    uint32_t size;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate code to check for IF=1 before executing the call to the duplicated function. */
    /* fGenJump=true: the template's jump back to pCurInstrGC is generated here. */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCheckIFRecord, pCurInstrGC, true);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}
983
984/**
985 * Set PATM interrupt flag
986 *
987 * @returns VBox status code.
988 * @param pVM The VM to operate on.
989 * @param pPatch Patch structure
990 * @param pInstrGC Corresponding guest instruction
991 *
992 */
993int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTGCPTR pInstrGC)
994{
995 PATCHGEN_PROLOG(pVM, pPatch);
996
997 /* Add lookup record for patch to guest address translation */
998 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
999
1000 int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
1001 PATCHGEN_EPILOG(pPatch, size);
1002 return VINF_SUCCESS;
1003}
1004
1005/**
1006 * Clear PATM interrupt flag
1007 *
1008 * @returns VBox status code.
1009 * @param pVM The VM to operate on.
1010 * @param pPatch Patch structure
1011 * @param pInstrGC Corresponding guest instruction
1012 *
1013 */
1014int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTGCPTR pInstrGC)
1015{
1016 PATCHGEN_PROLOG(pVM, pPatch);
1017
1018 /* Add lookup record for patch to guest address translation */
1019 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1020
1021 int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
1022 PATCHGEN_EPILOG(pPatch, size);
1023 return VINF_SUCCESS;
1024}
1025
1026
1027/**
1028 * Clear PATM inhibit irq flag
1029 *
1030 * @returns VBox status code.
1031 * @param pVM The VM to operate on.
1032 * @param pPatch Patch structure
1033 * @param pNextInstrGC Next guest instruction
1034 */
1035int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTGCPTR pNextInstrGC)
1036{
1037 int size;
1038 PATMCALLINFO callInfo;
1039
1040 PATCHGEN_PROLOG(pVM, pPatch);
1041
1042 Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));
1043
1044 /* Add lookup record for patch to guest address translation */
1045 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);
1046
1047 callInfo.pNextInstrGC = pNextInstrGC;
1048
1049 if (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1050 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQContIF0Record, 0, false, &callInfo);
1051 else
1052 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQFaultIF0Record, 0, false, &callInfo);
1053
1054 PATCHGEN_EPILOG(pPatch, size);
1055 return VINF_SUCCESS;
1056}
1057
1058/**
1059 * Generate an interrupt handler entrypoint
1060 *
1061 * @returns VBox status code.
1062 * @param pVM The VM to operate on.
1063 * @param pPatch Patch record
1064 * @param pIntHandlerGC IDT handler address
1065 *
1066 ** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
1067 */
1068int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTGCPTR pIntHandlerGC)
1069{
1070 uint32_t size;
1071 int rc = VINF_SUCCESS;
1072
1073 PATCHGEN_PROLOG(pVM, pPatch);
1074
1075 /* Add lookup record for patch to guest address translation */
1076 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1077
1078 /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
1079 size = patmPatchGenCode(pVM, pPatch, pPB,
1080 (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
1081 0, false);
1082
1083 PATCHGEN_EPILOG(pPatch, size);
1084
1085 // Interrupt gates set IF to 0
1086 rc = patmPatchGenCli(pVM, pPatch);
1087 AssertRCReturn(rc, rc);
1088
1089 return rc;
1090}
1091
1092/**
1093 * Generate a trap handler entrypoint
1094 *
1095 * @returns VBox status code.
1096 * @param pVM The VM to operate on.
1097 * @param pPatch Patch record
1098 * @param pTrapHandlerGC IDT handler address
1099 */
1100int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTGCPTR pTrapHandlerGC)
1101{
1102 uint32_t size;
1103
1104 PATCHGEN_PROLOG(pVM, pPatch);
1105
1106 /* Add lookup record for patch to guest address translation */
1107 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1108
1109 /* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
1110 size = patmPatchGenCode(pVM, pPatch, pPB,
1111 (pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE) ? &PATMTrapEntryRecordErrorCode : &PATMTrapEntryRecord,
1112 pTrapHandlerGC, true);
1113 PATCHGEN_EPILOG(pPatch, size);
1114
1115 return VINF_SUCCESS;
1116}
1117
#ifdef VBOX_WITH_STATISTICS
/**
 * Generate code that updates the call statistics for this patch.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pInstrGC    Corresponding guest instruction
 */
int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTGCPTR pInstrGC)
{
    PATCHGEN_PROLOG(pVM, pPatch);

    /* Record the mapping from the stats code back to the guest handler. */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Emit the statistics update code for this patch. */
    uint32_t cbGen = patmPatchGenCode(pVM, pPatch, pPB, &PATMStatsRecord, pInstrGC, false);

    PATCHGEN_EPILOG(pPatch, cbGen);

    return VINF_SUCCESS;
}
#endif
1135
/**
 * Debug register moves to or from general purpose registers
 * mov GPR, DRx
 * mov DRx, GPR
 *
 * The instruction is rewritten as a mov to/from the DRx shadow copy kept in
 * the guest context structure (CPUMCTX), addressed via the GC mapping in
 * pVM->patm.s.pCPUMCtxGC, so the real debug registers are never touched.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch record
 * @param   pCpu    Disassembly state of the original instruction
 *
 * @todo: if we ever want to support hardware debug registers natively, then
 * this will need to be changed!
 */
int patmPatchGenMovDebug(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    int reg, mod, rm, dbgreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* The generated instruction is always 'mov r32, [disp32]' or the reverse:
       ModRM mod=0 with rm=5 selects a plain 32-bit displacement operand. */
    mod = 0;    //effective address (only)
    rm = 5;     //disp32
    if (pCpu->pCurInstr->param1 == OP_PARM_Dd)
    {
        Assert(0);  // Should never get here: 'mov DRx, GPR' is not patched.

        // mov DRx, GPR
        pPB[0] = 0x89;  //mov disp32, GPR
        Assert(pCpu->param1.flags & USE_REG_DBG);
        Assert(pCpu->param2.flags & USE_REG_GEN32);

        dbgreg = pCpu->param1.base.reg_dbg;
        reg    = pCpu->param2.base.reg_gen32;
    }
    else
    {
        // mov GPR, DRx
        Assert(pCpu->param1.flags & USE_REG_GEN32);
        Assert(pCpu->param2.flags & USE_REG_DBG);

        pPB[0] = 0x8B;  // mov GPR, disp32
        reg    = pCpu->param1.base.reg_gen32;
        dbgreg = pCpu->param2.base.reg_dbg;
    }

    /* ModRM byte: keep the GPR of the original instruction, disp32 operand. */
    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /* Translate the debug register number into its CPUMCTX field offset. */
    /// @todo: make this an array in the context structure
    switch (dbgreg)
    {
    case USE_REG_DR0:
        offset = RT_OFFSETOF(CPUMCTX, dr0);
        break;
    case USE_REG_DR1:
        offset = RT_OFFSETOF(CPUMCTX, dr1);
        break;
    case USE_REG_DR2:
        offset = RT_OFFSETOF(CPUMCTX, dr2);
        break;
    case USE_REG_DR3:
        offset = RT_OFFSETOF(CPUMCTX, dr3);
        break;
    case USE_REG_DR4:
        offset = RT_OFFSETOF(CPUMCTX, dr4);
        break;
    case USE_REG_DR5:
        offset = RT_OFFSETOF(CPUMCTX, dr5);
        break;
    case USE_REG_DR6:
        offset = RT_OFFSETOF(CPUMCTX, dr6);
        break;
    case USE_REG_DR7:
        offset = RT_OFFSETOF(CPUMCTX, dr7);
        break;
    default: /* Shut up compiler warning. */
        AssertFailed();
        offset = 0;
        break;
    }
    /* Store the GC address of the shadow field and register an absolute fixup
       so it gets relocated if the hypervisor area moves. */
    *(RTGCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTGCPTR));
    return rc;
}
1217
/*
 * Control register moves to or from general purpose registers
 * mov GPR, CRx
 * mov CRx, GPR
 *
 * The instruction is rewritten as a mov to/from the CRx shadow copy kept in
 * the guest context structure (CPUMCTX), addressed via the GC mapping in
 * pVM->patm.s.pCPUMCtxGC, so the real control registers are never touched.
 */
int patmPatchGenMovControl(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    int reg, mod, rm, ctrlreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* The generated instruction is always 'mov r32, [disp32]' or the reverse:
       ModRM mod=0 with rm=5 selects a plain 32-bit displacement operand. */
    mod = 0;    //effective address (only)
    rm = 5;     //disp32
    if (pCpu->pCurInstr->param1 == OP_PARM_Cd)
    {
        Assert(0);  // Should never get here: 'mov CRx, GPR' is not patched.

        // mov CRx, GPR
        pPB[0] = 0x89;  //mov disp32, GPR
        ctrlreg = pCpu->param1.base.reg_ctrl;
        reg     = pCpu->param2.base.reg_gen32;
        Assert(pCpu->param1.flags & USE_REG_CR);
        Assert(pCpu->param2.flags & USE_REG_GEN32);
    }
    else
    {
        // mov GPR, CRx
        Assert(pCpu->param1.flags & USE_REG_GEN32);
        Assert(pCpu->param2.flags & USE_REG_CR);

        pPB[0] = 0x8B;  // mov GPR, disp32
        reg     = pCpu->param1.base.reg_gen32;
        ctrlreg = pCpu->param2.base.reg_ctrl;
    }

    /* ModRM byte: keep the GPR of the original instruction, disp32 operand. */
    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /* Translate the control register number into its CPUMCTX field offset. */
    /// @todo: make this an array in the context structure
    switch (ctrlreg)
    {
    case USE_REG_CR0:
        offset = RT_OFFSETOF(CPUMCTX, cr0);
        break;
    case USE_REG_CR2:
        offset = RT_OFFSETOF(CPUMCTX, cr2);
        break;
    case USE_REG_CR3:
        offset = RT_OFFSETOF(CPUMCTX, cr3);
        break;
    case USE_REG_CR4:
        offset = RT_OFFSETOF(CPUMCTX, cr4);
        break;
    default: /* Shut up compiler warning. */
        AssertFailed();
        offset = 0;
        break;
    }
    /* Store the GC address of the shadow field and register an absolute fixup
       so it gets relocated if the hypervisor area moves. */
    *(RTGCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTGCPTR));
    return rc;
}
1283
/*
 * mov GPR, SS
 *
 * Emitted as a five-step sequence: clear the PATM interrupt flag, push ss,
 * run the helper that checks/corrects the RPL of the pushed value, pop the
 * result into the destination GPR, and finally set the PATM interrupt flag
 * again.  Each step uses its own prolog/epilog pair so the patch offset is
 * advanced correctly between them.
 */
int patmPatchGenMovFromSS(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC)
{
    uint32_t size, offset;

    Log(("patmPatchGenMovFromSS %VGv\n", pCurInstrGC));

    Assert(pPatch->flags & PATMFL_CODE32);

    /* Disable interrupts around the sequence (clear virtual interrupt flag). */
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* push ss */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    offset = 0;
    if (pCpu->prefix & PREFIX_OPSIZE)
        pPB[offset++] = 0x66;       /* size override -> 16 bits push */
    pPB[offset++] = 0x16;
    PATCHGEN_EPILOG(pPatch, offset);

    /* checks and corrects RPL of pushed ss*/
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMMovFromSSRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* pop general purpose register */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    offset = 0;
    if (pCpu->prefix & PREFIX_OPSIZE)
        pPB[offset++] = 0x66;       /* size override -> 16 bits pop */
    /* 0x58+reg = pop r32; use the destination register of the original mov. */
    pPB[offset++] = 0x58 + pCpu->param1.base.reg_gen32;
    PATCHGEN_EPILOG(pPatch, offset);

    /* Re-enable interrupts (set virtual interrupt flag). */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}
1327
1328
/**
 * Generate an sldt or str patch instruction
 *
 * The instruction is rewritten to read the tr/ldtr shadow copy from the guest
 * context structure (CPUMCTX) instead of executing the privileged-state read.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSldtStr(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC)
{
    // sldt %Ew
    int rc = VINF_SUCCESS;
    uint32_t offset = 0;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->prefix == PREFIX_NONE || pCpu->prefix == PREFIX_OPSIZE);

    PATCHGEN_PROLOG(pVM, pPatch);

    if (pCpu->param1.flags == USE_REG_GEN32 || pCpu->param1.flags == USE_REG_GEN16)
    {
        /* Register operand */
        // 8B 15 [32 bits addr]   mov edx, CPUMCTX.tr/ldtr

        if (pCpu->prefix == PREFIX_OPSIZE)
            pPB[offset++] = 0x66;       /* keep the 16-bit operand size of the original */

        pPB[offset++] = 0x8B;           // mov destreg, CPUMCTX.tr/ldtr
        /* Modify REG part according to destination of original instruction */
        pPB[offset++] = MAKE_MODRM(0, pCpu->param1.base.reg_gen32, 5);
        /* disp32 operand: GC address of the shadow tr/ldtr field in CPUMCTX. */
        if (pCpu->pCurInstr->opcode == OP_STR)
        {
            *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTGCPTR);
    }
    else
    {
        /* Memory operand — synthesize the following sequence, preserving
           eax/edx around it:
           50                      push eax
           52                      push edx
           8D 15 48 7C 42 00       lea edx, dword ptr [dest]
           66 A1 48 7C 42 00       mov ax, CPUMCTX.tr/ldtr
           66 89 02                mov word ptr [edx],ax
           5A                      pop edx
           58                      pop eax
        */

        pPB[offset++] = 0x50;           // push eax
        pPB[offset++] = 0x52;           // push edx

        if (pCpu->prefix == PREFIX_SEG)
        {
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        }
        pPB[offset++] = 0x8D;           // lea edx, dword ptr [dest]
        // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
        pPB[offset++] = MAKE_MODRM(MODRM_MOD(pCpu->ModRM), USE_REG_EDX, MODRM_RM(pCpu->ModRM));

        /* Copy the remaining addressing bytes (SIB/displacement) straight from
           the original guest instruction. */
        i = 3; /* standard offset of modrm bytes */
        if (pCpu->prefix == PREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->prefix == PREFIX_SEG)
            i++;    //skip segment prefix

        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTGCPTR)((RTGCUINTPTR)pCurInstrGC + i), pCpu->opsize - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->opsize - i);

        pPB[offset++] = 0x66;           // mov ax, CPUMCTX.tr/ldtr
        pPB[offset++] = 0xA1;
        if (pCpu->pCurInstr->opcode == OP_STR)
        {
            *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTGCPTR);

        pPB[offset++] = 0x66;           // mov word ptr [edx],ax
        pPB[offset++] = 0x89;
        pPB[offset++] = 0x02;

        pPB[offset++] = 0x5A;           // pop edx
        pPB[offset++] = 0x58;           // pop eax
    }

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}
1429
/**
 * Generate an sgdt or sidt patch instruction
 *
 * The instruction is rewritten to store the gdtr/idtr shadow copy from the
 * guest context structure (CPUMCTX) — 16-bit limit followed by 32-bit base —
 * into the original memory destination.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSxDT(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    uint32_t offset = 0, offset_base, offset_limit;
    uint32_t i;

    /* @todo segment prefix (untested) */
    Assert(pCpu->prefix == PREFIX_NONE);

    // sgdt %Ms
    // sidt %Ms

    /* Select the CPUMCTX field offsets for the descriptor-table register. */
    switch (pCpu->pCurInstr->opcode)
    {
    case OP_SGDT:
        offset_base  = RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
        offset_limit = RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
        break;

    case OP_SIDT:
        offset_base  = RT_OFFSETOF(CPUMCTX, idtr.pIdt);
        offset_limit = RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
        break;

    default:
        return VERR_INVALID_PARAMETER;
    }

/* Synthesized sequence (eax/edx preserved around it):
   50                   push eax
   52                   push edx
   8D 15 48 7C 42 00    lea edx, dword ptr [dest]
   66 A1 48 7C 42 00    mov ax, CPUMCTX.gdtr.limit
   66 89 02             mov word ptr [edx],ax
   A1 48 7C 42 00       mov eax, CPUMCTX.gdtr.base
   89 42 02             mov dword ptr [edx+2],eax
   5A                   pop edx
   58                   pop eax
*/

    PATCHGEN_PROLOG(pVM, pPatch);
    pPB[offset++] = 0x50;               // push eax
    pPB[offset++] = 0x52;               // push edx

    if (pCpu->prefix == PREFIX_SEG)
    {
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);
    }
    pPB[offset++] = 0x8D;               // lea edx, dword ptr [dest]
    // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
    pPB[offset++] = MAKE_MODRM(MODRM_MOD(pCpu->ModRM), USE_REG_EDX, MODRM_RM(pCpu->ModRM));

    /* Copy the remaining addressing bytes (SIB/displacement) straight from the
       original guest instruction. */
    i = 3; /* standard offset of modrm bytes */
    if (pCpu->prefix == PREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->prefix == PREFIX_SEG)
        i++;    //skip segment prefix
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTGCPTR)((RTGCUINTPTR)pCurInstrGC + i), pCpu->opsize - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->opsize - i);

    pPB[offset++] = 0x66;               // mov ax, CPUMCTX.gdtr.limit
    pPB[offset++] = 0xA1;
    *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_limit;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTGCPTR);

    pPB[offset++] = 0x66;               // mov word ptr [edx],ax
    pPB[offset++] = 0x89;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0xA1;               // mov eax, CPUMCTX.gdtr.base
    *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_base;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTGCPTR);

    pPB[offset++] = 0x89;               // mov dword ptr [edx+2],eax
    pPB[offset++] = 0x42;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0x5A;               // pop edx
    pPB[offset++] = 0x58;               // pop eax

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}
1524
1525/**
1526 * Generate a cpuid patch instruction
1527 *
1528 * @returns VBox status code.
1529 * @param pVM The VM to operate on.
1530 * @param pPatch Patch record
1531 * @param pCurInstrGC Guest instruction address
1532 */
1533int patmPatchGenCpuid(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC)
1534{
1535 uint32_t size;
1536 PATCHGEN_PROLOG(pVM, pPatch);
1537
1538 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCpuidRecord, 0, false);
1539
1540 PATCHGEN_EPILOG(pPatch, size);
1541 return VINF_SUCCESS;
1542}
1543
1544/**
1545 * Generate the jump from guest to patch code
1546 *
1547 * @returns VBox status code.
1548 * @param pVM The VM to operate on.
1549 * @param pPatch Patch record
1550 * @param pTargetGC Guest target jump
1551 * @param fClearInhibitIRQs Clear inhibit irq flag
1552 */
1553int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, GCPTRTYPE(uint8_t *) pReturnAddrGC, bool fClearInhibitIRQs)
1554{
1555 int rc = VINF_SUCCESS;
1556 uint32_t size;
1557
1558 if (fClearInhibitIRQs)
1559 {
1560 rc = patmPatchGenClearInhibitIRQ(pVM, pPatch, pReturnAddrGC);
1561 if (rc == VERR_NO_MEMORY)
1562 return rc;
1563 AssertRCReturn(rc, rc);
1564 }
1565
1566 PATCHGEN_PROLOG(pVM, pPatch);
1567
1568 /* Add lookup record for patch to guest address translation */
1569 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
1570
1571 /* Generate code to jump to guest code if IF=1, else fault. */
1572 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
1573 PATCHGEN_EPILOG(pPatch, size);
1574
1575 return rc;
1576}
1577
1578/*
1579 * Relative jump from patch code to patch code (no fixup required)
1580 */
1581int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC, GCPTRTYPE(uint8_t *) pPatchAddrGC, bool fAddLookupRecord)
1582{
1583 int32_t displ;
1584 int rc = VINF_SUCCESS;
1585
1586 Assert(PATMIsPatchGCAddr(pVM, pPatchAddrGC));
1587 PATCHGEN_PROLOG(pVM, pPatch);
1588
1589 if (fAddLookupRecord)
1590 {
1591 /* Add lookup record for patch to guest address translation */
1592 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
1593 }
1594
1595 pPB[0] = 0xE9; //JMP
1596
1597 displ = pPatchAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + SIZEOF_NEARJUMP32);
1598
1599 *(uint32_t *)&pPB[1] = displ;
1600
1601 PATCHGEN_EPILOG(pPatch, SIZEOF_NEARJUMP32);
1602
1603 return rc;
1604}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette