VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp@ 41737

Last change on this file since 41737 was 41737, checked in by vboxsync, 12 years ago

DISOPCODE: s/opcode/uOpcode/

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 52.3 KB
Line 
1/* $Id: PATMPatch.cpp 41737 2012-06-15 01:01:49Z vboxsync $ */
2/** @file
3 * PATMPatch - Dynamic Guest OS Instruction patches
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/trpm.h>
30#include <VBox/param.h>
31#include <iprt/avl.h>
32#include "PATMInternal.h"
33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/csam.h>
35
36#include <VBox/dbg.h>
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/string.h>
42#include <VBox/dis.h>
43#include <VBox/disopcode.h>
44
45#include <stdlib.h>
46#include <stdio.h>
47#include "PATMA.h"
48#include "PATMPatch.h"
49
/* Internal structure for passing more information about call fixups to patmPatchGenCode. */
typedef struct
{
    RTRCPTR pTargetGC;      /* Guest context address of the call/jump target (PATM_CALLTARGET). */
    RTRCPTR pCurInstrGC;    /* Guest context address of the instruction being patched (PATM_CURINSTRADDR). */
    RTRCPTR pNextInstrGC;   /* Guest context address of the next instruction (PATM_NEXTINSTRADDR); may be 0. */
    RTRCPTR pReturnGC;      /* Guest context return address for generated calls (PATM_RETURNADDR). */
} PATMCALLINFO, *PPATMCALLINFO;
58
59int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType, RTRCPTR pSource, RTRCPTR pDest)
60{
61 PRELOCREC pRec;
62
63 Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
64
65 LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemGC , pSource, pDest));
66
67 pRec = (PRELOCREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
68 Assert(pRec);
69 pRec->Core.Key = (AVLPVKEY)pRelocHC;
70 pRec->pRelocPos = pRelocHC; /* @todo redundant. */
71 pRec->pSource = pSource;
72 pRec->pDest = pDest;
73 pRec->uType = uType;
74
75 bool ret = RTAvlPVInsert(&pPatch->FixupTree, &pRec->Core);
76 Assert(ret); NOREF(ret);
77 pPatch->nrFixups++;
78
79 return VINF_SUCCESS;
80}
81
82int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTRCPTR pTargetGC, uint32_t opcode)
83{
84 PJUMPREC pRec;
85
86 pRec = (PJUMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
87 Assert(pRec);
88
89 pRec->Core.Key = (AVLPVKEY)pJumpHC;
90 pRec->pJumpHC = pJumpHC; /* @todo redundant. */
91 pRec->offDispl = offset;
92 pRec->pTargetGC = pTargetGC;
93 pRec->opcode = opcode;
94
95 bool ret = RTAvlPVInsert(&pPatch->JumpTree, &pRec->Core);
96 Assert(ret); NOREF(ret);
97 pPatch->nrJumpRecs++;
98
99 return VINF_SUCCESS;
100}
101
/**
 * Patch generator prolog without the local variable definition: positions pPB
 * (host context write pointer) at the current end of the patch block and bails
 * out with VERR_NO_MEMORY when fewer than 256 bytes of patch memory remain,
 * marking the PATM instance as out of memory.
 */
#define PATCHGEN_PROLOG_NODEF(pVM, pPatch) \
    pPB = PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset; \
    \
    if (pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) \
    { \
        pVM->patm.s.fOutOfMemory = true; \
        Assert(pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem); \
        return VERR_NO_MEMORY; \
    }

/** Patch generator prolog: declares pPB and positions it (see above). */
#define PATCHGEN_PROLOG(pVM, pPatch) \
    uint8_t *pPB; \
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);


/** Patch generator epilog: advances the patch write offset by @a size bytes. */
#define PATCHGEN_EPILOG(pPatch, size) \
    Assert(size <= 640); \
    pPatch->uCurPatchOffset += size;
121
/**
 * Instantiates a canned assembly template (PATCHASMRECORD) into patch memory
 * and resolves all its fixups.
 *
 * The template's uReloc array holds (magic, extra) pairs: uReloc[i] is a magic
 * marker dword embedded in the template code, uReloc[i+1] optional extra data
 * (only used by PATM_FIXUP).  The code below scans the copied bytes for each
 * marker in order and replaces it with the resolved address/value.
 *
 * @returns Size (bytes) of the generated code, excluding the trailing jump when
 *          fGenJump is false but the template contains one.
 * @param   pVM             The VM to operate on.
 * @param   pPatch          Patch record.
 * @param   pPB             Host context pointer to the patch code destination.
 * @param   pAsmRecord      Assembly template to instantiate.
 * @param   pReturnAddrGC   Guest address to jump back to (only when fGenJump).
 * @param   fGenJump        Whether to generate the jump back to guest code.
 * @param   pCallInfo       Optional extra call information (targets, return address).
 */
static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PPATCHASMRECORD pAsmRecord, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fGenJump,
                                 PPATMCALLINFO pCallInfo = 0)
{
    uint32_t i, j;

    Assert(fGenJump == false || pReturnAddrGC);
    Assert(fGenJump == false || pAsmRecord->offJump);
    Assert(pAsmRecord && pAsmRecord->size > sizeof(pAsmRecord->uReloc[0]));

    // Copy the code block
    memcpy(pPB, pAsmRecord->pFunction, pAsmRecord->size);

    // Process all fixups; j is NOT reset per marker: markers are assumed to
    // appear in the template in uReloc order, so the scan resumes where the
    // previous marker was found.
    for (j=0,i=0;i<pAsmRecord->nrRelocs*2; i+=2)
    {
        for (;j<pAsmRecord->size;j++)
        {
            if (*(uint32_t*)&pPB[j] == pAsmRecord->uReloc[i])
            {
                RCPTRTYPE(uint32_t *) dest;

#ifdef VBOX_STRICT
                /* Only PATM_FIXUP carries extra data in uReloc[i+1]. */
                if (pAsmRecord->uReloc[i] == PATM_FIXUP)
                    Assert(pAsmRecord->uReloc[i+1] != 0);
                else
                    Assert(pAsmRecord->uReloc[i+1] == 0);
#endif

                /**
                 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING A SAVED STATE WITH
                 * A DIFFERENT HYPERVISOR LAYOUT.
                 */
                switch (pAsmRecord->uReloc[i])
                {
                case PATM_VMFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
                    break;

                case PATM_PENDINGACTION:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
                    break;

                case PATM_FIXUP:
                    /* Offset in uReloc[i+1] is from the base of the function. */
                    dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->uReloc[i+1] + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
                    break;
#ifdef VBOX_WITH_STATISTICS
                case PATM_ALLPATCHCALLS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
                    break;

                case PATM_IRETEFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
                    break;

                case PATM_IRETCS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
                    break;

                case PATM_IRETEIP:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
                    break;

                case PATM_PERPATCHCALLS:
                    dest = patmPatchQueryStatAddress(pVM, pPatch);
                    break;
#endif
                case PATM_STACKPTR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
                    break;

                /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
                 * part to store the original return addresses.
                 */
                case PATM_STACKBASE:
                    dest = pVM->patm.s.pGCStackGC;
                    break;

                case PATM_STACKBASE_GUEST:
                    dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
                    break;

                case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);
                    dest = pCallInfo->pReturnGC;
                    break;

                case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                    /** @note hardcoded assumption that we must return to the instruction following this block */
                    dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->size;
                    break;

                case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                    /* Address must be filled in later. (see patmr3SetBranchTargets)  */
                    patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
                    dest = PATM_ILLEGAL_DESTINATION;
                    break;

                case PATM_PATCHBASE:    /* Patch GC base address */
                    dest = pVM->patm.s.pPatchMemGC;
                    break;

                case PATM_CPUID_STD_PTR:
                    /** @todo dirty hack when correcting this fixup (state restore) */
                    dest = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                    break;

                case PATM_CPUID_EXT_PTR:
                    /** @todo dirty hack when correcting this fixup (state restore) */
                    dest = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                    break;

                case PATM_CPUID_CENTAUR_PTR:
                    /** @todo dirty hack when correcting this fixup (state restore) */
                    dest = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                    break;

                case PATM_CPUID_DEF_PTR:
                    /** @todo dirty hack when correcting this fixup (state restore) */
                    dest = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                    break;

                case PATM_CPUID_STD_MAX:
                    dest = CPUMGetGuestCpuIdStdMax(pVM);
                    break;

                case PATM_CPUID_EXT_MAX:
                    dest = CPUMGetGuestCpuIdExtMax(pVM);
                    break;

                case PATM_CPUID_CENTAUR_MAX:
                    dest = CPUMGetGuestCpuIdCentaurMax(pVM);
                    break;

                case PATM_INTERRUPTFLAG:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
                    break;

                case PATM_INHIBITIRQADDR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
                    break;

                case PATM_NEXTINSTRADDR:
                    Assert(pCallInfo);
                    /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
                    dest = pCallInfo->pNextInstrGC;
                    break;

                case PATM_CURINSTRADDR:
                    Assert(pCallInfo);
                    dest = pCallInfo->pCurInstrGC;
                    break;

                case PATM_VM_FORCEDACTIONS:
                    /** @todo dirty assumptions when correcting this fixup during saved state loading. */
                    dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                    break;

                /* Scratch fields used by the assembly templates to save/restore guest registers. */
                case PATM_TEMP_EAX:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
                    break;
                case PATM_TEMP_ECX:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
                    break;
                case PATM_TEMP_EDI:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
                    break;
                case PATM_TEMP_EFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
                    break;
                case PATM_TEMP_RESTORE_FLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
                    break;
                case PATM_CALL_PATCH_TARGET_ADDR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
                    break;
                case PATM_CALL_RETURN_ADDR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
                    break;

                /* Relative address of global patm lookup and call function. */
                case PATM_LOOKUP_AND_CALL_FUNCTION:
                {
                    RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperCallGC);
                    Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
                    break;
                }

                case PATM_RETURN_FUNCTION:
                {
                    RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperRetGC);
                    Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
                    break;
                }

                case PATM_IRET_FUNCTION:
                {
                    RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperIretGC);
                    Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
                    break;
                }

                case PATM_LOOKUP_AND_JUMP_FUNCTION:
                {
                    RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperJumpGC);
                    Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
                    break;
                }

                default:
                    dest = PATM_ILLEGAL_DESTINATION;
                    AssertRelease(0);
                    break;
                }

                *(RTRCPTR *)&pPB[j] = dest;
                /* Only values below PATM_NO_FIXUP need a relocation record for save/restore. */
                if (pAsmRecord->uReloc[i] < PATM_NO_FIXUP)
                {
                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE);
                }
                break;
            }
        }
        Assert(j < pAsmRecord->size);
    }
    /* The uReloc array is terminated with 0xffffffff. */
    Assert(pAsmRecord->uReloc[i] == 0xffffffff);

    /* Add the jump back to guest code (if required) */
    if (fGenJump)
    {
        /* Displacement is relative to the end of the near jump (E9 xx xx xx xx) at offJump-1. */
        int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);

        /* Add lookup record for patch to guest address translation */
        Assert(pPB[pAsmRecord->offJump - 1] == 0xE9);
        patmr3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

        *(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
        patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
                            PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32,
                            pReturnAddrGC);
    }

    // Calculate the right size of this patch block
    if ((fGenJump && pAsmRecord->offJump) || (!fGenJump && !pAsmRecord->offJump))
    {
        return pAsmRecord->size;
    }
    else {
        // if a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32
        return pAsmRecord->size - SIZEOF_NEARJUMP32;
    }
}
394
395/* Read bytes and check for overwritten instructions. */
396static int patmPatchReadBytes(PVM pVM, uint8_t *pDest, RTRCPTR pSrc, uint32_t cb)
397{
398 int rc = PGMPhysSimpleReadGCPtr(&pVM->aCpus[0], pDest, pSrc, cb);
399 AssertRCReturn(rc, rc);
400 /*
401 * Could be patched already; make sure this is checked!
402 */
403 for (uint32_t i=0;i<cb;i++)
404 {
405 uint8_t temp;
406
407 int rc2 = PATMR3QueryOpcode(pVM, pSrc+i, &temp);
408 if (RT_SUCCESS(rc2))
409 {
410 pDest[i] = temp;
411 }
412 else
413 break; /* no more */
414 }
415 return VINF_SUCCESS;
416}
417
418int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
419{
420 int rc = VINF_SUCCESS;
421 PATCHGEN_PROLOG(pVM, pPatch);
422
423 uint32_t const cbInstrShutUpGcc = pCpu->cbInstr;
424 rc = patmPatchReadBytes(pVM, pPB, pCurInstrGC, cbInstrShutUpGcc);
425 AssertRC(rc);
426 PATCHGEN_EPILOG(pPatch, cbInstrShutUpGcc);
427 return rc;
428}
429
430int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, bool fSizeOverride)
431{
432 uint32_t size;
433 PATMCALLINFO callInfo;
434
435 PATCHGEN_PROLOG(pVM, pPatch);
436
437 AssertMsg(fSizeOverride == false, ("operand size override!!\n"));
438
439 callInfo.pCurInstrGC = pCurInstrGC;
440
441 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);
442
443 PATCHGEN_EPILOG(pPatch, size);
444 return VINF_SUCCESS;
445}
446
447int patmPatchGenCli(PVM pVM, PPATCHINFO pPatch)
448{
449 uint32_t size;
450 PATCHGEN_PROLOG(pVM, pPatch);
451
452 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCliRecord, 0, false);
453
454 PATCHGEN_EPILOG(pPatch, size);
455 return VINF_SUCCESS;
456}
457
458/*
459 * Generate an STI patch
460 */
461int patmPatchGenSti(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RTRCPTR pNextInstrGC)
462{
463 PATMCALLINFO callInfo;
464 uint32_t size;
465
466 Log(("patmPatchGenSti at %RRv; next %RRv\n", pCurInstrGC, pNextInstrGC));
467 PATCHGEN_PROLOG(pVM, pPatch);
468 callInfo.pNextInstrGC = pNextInstrGC;
469 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStiRecord, 0, false, &callInfo);
470 PATCHGEN_EPILOG(pPatch, size);
471
472 return VINF_SUCCESS;
473}
474
475
476int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
477{
478 uint32_t size;
479 PATMCALLINFO callInfo;
480
481 PATCHGEN_PROLOG(pVM, pPatch);
482
483 callInfo.pNextInstrGC = pReturnAddrGC;
484
485 Log(("patmPatchGenPopf at %RRv\n", pReturnAddrGC));
486
487 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
488 if (fSizeOverride == true)
489 {
490 Log(("operand size override!!\n"));
491 size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf16Record : &PATMPopf16Record_NoExit , pReturnAddrGC, fGenJumpBack, &callInfo);
492 }
493 else
494 {
495 size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf32Record : &PATMPopf32Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
496 }
497
498 PATCHGEN_EPILOG(pPatch, size);
499 STAM_COUNTER_INC(&pVM->patm.s.StatGenPopf);
500 return VINF_SUCCESS;
501}
502
503int patmPatchGenPushf(PVM pVM, PPATCHINFO pPatch, bool fSizeOverride)
504{
505 uint32_t size;
506 PATCHGEN_PROLOG(pVM, pPatch);
507
508 if (fSizeOverride == true)
509 {
510 Log(("operand size override!!\n"));
511 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf16Record, 0, false);
512 }
513 else
514 {
515 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf32Record, 0, false);
516 }
517
518 PATCHGEN_EPILOG(pPatch, size);
519 return VINF_SUCCESS;
520}
521
522int patmPatchGenPushCS(PVM pVM, PPATCHINFO pPatch)
523{
524 uint32_t size;
525 PATCHGEN_PROLOG(pVM, pPatch);
526 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushCSRecord, 0, false);
527 PATCHGEN_EPILOG(pPatch, size);
528 return VINF_SUCCESS;
529}
530
531int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
532{
533 uint32_t size = 0;
534 PPATCHASMRECORD pPatchAsmRec;
535
536 PATCHGEN_PROLOG(pVM, pPatch);
537
538 switch (opcode)
539 {
540 case OP_LOOP:
541 pPatchAsmRec = &PATMLoopRecord;
542 break;
543 case OP_LOOPNE:
544 pPatchAsmRec = &PATMLoopNZRecord;
545 break;
546 case OP_LOOPE:
547 pPatchAsmRec = &PATMLoopZRecord;
548 break;
549 case OP_JECXZ:
550 pPatchAsmRec = &PATMJEcxRecord;
551 break;
552 default:
553 AssertMsgFailed(("PatchGenLoop: invalid opcode %d\n", opcode));
554 return VERR_INVALID_PARAMETER;
555 }
556 Assert(pPatchAsmRec->offSizeOverride && pPatchAsmRec->offRelJump);
557
558 Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));
559
560 // Generate the patch code
561 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);
562
563 if (fSizeOverride)
564 {
565 pPB[pPatchAsmRec->offSizeOverride] = 0x66; // ecx -> cx or vice versa
566 }
567
568 *(RTRCPTR *)&pPB[pPatchAsmRec->offRelJump] = 0xDEADBEEF;
569
570 patmPatchAddJump(pVM, pPatch, &pPB[pPatchAsmRec->offRelJump - 1], 1, pTargetGC, opcode);
571
572 PATCHGEN_EPILOG(pPatch, size);
573 return VINF_SUCCESS;
574}
575
/**
 * Generates a relative jump from patch code to guest code (the real
 * displacement is resolved later via the jump record).
 *
 * For conditional jumps a two-byte 0F 8x Jcc rel32 is emitted (pPB[1] is
 * written in the switch, pPB[0]=0x0F afterwards); unconditional OP_JMP emits
 * E9 rel32; LOOP/JECXZ variants are delegated to patmPatchGenLoop.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   pPatch          Patch record.
 * @param   pTargetGC       Guest context jump target.
 * @param   opcode          Jump opcode (OP_Jcc/OP_JMP/OP_LOOP...).
 * @param   fSizeOverride   Operand size override (loop variants only).
 */
int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
{
    uint32_t offset = 0;
    PATCHGEN_PROLOG(pVM, pPatch);

    // internal relative jumps from patch code to patch code; no relocation record required

    Assert(PATMIsPatchGCAddr(pVM, pTargetGC) == false);

    /* Second opcode byte of the 0F 8x near conditional jump encoding. */
    switch (opcode)
    {
    case OP_JO:
        pPB[1] = 0x80;
        break;
    case OP_JNO:
        pPB[1] = 0x81;
        break;
    case OP_JC:
        pPB[1] = 0x82;
        break;
    case OP_JNC:
        pPB[1] = 0x83;
        break;
    case OP_JE:
        pPB[1] = 0x84;
        break;
    case OP_JNE:
        pPB[1] = 0x85;
        break;
    case OP_JBE:
        pPB[1] = 0x86;
        break;
    case OP_JNBE:
        pPB[1] = 0x87;
        break;
    case OP_JS:
        pPB[1] = 0x88;
        break;
    case OP_JNS:
        pPB[1] = 0x89;
        break;
    case OP_JP:
        pPB[1] = 0x8A;
        break;
    case OP_JNP:
        pPB[1] = 0x8B;
        break;
    case OP_JL:
        pPB[1] = 0x8C;
        break;
    case OP_JNL:
        pPB[1] = 0x8D;
        break;
    case OP_JLE:
        pPB[1] = 0x8E;
        break;
    case OP_JNLE:
        pPB[1] = 0x8F;
        break;

    case OP_JMP:
        /* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
        /* Add lookup record for patch to guest address translation */
        patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);

        pPB[0] = 0xE9;  /* jmp rel32 */
        break;

    case OP_JECXZ:
    case OP_LOOP:
    case OP_LOOPNE:
    case OP_LOOPE:
        return patmPatchGenLoop(pVM, pPatch, pTargetGC, opcode, fSizeOverride);

    default:
        AssertMsg(0, ("Invalid jump opcode %d\n", opcode));
        return VERR_PATCHING_REFUSED;
    }
    if (opcode != OP_JMP)
    {
        pPB[0] = 0xF;   /* first byte of the two-byte Jcc encoding */
        offset += 2;
    }
    else offset++;

    /* Dummy displacement; the jump record below lets it be patched to the real target later. */
    *(RTRCPTR *)&pPB[offset] = 0xDEADBEEF;

    patmPatchAddJump(pVM, pPatch, pPB, offset, pTargetGC, opcode);

    offset += sizeof(RTRCPTR);

    PATCHGEN_EPILOG(pPatch, offset);
    return VINF_SUCCESS;
}
670
/*
 * Rewrite call to dynamic or currently unknown function (on-demand patching of function)
 */
/**
 * Generates patch code for a direct or indirect near call: the target address
 * is pushed onto the stack and a canned lookup/call helper block is appended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record.
 * @param   pCpu        Disassembly state of the call instruction.
 * @param   pCurInstrGC Guest context address of the call instruction.
 * @param   pTargetGC   Call target (direct calls; ignored when fIndirect).
 * @param   fIndirect   Set for indirect calls (call r/m32).
 */
int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC, RTRCPTR pTargetGC, bool fIndirect)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /** @note Don't check for IF=1 here. The ret instruction will do this. */
    /** @note It's dangerous to do this for 'normal' patches. the jump target might be inside the generated patch jump. (seen this!) */

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    if (fIndirect)
    {
        Log(("patmPatchGenIndirectCall\n"));
        Assert(pCpu->param1.cb == 4);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J);

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        /* include prefix byte to make sure we don't use the incorrect selector register. */
        if (pCpu->fPrefix & DISPREFIX_SEG)
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        pPB[offset++] = 0xFF;              // push r/m32
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
        i = 2;  /* standard offset of modrm bytes */
        if (pCpu->fPrefix & DISPREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->fPrefix & DISPREFIX_SEG)
            i++;    //skip segment prefix

        /* Copy the remaining operand bytes (SIB/displacement) of the original call. */
        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->cbInstr - i);
    }
    else
    {
        AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%RRv)?!?\n", pTargetGC));
        Assert(pTargetGC);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J);

        /** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */

        /* Relative call to patch code (patch to patch -> no fixup). */
        Log(("PatchGenCall from %RRv (next=%RRv) to %RRv\n", pCurInstrGC, pCurInstrGC + pCpu->cbInstr, pTargetGC));

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        pPB[offset++] = 0x68;              // push %Iv (push imm32)
        *(RTRCPTR *)&pPB[offset] = pTargetGC;
        offset += sizeof(RTRCPTR);
    }

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
    callInfo.pTargetGC = (fIndirect) ? 0xDEADBEEF : pTargetGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, (fIndirect) ? &PATMCallIndirectRecord : &PATMCallRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    /* Need to set PATM_INTERRUPTFLAG after the patched ret returns here. */
    rc = patmPatchGenSetPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenCall);
    return VINF_SUCCESS;
}
765
/**
 * Generate indirect jump to unknown destination.
 *
 * The jump target is pushed onto the stack and a canned lookup/jump helper
 * block is appended (same scheme as the indirect-call path in
 * patmPatchGenCall).
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Current instruction address
 */
int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    Log(("patmPatchGenIndirectJump\n"));
    Assert(pCpu->param1.cb == 4);
    Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J);

    /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
     * a page fault. The assembly code restores the stack afterwards.
     */
    offset = 0;
    /* include prefix byte to make sure we don't use the incorrect selector register. */
    if (pCpu->fPrefix & DISPREFIX_SEG)
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);

    pPB[offset++] = 0xFF;              // push r/m32
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
    i = 2;  /* standard offset of modrm bytes */
    if (pCpu->fPrefix & DISPREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->fPrefix & DISPREFIX_SEG)
        i++;    //skip segment prefix

    /* Copy the remaining operand bytes (SIB/displacement) of the original jump. */
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->cbInstr - i);

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
    callInfo.pTargetGC = 0xDEADBEEF;
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpIndirectRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenJump);
    return VINF_SUCCESS;
}
836
/**
 * Generate return instruction.
 *
 * Generates the IF check, the canned return helper block and a duplicate of
 * the original ret/ret n instruction.  Identical rets within the same patch
 * (same immediate operand) share one generated block via
 * pTempInfo->pPatchRetInstrGC.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch structure
 * @param   pCpu        Disassembly struct
 * @param   pCurInstrGC Current instruction pointer
 *
 */
int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int size = 0, rc;
    RTRCPTR pPatchRetInstrGC;

    /* Remember start of this patch for below. */
    pPatchRetInstrGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;

    Log(("patmPatchGenRet %RRv\n", pCurInstrGC));

    /** @note optimization: multiple identical ret instruction in a single patch can share a single patched ret. */
    if (    pPatch->pTempInfo->pPatchRetInstrGC
        &&  pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->param1.parval) /* nr of bytes popped off the stack should be identical of course! */
    {
        Assert(pCpu->pCurInstr->uOpcode == OP_RETN);
        STAM_COUNTER_INC(&pVM->patm.s.StatGenRetReused);

        return patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, pPatch->pTempInfo->pPatchRetInstrGC);
    }

    /* Jump back to the original instruction if IF is set again. */
    Assert(!PATMFindActivePatchByEntrypoint(pVM, pCurInstrGC));
    rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
    AssertRCReturn(rc, rc);

    /* align this block properly to make sure the jump table will not be misaligned. */
    PATCHGEN_PROLOG(pVM, pPatch);
    size = (RTHCUINTPTR)pPB & 3;
    if (size)
        size = 4 - size;

    for (int i=0;i<size;i++)
        pPB[i] = 0x90;   /* nop */
    PATCHGEN_EPILOG(pPatch, size);

    /* NODEF variant: pPB was already declared by the alignment prolog above. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenRet);
    /* Duplicate the ret or ret n instruction; it will use the PATM return address */
    rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);

    if (rc == VINF_SUCCESS)
    {
        /* Cache this generated ret block so identical rets in this patch can reuse it. */
        pPatch->pTempInfo->pPatchRetInstrGC = pPatchRetInstrGC;
        pPatch->pTempInfo->uPatchRetParam1  = pCpu->param1.parval;
    }
    return rc;
}
897
/**
 * Generate all global patm functions.
 *
 * Emits the shared helper blocks (lookup-and-call, return, lookup-and-jump,
 * iret) back to back into the patch block and records their GC entry points
 * in pVM->patm.s.pfnHelper*GC.  Each helper is aligned on an 8 byte boundary.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch structure
 *
 */
int patmPatchGenGlobalFunctions(PVM pVM, PPATCHINFO pPatch)
{
    int size = 0;

    /* Helper 1: local-cache lookup + call (used by generated call patches). */
    pVM->patm.s.pfnHelperCallGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndCallRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Helper 2: return handling (NODEF: pPB already declared above). */
    pVM->patm.s.pfnHelperRetGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Helper 3: local-cache lookup + jump (used by indirect jump patches). */
    pVM->patm.s.pfnHelperJumpGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndJumpRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Helper 4: iret handling. */
    pVM->patm.s.pfnHelperIretGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    Log(("pfnHelperCallGC %RRv\n", pVM->patm.s.pfnHelperCallGC));
    Log(("pfnHelperRetGC  %RRv\n", pVM->patm.s.pfnHelperRetGC));
    Log(("pfnHelperJumpGC %RRv\n", pVM->patm.s.pfnHelperJumpGC));
    Log(("pfnHelperIretGC %RRv\n", pVM->patm.s.pfnHelperIretGC));

    return VINF_SUCCESS;
}
946
947/**
948 * Generate illegal instruction (int 3)
949 *
950 * @returns VBox status code.
951 * @param pVM The VM to operate on.
952 * @param pPatch Patch structure
953 *
954 */
955int patmPatchGenIllegalInstr(PVM pVM, PPATCHINFO pPatch)
956{
957 PATCHGEN_PROLOG(pVM, pPatch);
958
959 pPB[0] = 0xCC;
960
961 PATCHGEN_EPILOG(pPatch, 1);
962 return VINF_SUCCESS;
963}
964
965/**
966 * Check virtual IF flag and jump back to original guest code if set
967 *
968 * @returns VBox status code.
969 * @param pVM The VM to operate on.
970 * @param pPatch Patch structure
971 * @param pCurInstrGC Guest context pointer to the current instruction
972 *
973 */
974int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
975{
976 uint32_t size;
977
978 PATCHGEN_PROLOG(pVM, pPatch);
979
980 /* Add lookup record for patch to guest address translation */
981 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
982
983 /* Generate code to check for IF=1 before executing the call to the duplicated function. */
984 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCheckIFRecord, pCurInstrGC, true);
985
986 PATCHGEN_EPILOG(pPatch, size);
987 return VINF_SUCCESS;
988}
989
990/**
991 * Set PATM interrupt flag
992 *
993 * @returns VBox status code.
994 * @param pVM The VM to operate on.
995 * @param pPatch Patch structure
996 * @param pInstrGC Corresponding guest instruction
997 *
998 */
999int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1000{
1001 PATCHGEN_PROLOG(pVM, pPatch);
1002
1003 /* Add lookup record for patch to guest address translation */
1004 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1005
1006 int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
1007 PATCHGEN_EPILOG(pPatch, size);
1008 return VINF_SUCCESS;
1009}
1010
1011/**
1012 * Clear PATM interrupt flag
1013 *
1014 * @returns VBox status code.
1015 * @param pVM The VM to operate on.
1016 * @param pPatch Patch structure
1017 * @param pInstrGC Corresponding guest instruction
1018 *
1019 */
1020int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1021{
1022 PATCHGEN_PROLOG(pVM, pPatch);
1023
1024 /* Add lookup record for patch to guest address translation */
1025 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1026
1027 int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
1028 PATCHGEN_EPILOG(pPatch, size);
1029 return VINF_SUCCESS;
1030}
1031
1032
1033/**
1034 * Clear PATM inhibit irq flag
1035 *
1036 * @returns VBox status code.
1037 * @param pVM The VM to operate on.
1038 * @param pPatch Patch structure
1039 * @param pNextInstrGC Next guest instruction
1040 */
1041int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTRCPTR pNextInstrGC)
1042{
1043 int size;
1044 PATMCALLINFO callInfo;
1045
1046 PATCHGEN_PROLOG(pVM, pPatch);
1047
1048 Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));
1049
1050 /* Add lookup record for patch to guest address translation */
1051 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);
1052
1053 callInfo.pNextInstrGC = pNextInstrGC;
1054
1055 if (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1056 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQContIF0Record, 0, false, &callInfo);
1057 else
1058 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQFaultIF0Record, 0, false, &callInfo);
1059
1060 PATCHGEN_EPILOG(pPatch, size);
1061 return VINF_SUCCESS;
1062}
1063
1064/**
1065 * Generate an interrupt handler entrypoint
1066 *
1067 * @returns VBox status code.
1068 * @param pVM The VM to operate on.
1069 * @param pPatch Patch record
1070 * @param pIntHandlerGC IDT handler address
1071 *
1072 ** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
1073 */
1074int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
1075{
1076 uint32_t size;
1077 int rc = VINF_SUCCESS;
1078
1079 PATCHGEN_PROLOG(pVM, pPatch);
1080
1081 /* Add lookup record for patch to guest address translation */
1082 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1083
1084 /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
1085 size = patmPatchGenCode(pVM, pPatch, pPB,
1086 (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
1087 0, false);
1088
1089 PATCHGEN_EPILOG(pPatch, size);
1090
1091 // Interrupt gates set IF to 0
1092 rc = patmPatchGenCli(pVM, pPatch);
1093 AssertRCReturn(rc, rc);
1094
1095 return rc;
1096}
1097
1098/**
1099 * Generate a trap handler entrypoint
1100 *
1101 * @returns VBox status code.
1102 * @param pVM The VM to operate on.
1103 * @param pPatch Patch record
1104 * @param pTrapHandlerGC IDT handler address
1105 */
1106int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTrapHandlerGC)
1107{
1108 uint32_t size;
1109
1110 PATCHGEN_PROLOG(pVM, pPatch);
1111
1112 /* Add lookup record for patch to guest address translation */
1113 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1114
1115 /* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
1116 size = patmPatchGenCode(pVM, pPatch, pPB,
1117 (pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE) ? &PATMTrapEntryRecordErrorCode : &PATMTrapEntryRecord,
1118 pTrapHandlerGC, true);
1119 PATCHGEN_EPILOG(pPatch, size);
1120
1121 return VINF_SUCCESS;
1122}
1123
#ifdef VBOX_WITH_STATISTICS
/**
 * Generate code that updates the call statistics for this patch.
 *
 * @returns VBox status code.
 * @param   pVM      The VM to operate on.
 * @param   pPatch   Patch record
 * @param   pInstrGC Corresponding guest instruction
 */
int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    uint32_t cbCode;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Lookup record: stats code -> guest handler. */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Emit the statistics-keeping stub. */
    cbCode = patmPatchGenCode(pVM, pPatch, pPB, &PATMStatsRecord, pInstrGC, false);
    PATCHGEN_EPILOG(pPatch, cbCode);

    return VINF_SUCCESS;
}
#endif /* VBOX_WITH_STATISTICS */
1141
1142/**
1143 * Debug register moves to or from general purpose registers
1144 * mov GPR, DRx
1145 * mov DRx, GPR
1146 *
1147 * @todo: if we ever want to support hardware debug registers natively, then
1148 * this will need to be changed!
1149 */
1150int patmPatchGenMovDebug(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
1151{
1152 int rc = VINF_SUCCESS;
1153 unsigned reg, mod, rm, dbgreg;
1154 uint32_t offset;
1155
1156 PATCHGEN_PROLOG(pVM, pPatch);
1157
1158 mod = 0; //effective address (only)
1159 rm = 5; //disp32
1160 if (pCpu->pCurInstr->param1 == OP_PARM_Dd)
1161 {
1162 Assert(0); // You not come here. Illegal!
1163
1164 // mov DRx, GPR
1165 pPB[0] = 0x89; //mov disp32, GPR
1166 Assert(pCpu->param1.fUse & DISUSE_REG_DBG);
1167 Assert(pCpu->param2.fUse & DISUSE_REG_GEN32);
1168
1169 dbgreg = pCpu->param1.base.reg_dbg;
1170 reg = pCpu->param2.base.reg_gen;
1171 }
1172 else
1173 {
1174 // mov GPR, DRx
1175 Assert(pCpu->param1.fUse & DISUSE_REG_GEN32);
1176 Assert(pCpu->param2.fUse & DISUSE_REG_DBG);
1177
1178 pPB[0] = 0x8B; // mov GPR, disp32
1179 reg = pCpu->param1.base.reg_gen;
1180 dbgreg = pCpu->param2.base.reg_dbg;
1181 }
1182
1183 pPB[1] = MAKE_MODRM(mod, reg, rm);
1184
1185 AssertReturn(dbgreg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1186 offset = RT_OFFSETOF(CPUMCTX, dr[dbgreg]);
1187
1188 *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
1189 patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);
1190
1191 PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
1192 return rc;
1193}
1194
1195/*
1196 * Control register moves to or from general purpose registers
1197 * mov GPR, CRx
1198 * mov CRx, GPR
1199 */
1200int patmPatchGenMovControl(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
1201{
1202 int rc = VINF_SUCCESS;
1203 int reg, mod, rm, ctrlreg;
1204 uint32_t offset;
1205
1206 PATCHGEN_PROLOG(pVM, pPatch);
1207
1208 mod = 0; //effective address (only)
1209 rm = 5; //disp32
1210 if (pCpu->pCurInstr->param1 == OP_PARM_Cd)
1211 {
1212 Assert(0); // You not come here. Illegal!
1213
1214 // mov CRx, GPR
1215 pPB[0] = 0x89; //mov disp32, GPR
1216 ctrlreg = pCpu->param1.base.reg_ctrl;
1217 reg = pCpu->param2.base.reg_gen;
1218 Assert(pCpu->param1.fUse & DISUSE_REG_CR);
1219 Assert(pCpu->param2.fUse & DISUSE_REG_GEN32);
1220 }
1221 else
1222 {
1223 // mov GPR, DRx
1224 Assert(pCpu->param1.fUse & DISUSE_REG_GEN32);
1225 Assert(pCpu->param2.fUse & DISUSE_REG_CR);
1226
1227 pPB[0] = 0x8B; // mov GPR, disp32
1228 reg = pCpu->param1.base.reg_gen;
1229 ctrlreg = pCpu->param2.base.reg_ctrl;
1230 }
1231
1232 pPB[1] = MAKE_MODRM(mod, reg, rm);
1233
1234 /// @todo: make this an array in the context structure
1235 switch (ctrlreg)
1236 {
1237 case DISCREG_CR0:
1238 offset = RT_OFFSETOF(CPUMCTX, cr0);
1239 break;
1240 case DISCREG_CR2:
1241 offset = RT_OFFSETOF(CPUMCTX, cr2);
1242 break;
1243 case DISCREG_CR3:
1244 offset = RT_OFFSETOF(CPUMCTX, cr3);
1245 break;
1246 case DISCREG_CR4:
1247 offset = RT_OFFSETOF(CPUMCTX, cr4);
1248 break;
1249 default: /* Shut up compiler warning. */
1250 AssertFailed();
1251 offset = 0;
1252 break;
1253 }
1254 *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
1255 patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);
1256
1257 PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
1258 return rc;
1259}
1260
1261/*
1262 * mov GPR, SS
1263 */
1264int patmPatchGenMovFromSS(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
1265{
1266 uint32_t size, offset;
1267
1268 Log(("patmPatchGenMovFromSS %RRv\n", pCurInstrGC));
1269
1270 Assert(pPatch->flags & PATMFL_CODE32);
1271
1272 PATCHGEN_PROLOG(pVM, pPatch);
1273 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
1274 PATCHGEN_EPILOG(pPatch, size);
1275
1276 /* push ss */
1277 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
1278 offset = 0;
1279 if (pCpu->fPrefix & DISPREFIX_OPSIZE)
1280 pPB[offset++] = 0x66; /* size override -> 16 bits push */
1281 pPB[offset++] = 0x16;
1282 PATCHGEN_EPILOG(pPatch, offset);
1283
1284 /* checks and corrects RPL of pushed ss*/
1285 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
1286 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMMovFromSSRecord, 0, false);
1287 PATCHGEN_EPILOG(pPatch, size);
1288
1289 /* pop general purpose register */
1290 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
1291 offset = 0;
1292 if (pCpu->fPrefix & DISPREFIX_OPSIZE)
1293 pPB[offset++] = 0x66; /* size override -> 16 bits pop */
1294 pPB[offset++] = 0x58 + pCpu->param1.base.reg_gen;
1295 PATCHGEN_EPILOG(pPatch, offset);
1296
1297
1298 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
1299 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
1300 PATCHGEN_EPILOG(pPatch, size);
1301
1302 return VINF_SUCCESS;
1303}
1304
1305
1306/**
1307 * Generate an sldt or str patch instruction
1308 *
1309 * @returns VBox status code.
1310 * @param pVM The VM to operate on.
1311 * @param pPatch Patch record
1312 * @param pCpu Disassembly state
1313 * @param pCurInstrGC Guest instruction address
1314 */
1315int patmPatchGenSldtStr(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
1316{
1317 // sldt %Ew
1318 int rc = VINF_SUCCESS;
1319 uint32_t offset = 0;
1320 uint32_t i;
1321
1322 /** @todo segment prefix (untested) */
1323 Assert(pCpu->fPrefix == DISPREFIX_NONE || pCpu->fPrefix == DISPREFIX_OPSIZE);
1324
1325 PATCHGEN_PROLOG(pVM, pPatch);
1326
1327 if (pCpu->param1.fUse == DISUSE_REG_GEN32 || pCpu->param1.fUse == DISUSE_REG_GEN16)
1328 {
1329 /* Register operand */
1330 // 8B 15 [32 bits addr] mov edx, CPUMCTX.tr/ldtr
1331
1332 if (pCpu->fPrefix == DISPREFIX_OPSIZE)
1333 pPB[offset++] = 0x66;
1334
1335 pPB[offset++] = 0x8B; // mov destreg, CPUMCTX.tr/ldtr
1336 /* Modify REG part according to destination of original instruction */
1337 pPB[offset++] = MAKE_MODRM(0, pCpu->param1.base.reg_gen, 5);
1338 if (pCpu->pCurInstr->uOpcode == OP_STR)
1339 {
1340 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1341 }
1342 else
1343 {
1344 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1345 }
1346 patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
1347 offset += sizeof(RTRCPTR);
1348 }
1349 else
1350 {
1351 /* Memory operand */
1352 //50 push eax
1353 //52 push edx
1354 //8D 15 48 7C 42 00 lea edx, dword ptr [dest]
1355 //66 A1 48 7C 42 00 mov ax, CPUMCTX.tr/ldtr
1356 //66 89 02 mov word ptr [edx],ax
1357 //5A pop edx
1358 //58 pop eax
1359
1360 pPB[offset++] = 0x50; // push eax
1361 pPB[offset++] = 0x52; // push edx
1362
1363 if (pCpu->fPrefix == DISPREFIX_SEG)
1364 {
1365 pPB[offset++] = DISQuerySegPrefixByte(pCpu);
1366 }
1367 pPB[offset++] = 0x8D; // lea edx, dword ptr [dest]
1368 // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
1369 pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);
1370
1371 i = 3; /* standard offset of modrm bytes */
1372 if (pCpu->fPrefix == DISPREFIX_OPSIZE)
1373 i++; //skip operand prefix
1374 if (pCpu->fPrefix == DISPREFIX_SEG)
1375 i++; //skip segment prefix
1376
1377 rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
1378 AssertRCReturn(rc, rc);
1379 offset += (pCpu->cbInstr - i);
1380
1381 pPB[offset++] = 0x66; // mov ax, CPUMCTX.tr/ldtr
1382 pPB[offset++] = 0xA1;
1383 if (pCpu->pCurInstr->uOpcode == OP_STR)
1384 {
1385 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1386 }
1387 else
1388 {
1389 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1390 }
1391 patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
1392 offset += sizeof(RTRCPTR);
1393
1394 pPB[offset++] = 0x66; // mov word ptr [edx],ax
1395 pPB[offset++] = 0x89;
1396 pPB[offset++] = 0x02;
1397
1398 pPB[offset++] = 0x5A; // pop edx
1399 pPB[offset++] = 0x58; // pop eax
1400 }
1401
1402 PATCHGEN_EPILOG(pPatch, offset);
1403
1404 return rc;
1405}
1406
1407/**
1408 * Generate an sgdt or sidt patch instruction
1409 *
1410 * @returns VBox status code.
1411 * @param pVM The VM to operate on.
1412 * @param pPatch Patch record
1413 * @param pCpu Disassembly state
1414 * @param pCurInstrGC Guest instruction address
1415 */
1416int patmPatchGenSxDT(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
1417{
1418 int rc = VINF_SUCCESS;
1419 uint32_t offset = 0, offset_base, offset_limit;
1420 uint32_t i;
1421
1422 /* @todo segment prefix (untested) */
1423 Assert(pCpu->fPrefix == DISPREFIX_NONE);
1424
1425 // sgdt %Ms
1426 // sidt %Ms
1427
1428 switch (pCpu->pCurInstr->uOpcode)
1429 {
1430 case OP_SGDT:
1431 offset_base = RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
1432 offset_limit = RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
1433 break;
1434
1435 case OP_SIDT:
1436 offset_base = RT_OFFSETOF(CPUMCTX, idtr.pIdt);
1437 offset_limit = RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
1438 break;
1439
1440 default:
1441 return VERR_INVALID_PARAMETER;
1442 }
1443
1444//50 push eax
1445//52 push edx
1446//8D 15 48 7C 42 00 lea edx, dword ptr [dest]
1447//66 A1 48 7C 42 00 mov ax, CPUMCTX.gdtr.limit
1448//66 89 02 mov word ptr [edx],ax
1449//A1 48 7C 42 00 mov eax, CPUMCTX.gdtr.base
1450//89 42 02 mov dword ptr [edx+2],eax
1451//5A pop edx
1452//58 pop eax
1453
1454 PATCHGEN_PROLOG(pVM, pPatch);
1455 pPB[offset++] = 0x50; // push eax
1456 pPB[offset++] = 0x52; // push edx
1457
1458 if (pCpu->fPrefix == DISPREFIX_SEG)
1459 {
1460 pPB[offset++] = DISQuerySegPrefixByte(pCpu);
1461 }
1462 pPB[offset++] = 0x8D; // lea edx, dword ptr [dest]
1463 // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
1464 pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);
1465
1466 i = 3; /* standard offset of modrm bytes */
1467 if (pCpu->fPrefix == DISPREFIX_OPSIZE)
1468 i++; //skip operand prefix
1469 if (pCpu->fPrefix == DISPREFIX_SEG)
1470 i++; //skip segment prefix
1471 rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
1472 AssertRCReturn(rc, rc);
1473 offset += (pCpu->cbInstr - i);
1474
1475 pPB[offset++] = 0x66; // mov ax, CPUMCTX.gdtr.limit
1476 pPB[offset++] = 0xA1;
1477 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_limit;
1478 patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
1479 offset += sizeof(RTRCPTR);
1480
1481 pPB[offset++] = 0x66; // mov word ptr [edx],ax
1482 pPB[offset++] = 0x89;
1483 pPB[offset++] = 0x02;
1484
1485 pPB[offset++] = 0xA1; // mov eax, CPUMCTX.gdtr.base
1486 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_base;
1487 patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
1488 offset += sizeof(RTRCPTR);
1489
1490 pPB[offset++] = 0x89; // mov dword ptr [edx+2],eax
1491 pPB[offset++] = 0x42;
1492 pPB[offset++] = 0x02;
1493
1494 pPB[offset++] = 0x5A; // pop edx
1495 pPB[offset++] = 0x58; // pop eax
1496
1497 PATCHGEN_EPILOG(pPatch, offset);
1498
1499 return rc;
1500}
1501
1502/**
1503 * Generate a cpuid patch instruction
1504 *
1505 * @returns VBox status code.
1506 * @param pVM The VM to operate on.
1507 * @param pPatch Patch record
1508 * @param pCurInstrGC Guest instruction address
1509 */
1510int patmPatchGenCpuid(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
1511{
1512 uint32_t size;
1513 PATCHGEN_PROLOG(pVM, pPatch);
1514
1515 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCpuidRecord, 0, false);
1516
1517 PATCHGEN_EPILOG(pPatch, size);
1518 NOREF(pCurInstrGC);
1519 return VINF_SUCCESS;
1520}
1521
1522/**
1523 * Generate the jump from guest to patch code
1524 *
1525 * @returns VBox status code.
1526 * @param pVM The VM to operate on.
1527 * @param pPatch Patch record
1528 * @param pTargetGC Guest target jump
1529 * @param fClearInhibitIRQs Clear inhibit irq flag
1530 */
1531int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fClearInhibitIRQs)
1532{
1533 int rc = VINF_SUCCESS;
1534 uint32_t size;
1535
1536 if (fClearInhibitIRQs)
1537 {
1538 rc = patmPatchGenClearInhibitIRQ(pVM, pPatch, pReturnAddrGC);
1539 if (rc == VERR_NO_MEMORY)
1540 return rc;
1541 AssertRCReturn(rc, rc);
1542 }
1543
1544 PATCHGEN_PROLOG(pVM, pPatch);
1545
1546 /* Add lookup record for patch to guest address translation */
1547 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
1548
1549 /* Generate code to jump to guest code if IF=1, else fault. */
1550 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
1551 PATCHGEN_EPILOG(pPatch, size);
1552
1553 return rc;
1554}
1555
1556/*
1557 * Relative jump from patch code to patch code (no fixup required)
1558 */
1559int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RCPTRTYPE(uint8_t *) pPatchAddrGC, bool fAddLookupRecord)
1560{
1561 int32_t displ;
1562 int rc = VINF_SUCCESS;
1563
1564 Assert(PATMIsPatchGCAddr(pVM, pPatchAddrGC));
1565 PATCHGEN_PROLOG(pVM, pPatch);
1566
1567 if (fAddLookupRecord)
1568 {
1569 /* Add lookup record for patch to guest address translation */
1570 patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
1571 }
1572
1573 pPB[0] = 0xE9; //JMP
1574
1575 displ = pPatchAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + SIZEOF_NEARJUMP32);
1576
1577 *(uint32_t *)&pPB[1] = displ;
1578
1579 PATCHGEN_EPILOG(pPatch, SIZEOF_NEARJUMP32);
1580
1581 return rc;
1582}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette