VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp@ 54699

Last change on this file since 54699 was 54688, checked in by vboxsync, 10 years ago

PATM: Doing some more cleanups while trying to understand stuff again.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 53.1 KB
Line 
1/* $Id: PATMPatch.cpp 54688 2015-03-08 23:08:04Z vboxsync $ */
2/** @file
3 * PATMPatch - Dynamic Guest OS Instruction patches
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/trpm.h>
30#include <VBox/vmm/csam.h>
31#include "PATMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/param.h>
34
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/dis.h>
38#include <VBox/disopcode.h>
39
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43
44#include "PATMA.h"
45#include "PATMPatch.h"
46
47
48/*******************************************************************************
49* Structures and Typedefs *
50*******************************************************************************/
/**
 * Internal structure for passing more information about call fixups to
 * patmPatchGenCode.
 */
typedef struct
{
    RTRCPTR pTargetGC;      /**< Call/jump target (GC); 0xDEADBEEF when the target is indirect/unknown. */
    RTRCPTR pCurInstrGC;    /**< GC address of the instruction currently being patched. */
    RTRCPTR pNextInstrGC;   /**< GC address of the following instruction (used by sti/popf generators). */
    RTRCPTR pReturnGC;      /**< GC return address for generated call patches. */
} PATMCALLINFO, *PPATMCALLINFO;
62
63
64/*******************************************************************************
65* Defined Constants And Macros *
66*******************************************************************************/
/** Common patch-generator prologue (no pPB declaration): positions the write
 *  cursor pPB at the current patch offset and makes the enclosing function
 *  return VERR_NO_MEMORY when fewer than 256 bytes of patch memory remain. */
#define PATCHGEN_PROLOG_NODEF(pVM, pPatch) \
    do { \
        pPB = PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset; \
        if (pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) \
        { \
            pVM->patm.s.fOutOfMemory = true; \
            Assert(pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem); \
            return VERR_NO_MEMORY; \
        } \
    } while (0)

/** Same as PATCHGEN_PROLOG_NODEF, but also declares the pPB write cursor.
 *  Use the NODEF variant for subsequent prologues in the same function. */
#define PATCHGEN_PROLOG(pVM, pPatch) \
    uint8_t *pPB; \
    PATCHGEN_PROLOG_NODEF(pVM, pPatch)

/** Common patch-generator epilogue: advances the patch write offset by the
 *  number of bytes just emitted (sanity-limited to 640 bytes per call). */
#define PATCHGEN_EPILOG(pPatch, size) \
    do { \
        Assert(size <= 640); \
        pPatch->uCurPatchOffset += size; \
    } while (0)
87
88
89
90
91int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType,
92 RTRCPTR pSource /*= 0*/, RTRCPTR pDest /*= 0*/)
93{
94 PRELOCREC pRec;
95
96 Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
97
98 LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemGC , pSource, pDest));
99
100 pRec = (PRELOCREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
101 Assert(pRec);
102 pRec->Core.Key = (AVLPVKEY)pRelocHC;
103 pRec->pRelocPos = pRelocHC; /* @todo redundant. */
104 pRec->pSource = pSource;
105 pRec->pDest = pDest;
106 pRec->uType = uType;
107
108 bool ret = RTAvlPVInsert(&pPatch->FixupTree, &pRec->Core);
109 Assert(ret); NOREF(ret);
110 pPatch->nrFixups++;
111
112 return VINF_SUCCESS;
113}
114
115int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTRCPTR pTargetGC, uint32_t opcode)
116{
117 PJUMPREC pRec;
118
119 pRec = (PJUMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
120 Assert(pRec);
121
122 pRec->Core.Key = (AVLPVKEY)pJumpHC;
123 pRec->pJumpHC = pJumpHC; /* @todo redundant. */
124 pRec->offDispl = offset;
125 pRec->pTargetGC = pTargetGC;
126 pRec->opcode = opcode;
127
128 bool ret = RTAvlPVInsert(&pPatch->JumpTree, &pRec->Core);
129 Assert(ret); NOREF(ret);
130 pPatch->nrJumpRecs++;
131
132 return VINF_SUCCESS;
133}
134
/**
 * Instantiates a patch assembly template (PATMA.asm record) at pPB and
 * resolves every fixup marker embedded in the template code.
 *
 * @returns Number of bytes emitted: the full template size, or the template
 *          size minus SIZEOF_NEARJUMP32 when the template contains a trailing
 *          jump that was not requested (and vice versa).
 * @param   pVM             Pointer to the VM.
 * @param   pPatch          Patch record.
 * @param   pPB             Host context write position inside the patch buffer.
 * @param   pAsmRecord      Template descriptor: code bytes plus relocation list.
 * @param   pReturnAddrGC   Guest address to jump back to (only used when fGenJump is true).
 * @param   fGenJump        Whether to wire up the template's jump back to guest code.
 * @param   pCallInfo       Optional extra addresses for call/jump related fixups.
 */
static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PCPATCHASMRECORD pAsmRecord,
                                 RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fGenJump,
                                 PPATMCALLINFO pCallInfo = 0)
{
    Assert(fGenJump == false || pReturnAddrGC);
    Assert(fGenJump == false || pAsmRecord->offJump);
    Assert(pAsmRecord);
    Assert(pAsmRecord->cbFunction > sizeof(pAsmRecord->aRelocs[0].uType) * pAsmRecord->cRelocs);

    // Copy the code block
    memcpy(pPB, pAsmRecord->pbFunction, pAsmRecord->cbFunction);

    // Process all fixups
    uint32_t i, j;
    for (j = 0, i = 0; i < pAsmRecord->cRelocs; i++)
    {
        /* Scan forward for the next occurrence of this relocation marker; j is
           deliberately not reset because markers appear in ascending order. */
        for (; j < pAsmRecord->cbFunction; j++)
        {
            if (*(uint32_t*)&pPB[j] == pAsmRecord->aRelocs[i].uType)
            {
                RCPTRTYPE(uint32_t *) dest;

#ifdef VBOX_STRICT
                /* Only PATM_FIXUP records carry extra info (an offset). */
                if (pAsmRecord->aRelocs[i].uType == PATM_FIXUP)
                    Assert(pAsmRecord->aRelocs[i].uInfo != 0);
                else
                    Assert(pAsmRecord->aRelocs[i].uInfo == 0);
#endif

                /*
                 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING A SAVED STATE WITH
                 * A DIFFERENT HYPERVISOR LAYOUT.
                 */
                switch (pAsmRecord->aRelocs[i].uType)
                {
                    /* GC state fields shared with the patch code. */
                    case PATM_VMFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
                        break;

                    case PATM_PENDINGACTION:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
                        break;

                    case PATM_FIXUP:
                        /* Offset in aRelocs[i].uInfo is from the base of the function. */
                        dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo
                             + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
                        break;
#ifdef VBOX_WITH_STATISTICS
                    case PATM_ALLPATCHCALLS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
                        break;

                    case PATM_IRETEFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
                        break;

                    case PATM_IRETCS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
                        break;

                    case PATM_IRETEIP:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
                        break;

                    case PATM_PERPATCHCALLS:
                        dest = patmPatchQueryStatAddress(pVM, pPatch);
                        break;
#endif
                    case PATM_STACKPTR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
                        break;

                    /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
                     * part to store the original return addresses.
                     */
                    case PATM_STACKBASE:
                        dest = pVM->patm.s.pGCStackGC;
                        break;

                    case PATM_STACKBASE_GUEST:
                        dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
                        break;

                    case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
                        dest = pCallInfo->pReturnGC;
                        break;

                    case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);

                        /** @note hardcoded assumption that we must return to the instruction following this block */
                        dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction;
                        break;

                    case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);

                        /* Address must be filled in later. (see patmr3SetBranchTargets)  */
                        patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
                        dest = PATM_ILLEGAL_DESTINATION;
                        break;

                    case PATM_PATCHBASE:    /* Patch GC base address */
                        dest = pVM->patm.s.pPatchMemGC;
                        break;

                    /* CPUID leaf tables and counts maintained by CPUM. */
                    case PATM_CPUID_STD_PTR:
                        dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
                        break;

                    case PATM_CPUID_EXT_PTR:
                        dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
                        break;

                    case PATM_CPUID_CENTAUR_PTR:
                        dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
                        break;

                    case PATM_CPUID_DEF_PTR:
                        dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
                        break;

                    case PATM_CPUID_STD_MAX:
                        dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM);
                        break;

                    case PATM_CPUID_EXT_MAX:
                        dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM);
                        break;

                    case PATM_CPUID_CENTAUR_MAX:
                        dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM);
                        break;

                    case PATM_INTERRUPTFLAG:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
                        break;

                    case PATM_INHIBITIRQADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
                        break;

                    case PATM_NEXTINSTRADDR:
                        Assert(pCallInfo);
                        /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
                        dest = pCallInfo->pNextInstrGC;
                        break;

                    case PATM_CURINSTRADDR:
                        Assert(pCallInfo);
                        dest = pCallInfo->pCurInstrGC;
                        break;

                    case PATM_VM_FORCEDACTIONS:
                        /** @todo dirty assumptions when correcting this fixup during saved state loading. */
                        dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                        break;

                    /* Scratch fields used to save/restore guest registers around patch code. */
                    case PATM_TEMP_EAX:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
                        break;
                    case PATM_TEMP_ECX:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
                        break;
                    case PATM_TEMP_EDI:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
                        break;
                    case PATM_TEMP_EFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
                        break;
                    case PATM_TEMP_RESTORE_FLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
                        break;
                    case PATM_CALL_PATCH_TARGET_ADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
                        break;
                    case PATM_CALL_RETURN_ADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
                        break;

                    /* Relative address of global patm lookup and call function. */
                    case PATM_LOOKUP_AND_CALL_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperCallGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_RETURN_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperRetGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_IRET_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperIretGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_LOOKUP_AND_JUMP_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperJumpGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
                        break;
                    }

                    default:
                        dest = PATM_ILLEGAL_DESTINATION;
                        AssertRelease(0);
                        break;
                }

                *(RTRCPTR *)&pPB[j] = dest;
                /* Absolute addresses need a relocation record so they can be
                   corrected when the hypervisor layout changes (saved states). */
                if (pAsmRecord->aRelocs[i].uType < PATM_NO_FIXUP)
                {
                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE);
                }
                break;
            }
        }
        Assert(j < pAsmRecord->cbFunction);
    }
    /* The relocation array is terminated by a 0xffffffff info entry. */
    Assert(pAsmRecord->aRelocs[i].uInfo == 0xffffffff);

    /* Add the jump back to guest code (if required) */
    if (fGenJump)
    {
        int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);

        /* Add lookup record for patch to guest address translation */
        Assert(pPB[pAsmRecord->offJump - 1] == 0xE9);
        patmR3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

        *(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
        patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
                            PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32,
                            pReturnAddrGC);
    }

    // Calculate the right size of this patch block
    if ((fGenJump && pAsmRecord->offJump) || (!fGenJump && !pAsmRecord->offJump))
        return pAsmRecord->cbFunction;
    // if a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32
    return pAsmRecord->cbFunction - SIZEOF_NEARJUMP32;
}
401
402/* Read bytes and check for overwritten instructions. */
403static int patmPatchReadBytes(PVM pVM, uint8_t *pDest, RTRCPTR pSrc, uint32_t cb)
404{
405 int rc = PGMPhysSimpleReadGCPtr(&pVM->aCpus[0], pDest, pSrc, cb);
406 AssertRCReturn(rc, rc);
407 /*
408 * Could be patched already; make sure this is checked!
409 */
410 for (uint32_t i=0;i<cb;i++)
411 {
412 uint8_t temp;
413
414 int rc2 = PATMR3QueryOpcode(pVM, pSrc+i, &temp);
415 if (RT_SUCCESS(rc2))
416 {
417 pDest[i] = temp;
418 }
419 else
420 break; /* no more */
421 }
422 return VINF_SUCCESS;
423}
424
425int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
426{
427 int rc = VINF_SUCCESS;
428 PATCHGEN_PROLOG(pVM, pPatch);
429
430 uint32_t const cbInstrShutUpGcc = pCpu->cbInstr;
431 rc = patmPatchReadBytes(pVM, pPB, pCurInstrGC, cbInstrShutUpGcc);
432 AssertRC(rc);
433 PATCHGEN_EPILOG(pPatch, cbInstrShutUpGcc);
434 return rc;
435}
436
437int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, bool fSizeOverride)
438{
439 uint32_t size;
440 PATMCALLINFO callInfo;
441
442 PATCHGEN_PROLOG(pVM, pPatch);
443
444 AssertMsg(fSizeOverride == false, ("operand size override!!\n"));
445 callInfo.pCurInstrGC = pCurInstrGC;
446
447 if (EMIsRawRing1Enabled(pVM))
448 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmIretRing1Record, 0, false, &callInfo);
449 else
450 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmIretRecord, 0, false, &callInfo);
451
452 PATCHGEN_EPILOG(pPatch, size);
453 return VINF_SUCCESS;
454}
455
456int patmPatchGenCli(PVM pVM, PPATCHINFO pPatch)
457{
458 uint32_t size;
459 PATCHGEN_PROLOG(pVM, pPatch);
460
461 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCliRecord, 0, false);
462
463 PATCHGEN_EPILOG(pPatch, size);
464 return VINF_SUCCESS;
465}
466
467/*
468 * Generate an STI patch
469 */
470int patmPatchGenSti(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RTRCPTR pNextInstrGC)
471{
472 PATMCALLINFO callInfo;
473 uint32_t size;
474
475 Log(("patmPatchGenSti at %RRv; next %RRv\n", pCurInstrGC, pNextInstrGC));
476 PATCHGEN_PROLOG(pVM, pPatch);
477 callInfo.pNextInstrGC = pNextInstrGC;
478 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmStiRecord, 0, false, &callInfo);
479 PATCHGEN_EPILOG(pPatch, size);
480
481 return VINF_SUCCESS;
482}
483
484
485int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
486{
487 uint32_t size;
488 PATMCALLINFO callInfo;
489
490 PATCHGEN_PROLOG(pVM, pPatch);
491
492 callInfo.pNextInstrGC = pReturnAddrGC;
493
494 Log(("patmPatchGenPopf at %RRv\n", pReturnAddrGC));
495
496 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
497 if (fSizeOverride == true)
498 {
499 Log(("operand size override!!\n"));
500 size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &g_patmPopf16Record : &g_patmPopf16Record_NoExit , pReturnAddrGC, fGenJumpBack, &callInfo);
501 }
502 else
503 {
504 size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &g_patmPopf32Record : &g_patmPopf32Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
505 }
506
507 PATCHGEN_EPILOG(pPatch, size);
508 STAM_COUNTER_INC(&pVM->patm.s.StatGenPopf);
509 return VINF_SUCCESS;
510}
511
512int patmPatchGenPushf(PVM pVM, PPATCHINFO pPatch, bool fSizeOverride)
513{
514 uint32_t size;
515 PATCHGEN_PROLOG(pVM, pPatch);
516
517 if (fSizeOverride == true)
518 {
519 Log(("operand size override!!\n"));
520 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmPushf16Record, 0, false);
521 }
522 else
523 {
524 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmPushf32Record, 0, false);
525 }
526
527 PATCHGEN_EPILOG(pPatch, size);
528 return VINF_SUCCESS;
529}
530
531int patmPatchGenPushCS(PVM pVM, PPATCHINFO pPatch)
532{
533 uint32_t size;
534 PATCHGEN_PROLOG(pVM, pPatch);
535 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmPushCSRecord, 0, false);
536 PATCHGEN_EPILOG(pPatch, size);
537 return VINF_SUCCESS;
538}
539
540int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
541{
542 uint32_t size = 0;
543 PCPATCHASMRECORD pPatchAsmRec;
544
545 PATCHGEN_PROLOG(pVM, pPatch);
546
547 switch (opcode)
548 {
549 case OP_LOOP:
550 pPatchAsmRec = &g_patmLoopRecord;
551 break;
552 case OP_LOOPNE:
553 pPatchAsmRec = &g_patmLoopNZRecord;
554 break;
555 case OP_LOOPE:
556 pPatchAsmRec = &g_patmLoopZRecord;
557 break;
558 case OP_JECXZ:
559 pPatchAsmRec = &g_patmJEcxRecord;
560 break;
561 default:
562 AssertMsgFailed(("PatchGenLoop: invalid opcode %d\n", opcode));
563 return VERR_INVALID_PARAMETER;
564 }
565 Assert(pPatchAsmRec->offSizeOverride && pPatchAsmRec->offRelJump);
566
567 Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));
568
569 // Generate the patch code
570 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);
571
572 if (fSizeOverride)
573 {
574 pPB[pPatchAsmRec->offSizeOverride] = 0x66; // ecx -> cx or vice versa
575 }
576
577 *(RTRCPTR *)&pPB[pPatchAsmRec->offRelJump] = 0xDEADBEEF;
578
579 patmPatchAddJump(pVM, pPatch, &pPB[pPatchAsmRec->offRelJump - 1], 1, pTargetGC, opcode);
580
581 PATCHGEN_EPILOG(pPatch, size);
582 return VINF_SUCCESS;
583}
584
585int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
586{
587 uint32_t offset = 0;
588 PATCHGEN_PROLOG(pVM, pPatch);
589
590 // internal relative jumps from patch code to patch code; no relocation record required
591
592 Assert(PATMIsPatchGCAddr(pVM, pTargetGC) == false);
593
594 switch (opcode)
595 {
596 case OP_JO:
597 pPB[1] = 0x80;
598 break;
599 case OP_JNO:
600 pPB[1] = 0x81;
601 break;
602 case OP_JC:
603 pPB[1] = 0x82;
604 break;
605 case OP_JNC:
606 pPB[1] = 0x83;
607 break;
608 case OP_JE:
609 pPB[1] = 0x84;
610 break;
611 case OP_JNE:
612 pPB[1] = 0x85;
613 break;
614 case OP_JBE:
615 pPB[1] = 0x86;
616 break;
617 case OP_JNBE:
618 pPB[1] = 0x87;
619 break;
620 case OP_JS:
621 pPB[1] = 0x88;
622 break;
623 case OP_JNS:
624 pPB[1] = 0x89;
625 break;
626 case OP_JP:
627 pPB[1] = 0x8A;
628 break;
629 case OP_JNP:
630 pPB[1] = 0x8B;
631 break;
632 case OP_JL:
633 pPB[1] = 0x8C;
634 break;
635 case OP_JNL:
636 pPB[1] = 0x8D;
637 break;
638 case OP_JLE:
639 pPB[1] = 0x8E;
640 break;
641 case OP_JNLE:
642 pPB[1] = 0x8F;
643 break;
644
645 case OP_JMP:
646 /* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
647 /* Add lookup record for patch to guest address translation */
648 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);
649
650 pPB[0] = 0xE9;
651 break;
652
653 case OP_JECXZ:
654 case OP_LOOP:
655 case OP_LOOPNE:
656 case OP_LOOPE:
657 return patmPatchGenLoop(pVM, pPatch, pTargetGC, opcode, fSizeOverride);
658
659 default:
660 AssertMsg(0, ("Invalid jump opcode %d\n", opcode));
661 return VERR_PATCHING_REFUSED;
662 }
663 if (opcode != OP_JMP)
664 {
665 pPB[0] = 0xF;
666 offset += 2;
667 }
668 else offset++;
669
670 *(RTRCPTR *)&pPB[offset] = 0xDEADBEEF;
671
672 patmPatchAddJump(pVM, pPatch, pPB, offset, pTargetGC, opcode);
673
674 offset += sizeof(RTRCPTR);
675
676 PATCHGEN_EPILOG(pPatch, offset);
677 return VINF_SUCCESS;
678}
679
/**
 * Rewrite call to dynamic or currently unknown function (on-demand patching of function)
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPatch      Patch record.
 * @param   pCpu        Disassembly state of the call instruction.
 * @param   pCurInstrGC Guest context address of the call instruction.
 * @param   pTargetGC   Call target (ignored for indirect calls).
 * @param   fIndirect   True for indirect calls (call r/m32).
 */
int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC, RTRCPTR pTargetGC, bool fIndirect)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /** @note Don't check for IF=1 here. The ret instruction will do this. */
    /** @note It's dangerous to do this for 'normal' patches. the jump target might be inside the generated patch jump. (seen this!) */

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    if (fIndirect)
    {
        Log(("patmPatchGenIndirectCall\n"));
        Assert(pCpu->Param1.cb == 4);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J);

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        /* include prefix byte to make sure we don't use the incorrect selector register. */
        if (pCpu->fPrefix & DISPREFIX_SEG)
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        pPB[offset++] = 0xFF;   // push r/m32
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
        i = 2;                  /* standard offset of modrm bytes */
        if (pCpu->fPrefix & DISPREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->fPrefix & DISPREFIX_SEG)
            i++;    //skip segment prefix

        /* Copy the original operand bytes (mod/rm, sib, displacement) behind the push opcode. */
        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->cbInstr - i);
    }
    else
    {
        AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%RRv)?!?\n", pTargetGC));
        Assert(pTargetGC);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J);

        /** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */

        /* Relative call to patch code (patch to patch -> no fixup). */
        Log(("PatchGenCall from %RRv (next=%RRv) to %RRv\n", pCurInstrGC, pCurInstrGC + pCpu->cbInstr, pTargetGC));

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        pPB[offset++] = 0x68;   // push %Iv
        *(RTRCPTR *)&pPB[offset] = pTargetGC;
        offset += sizeof(RTRCPTR);
    }

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
    /* 0xDEADBEEF marks an unknown target, resolved at run time by the lookup code. */
    callInfo.pTargetGC = (fIndirect) ? 0xDEADBEEF : pTargetGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, (fIndirect) ? &g_patmCallIndirectRecord : &g_patmCallRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    /* Need to set PATM_INTERRUPTFLAG after the patched ret returns here. */
    rc = patmPatchGenSetPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenCall);
    return VINF_SUCCESS;
}
774
/**
 * Generate indirect jump to unknown destination
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Current instruction address
 */
int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    Log(("patmPatchGenIndirectJump\n"));
    Assert(pCpu->Param1.cb == 4);
    Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J);

    /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
     * a page fault. The assembly code restores the stack afterwards.
     */
    offset = 0;
    /* include prefix byte to make sure we don't use the incorrect selector register. */
    if (pCpu->fPrefix & DISPREFIX_SEG)
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);

    pPB[offset++] = 0xFF;   // push r/m32
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
    i = 2;                  /* standard offset of modrm bytes */
    if (pCpu->fPrefix & DISPREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->fPrefix & DISPREFIX_SEG)
        i++;    //skip segment prefix

    /* Copy the original operand bytes (mod/rm, sib, displacement) behind the push opcode. */
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->cbInstr - i);

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
    /* Target is unknown for an indirect jump; resolved at run time by the lookup code. */
    callInfo.pTargetGC = 0xDEADBEEF;
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmJumpIndirectRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenJump);
    return VINF_SUCCESS;
}
845
/**
 * Generate return instruction
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPatch      Patch structure
 * @param   pCpu        Disassembly struct
 * @param   pCurInstrGC Current instruction pointer
 *
 */
int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int size = 0, rc;
    RTRCPTR pPatchRetInstrGC;

    /* Remember start of this patch for below. */
    pPatchRetInstrGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;

    Log(("patmPatchGenRet %RRv\n", pCurInstrGC));

    /** @note optimization: multiple identical ret instruction in a single patch can share a single patched ret. */
    if (    pPatch->pTempInfo->pPatchRetInstrGC
        &&  pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->Param1.uValue) /* nr of bytes popped off the stack should be identical of course! */
    {
        Assert(pCpu->pCurInstr->uOpcode == OP_RETN);
        STAM_COUNTER_INC(&pVM->patm.s.StatGenRetReused);

        /* Reuse the previously generated ret patch: just jump to it. */
        return patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, pPatch->pTempInfo->pPatchRetInstrGC);
    }

    /* Jump back to the original instruction if IF is set again. */
    Assert(!patmFindActivePatchByEntrypoint(pVM, pCurInstrGC));
    rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
    AssertRCReturn(rc, rc);

    /* align this block properly to make sure the jump table will not be misaligned. */
    PATCHGEN_PROLOG(pVM, pPatch);
    size = (RTHCUINTPTR)pPB & 3;
    if (size)
        size = 4 - size;

    for (int i=0;i<size;i++)
        pPB[i] = 0x90; /* nop */
    PATCHGEN_EPILOG(pPatch, size);

    /* Emit the ret replacement template. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmRetRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenRet);
    /* Duplicate the ret or ret n instruction; it will use the PATM return address */
    rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);

    if (rc == VINF_SUCCESS)
    {
        /* Cache this ret patch so identical ret instructions in the same patch can reuse it. */
        pPatch->pTempInfo->pPatchRetInstrGC = pPatchRetInstrGC;
        pPatch->pTempInfo->uPatchRetParam1  = pCpu->Param1.uValue;
    }
    return rc;
}
906
907/**
908 * Generate all global patm functions
909 *
910 * @returns VBox status code.
911 * @param pVM Pointer to the VM.
912 * @param pPatch Patch structure
913 *
914 */
915int patmPatchGenGlobalFunctions(PVM pVM, PPATCHINFO pPatch)
916{
917 int size = 0;
918
919 pVM->patm.s.pfnHelperCallGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
920 PATCHGEN_PROLOG(pVM, pPatch);
921 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmLookupAndCallRecord, 0, false);
922 PATCHGEN_EPILOG(pPatch, size);
923
924 /* Round to next 8 byte boundary. */
925 pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);
926
927 pVM->patm.s.pfnHelperRetGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
928 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
929 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmRetFunctionRecord, 0, false);
930 PATCHGEN_EPILOG(pPatch, size);
931
932 /* Round to next 8 byte boundary. */
933 pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);
934
935 pVM->patm.s.pfnHelperJumpGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
936 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
937 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmLookupAndJumpRecord, 0, false);
938 PATCHGEN_EPILOG(pPatch, size);
939
940 /* Round to next 8 byte boundary. */
941 pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);
942
943 pVM->patm.s.pfnHelperIretGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
944 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
945 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmIretFunctionRecord, 0, false);
946 PATCHGEN_EPILOG(pPatch, size);
947
948 Log(("pfnHelperCallGC %RRv\n", pVM->patm.s.pfnHelperCallGC));
949 Log(("pfnHelperRetGC %RRv\n", pVM->patm.s.pfnHelperRetGC));
950 Log(("pfnHelperJumpGC %RRv\n", pVM->patm.s.pfnHelperJumpGC));
951 Log(("pfnHelperIretGC %RRv\n", pVM->patm.s.pfnHelperIretGC));
952
953 return VINF_SUCCESS;
954}
955
956/**
957 * Generate illegal instruction (int 3)
958 *
959 * @returns VBox status code.
960 * @param pVM Pointer to the VM.
961 * @param pPatch Patch structure
962 *
963 */
964int patmPatchGenIllegalInstr(PVM pVM, PPATCHINFO pPatch)
965{
966 PATCHGEN_PROLOG(pVM, pPatch);
967
968 pPB[0] = 0xCC;
969
970 PATCHGEN_EPILOG(pPatch, 1);
971 return VINF_SUCCESS;
972}
973
974/**
975 * Check virtual IF flag and jump back to original guest code if set
976 *
977 * @returns VBox status code.
978 * @param pVM Pointer to the VM.
979 * @param pPatch Patch structure
980 * @param pCurInstrGC Guest context pointer to the current instruction
981 *
982 */
983int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
984{
985 uint32_t size;
986
987 PATCHGEN_PROLOG(pVM, pPatch);
988
989 /* Add lookup record for patch to guest address translation */
990 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
991
992 /* Generate code to check for IF=1 before executing the call to the duplicated function. */
993 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCheckIFRecord, pCurInstrGC, true);
994
995 PATCHGEN_EPILOG(pPatch, size);
996 return VINF_SUCCESS;
997}
998
999/**
1000 * Set PATM interrupt flag
1001 *
1002 * @returns VBox status code.
1003 * @param pVM Pointer to the VM.
1004 * @param pPatch Patch structure
1005 * @param pInstrGC Corresponding guest instruction
1006 *
1007 */
1008int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1009{
1010 PATCHGEN_PROLOG(pVM, pPatch);
1011
1012 /* Add lookup record for patch to guest address translation */
1013 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1014
1015 int size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmSetPIFRecord, 0, false);
1016 PATCHGEN_EPILOG(pPatch, size);
1017 return VINF_SUCCESS;
1018}
1019
1020/**
1021 * Clear PATM interrupt flag
1022 *
1023 * @returns VBox status code.
1024 * @param pVM Pointer to the VM.
1025 * @param pPatch Patch structure
1026 * @param pInstrGC Corresponding guest instruction
1027 *
1028 */
1029int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1030{
1031 PATCHGEN_PROLOG(pVM, pPatch);
1032
1033 /* Add lookup record for patch to guest address translation */
1034 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1035
1036 int size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmClearPIFRecord, 0, false);
1037 PATCHGEN_EPILOG(pPatch, size);
1038 return VINF_SUCCESS;
1039}
1040
1041
1042/**
1043 * Clear PATM inhibit irq flag
1044 *
1045 * @returns VBox status code.
1046 * @param pVM Pointer to the VM.
1047 * @param pPatch Patch structure
1048 * @param pNextInstrGC Next guest instruction
1049 */
1050int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTRCPTR pNextInstrGC)
1051{
1052 int size;
1053 PATMCALLINFO callInfo;
1054
1055 PATCHGEN_PROLOG(pVM, pPatch);
1056
1057 Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));
1058
1059 /* Add lookup record for patch to guest address translation */
1060 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);
1061
1062 callInfo.pNextInstrGC = pNextInstrGC;
1063
1064 if (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1065 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmClearInhibitIRQContIF0Record, 0, false, &callInfo);
1066 else
1067 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmClearInhibitIRQFaultIF0Record, 0, false, &callInfo);
1068
1069 PATCHGEN_EPILOG(pPatch, size);
1070 return VINF_SUCCESS;
1071}
1072
/**
 * Generate an interrupt handler entrypoint
 *
 * @returns VBox status code.
 * @param   pVM           Pointer to the VM.
 * @param   pPatch        Patch record
 * @param   pIntHandlerGC IDT handler address
 *
 ** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
 */
int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
{
    int rc = VINF_SUCCESS;

    if (!EMIsRawRing1Enabled(pVM)) /* direct passthru of interrupts is not allowed in the ring-1 support case as we can't
                                      deal with the ring-1/2 ambiguity in the patm asm code and we don't need it either as
                                      TRPMForwardTrap takes care of the details. */
    {
        uint32_t size;
        PATCHGEN_PROLOG(pVM, pPatch);

        /* Add lookup record for patch to guest address translation */
        patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);

        /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame);
           the error-code variant is selected by PATMFL_INTHANDLER_WITH_ERRORCODE. */
        size = patmPatchGenCode(pVM, pPatch, pPB,
                                (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &g_patmIntEntryRecordErrorCode : &g_patmIntEntryRecord,
                                0, false);

        PATCHGEN_EPILOG(pPatch, size);
    }

    // Interrupt gates set IF to 0, so emit a (patched) cli as well.
    rc = patmPatchGenCli(pVM, pPatch);
    AssertRCReturn(rc, rc);

    return rc;
}
1111
1112/**
1113 * Generate a trap handler entrypoint
1114 *
1115 * @returns VBox status code.
1116 * @param pVM Pointer to the VM.
1117 * @param pPatch Patch record
1118 * @param pTrapHandlerGC IDT handler address
1119 */
1120int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTrapHandlerGC)
1121{
1122 uint32_t size;
1123
1124 Assert(!EMIsRawRing1Enabled(pVM));
1125
1126 PATCHGEN_PROLOG(pVM, pPatch);
1127
1128 /* Add lookup record for patch to guest address translation */
1129 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1130
1131 /* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
1132 size = patmPatchGenCode(pVM, pPatch, pPB,
1133 (pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE) ? &g_patmTrapEntryRecordErrorCode : &g_patmTrapEntryRecord,
1134 pTrapHandlerGC, true);
1135 PATCHGEN_EPILOG(pPatch, size);
1136
1137 return VINF_SUCCESS;
1138}
1139
1140#ifdef VBOX_WITH_STATISTICS
1141int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1142{
1143 uint32_t size;
1144
1145 PATCHGEN_PROLOG(pVM, pPatch);
1146
1147 /* Add lookup record for stats code -> guest handler. */
1148 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1149
1150 /* Generate code to keep calling statistics for this patch */
1151 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmStatsRecord, pInstrGC, false);
1152 PATCHGEN_EPILOG(pPatch, size);
1153
1154 return VINF_SUCCESS;
1155}
1156#endif
1157
/**
 * Debug register moves to or from general purpose registers
 * mov GPR, DRx
 * mov DRx, GPR
 *
 * The instruction is re-encoded as a mov between the GPR and the DRx copy
 * kept in the guest context structure (CPUMCTX.dr[]), addressed via a
 * disp32 memory operand that gets an absolute fixup.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pPatch  Patch record
 * @param   pCpu    Disassembly state of the instruction being patched.
 *
 * @todo: if we ever want to support hardware debug registers natively, then
 *        this will need to be changed!
 */
int patmPatchGenMovDebug(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    unsigned reg, mod, rm, dbgreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch);

    mod = 0;            //effective address (only)
    rm = 5;             //disp32
    if (pCpu->pCurInstr->fParam1 == OP_PARM_Dd)
    {
        Assert(0);  // You shouldn't get here - writing DRx is not patched. Illegal!

        // mov DRx, GPR
        pPB[0] = 0x89;      //mov disp32, GPR
        Assert(pCpu->Param1.fUse & DISUSE_REG_DBG);
        Assert(pCpu->Param2.fUse & DISUSE_REG_GEN32);

        dbgreg = pCpu->Param1.Base.idxDbgReg;
        reg    = pCpu->Param2.Base.idxGenReg;
    }
    else
    {
        // mov GPR, DRx
        Assert(pCpu->Param1.fUse & DISUSE_REG_GEN32);
        Assert(pCpu->Param2.fUse & DISUSE_REG_DBG);

        pPB[0] = 0x8B;      // mov GPR, disp32
        reg    = pCpu->Param1.Base.idxGenReg;
        dbgreg = pCpu->Param2.Base.idxDbgReg;
    }

    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /* Resolve the disp32 to the matching CPUMCTX.dr[] entry and register a fixup. */
    AssertReturn(dbgreg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
    offset = RT_OFFSETOF(CPUMCTX, dr[dbgreg]);

    *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
    return rc;
}
1210
/*
 * Control register moves to or from general purpose registers
 * mov GPR, CRx
 * mov CRx, GPR
 *
 * The instruction is re-encoded as a mov between the GPR and the CRx copy
 * kept in the guest context structure (CPUMCTX), addressed via a disp32
 * memory operand that gets an absolute fixup.
 */
int patmPatchGenMovControl(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    int reg, mod, rm, ctrlreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch);

    mod = 0;            //effective address (only)
    rm = 5;             //disp32
    if (pCpu->pCurInstr->fParam1 == OP_PARM_Cd)
    {
        Assert(0);  // You shouldn't get here - writing CRx is not patched. Illegal!

        // mov CRx, GPR
        pPB[0] = 0x89;      //mov disp32, GPR
        ctrlreg = pCpu->Param1.Base.idxCtrlReg;
        reg     = pCpu->Param2.Base.idxGenReg;
        Assert(pCpu->Param1.fUse & DISUSE_REG_CR);
        Assert(pCpu->Param2.fUse & DISUSE_REG_GEN32);
    }
    else
    {
        // mov GPR, CRx
        Assert(pCpu->Param1.fUse & DISUSE_REG_GEN32);
        Assert(pCpu->Param2.fUse & DISUSE_REG_CR);

        pPB[0] = 0x8B;      // mov GPR, disp32
        reg     = pCpu->Param1.Base.idxGenReg;
        ctrlreg = pCpu->Param2.Base.idxCtrlReg;
    }

    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /* Map the control register to its CPUMCTX field offset. */
    /// @todo: make this an array in the context structure
    switch (ctrlreg)
    {
    case DISCREG_CR0:
        offset = RT_OFFSETOF(CPUMCTX, cr0);
        break;
    case DISCREG_CR2:
        offset = RT_OFFSETOF(CPUMCTX, cr2);
        break;
    case DISCREG_CR3:
        offset = RT_OFFSETOF(CPUMCTX, cr3);
        break;
    case DISCREG_CR4:
        offset = RT_OFFSETOF(CPUMCTX, cr4);
        break;
    default: /* Shut up compiler warning. */
        AssertFailed();
        offset = 0;
        break;
    }
    *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
    return rc;
}
1276
/*
 * mov GPR, SS
 *
 * Emitted as five consecutive chunks: clear PIF, push ss, run the patch
 * helper that checks and corrects the RPL of the pushed value, pop the
 * result into the destination register, and set PIF again.
 */
int patmPatchGenMovFromSS(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    uint32_t size, offset;

    Log(("patmPatchGenMovFromSS %RRv\n", pCurInstrGC));

    Assert(pPatch->flags & PATMFL_CODE32);

    /* Clear PIF while the multi-chunk sequence below executes. */
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmClearPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* push ss */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    offset = 0;
    if (pCpu->fPrefix & DISPREFIX_OPSIZE)
        pPB[offset++] = 0x66;       /* size override -> 16 bits push */
    pPB[offset++] = 0x16;
    PATCHGEN_EPILOG(pPatch, offset);

    /* checks and corrects RPL of pushed ss*/
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmMovFromSSRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* pop general purpose register */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    offset = 0;
    if (pCpu->fPrefix & DISPREFIX_OPSIZE)
        pPB[offset++] = 0x66;       /* size override -> 16 bits pop */
    pPB[offset++] = 0x58 + pCpu->Param1.Base.idxGenReg;     /* pop <reg32> (0x58+r) */
    PATCHGEN_EPILOG(pPatch, offset);

    /* Set PIF again now the sequence is complete. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmSetPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}
1320
1321
/**
 * Generate an sldt or str patch instruction
 *
 * Re-encodes the instruction to read the tr/ldtr selector copy from the
 * guest context structure (CPUMCTX) instead of executing the privileged
 * instruction.  Handles both register and memory destination operands.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSldtStr(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    // sldt %Ew
    int rc = VINF_SUCCESS;
    uint32_t offset = 0;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->fPrefix == DISPREFIX_NONE || pCpu->fPrefix == DISPREFIX_OPSIZE);

    PATCHGEN_PROLOG(pVM, pPatch);

    if (pCpu->Param1.fUse == DISUSE_REG_GEN32 || pCpu->Param1.fUse == DISUSE_REG_GEN16)
    {
        /* Register operand */
        // 8B 15 [32 bits addr]   mov edx, CPUMCTX.tr/ldtr

        if (pCpu->fPrefix == DISPREFIX_OPSIZE)
            pPB[offset++] = 0x66;

        pPB[offset++] = 0x8B;              // mov       destreg, CPUMCTX.tr/ldtr
        /* Modify REG part according to destination of original instruction */
        pPB[offset++] = MAKE_MODRM(0, pCpu->Param1.Base.idxGenReg, 5);
        if (pCpu->pCurInstr->uOpcode == OP_STR)
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTRCPTR);
    }
    else
    {
        /* Memory operand */
        //50 push        eax
        //52 push        edx
        //8D 15 48 7C 42 00 lea edx, dword ptr [dest]
        //66 A1 48 7C 42 00 mov ax, CPUMCTX.tr/ldtr
        //66 89 02 mov word ptr [edx],ax
        //5A pop         edx
        //58 pop         eax

        pPB[offset++] = 0x50;              // push      eax
        pPB[offset++] = 0x52;              // push      edx

        if (pCpu->fPrefix == DISPREFIX_SEG)
        {
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        }
        pPB[offset++] = 0x8D;              // lea       edx, dword ptr [dest]
        // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);

        i = 3; /* standard offset of modrm bytes */
        if (pCpu->fPrefix == DISPREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->fPrefix == DISPREFIX_SEG)
            i++;    //skip segment prefix

        /* Copy the remaining addressing bytes (SIB/displacement) straight from the guest instruction. */
        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->cbInstr - i);

        pPB[offset++] = 0x66;              // mov       ax, CPUMCTX.tr/ldtr
        pPB[offset++] = 0xA1;
        if (pCpu->pCurInstr->uOpcode == OP_STR)
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTRCPTR);

        pPB[offset++] = 0x66;              // mov       word ptr [edx],ax
        pPB[offset++] = 0x89;
        pPB[offset++] = 0x02;

        pPB[offset++] = 0x5A;              // pop       edx
        pPB[offset++] = 0x58;              // pop       eax
    }

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}
1422
/**
 * Generate an sgdt or sidt patch instruction
 *
 * Re-encodes the instruction to store the GDTR/IDTR copy kept in the guest
 * context structure (CPUMCTX) into the destination memory operand.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSxDT(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    uint32_t offset = 0, offset_base, offset_limit;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->fPrefix == DISPREFIX_NONE);

    // sgdt %Ms
    // sidt %Ms

    /* Select the CPUMCTX base/limit field offsets for the table register. */
    switch (pCpu->pCurInstr->uOpcode)
    {
    case OP_SGDT:
        offset_base  = RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
        offset_limit = RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
        break;

    case OP_SIDT:
        offset_base  = RT_OFFSETOF(CPUMCTX, idtr.pIdt);
        offset_limit = RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
        break;

    default:
        return VERR_INVALID_PARAMETER;
    }

//50 push        eax
//52 push        edx
//8D 15 48 7C 42 00 lea edx, dword ptr [dest]
//66 A1 48 7C 42 00 mov ax, CPUMCTX.gdtr.limit
//66 89 02 mov word ptr [edx],ax
//A1 48 7C 42 00 mov eax, CPUMCTX.gdtr.base
//89 42 02 mov dword ptr [edx+2],eax
//5A pop         edx
//58 pop         eax

    PATCHGEN_PROLOG(pVM, pPatch);
    pPB[offset++] = 0x50;              // push      eax
    pPB[offset++] = 0x52;              // push      edx

    if (pCpu->fPrefix == DISPREFIX_SEG)
    {
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);
    }
    pPB[offset++] = 0x8D;              // lea       edx, dword ptr [dest]
    // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);

    i = 3; /* standard offset of modrm bytes */
    if (pCpu->fPrefix == DISPREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->fPrefix == DISPREFIX_SEG)
        i++;    //skip segment prefix

    /* Copy the remaining addressing bytes (SIB/displacement) straight from the guest instruction. */
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->cbInstr - i);

    pPB[offset++] = 0x66;              // mov       ax, CPUMCTX.gdtr.limit
    pPB[offset++] = 0xA1;
    *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_limit;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTRCPTR);

    pPB[offset++] = 0x66;              // mov       word ptr [edx],ax
    pPB[offset++] = 0x89;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0xA1;              // mov       eax, CPUMCTX.gdtr.base
    *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_base;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTRCPTR);

    pPB[offset++] = 0x89;              // mov       dword ptr [edx+2],eax
    pPB[offset++] = 0x42;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0x5A;              // pop       edx
    pPB[offset++] = 0x58;              // pop       eax

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}
1517
1518/**
1519 * Generate a cpuid patch instruction
1520 *
1521 * @returns VBox status code.
1522 * @param pVM Pointer to the VM.
1523 * @param pPatch Patch record
1524 * @param pCurInstrGC Guest instruction address
1525 */
1526int patmPatchGenCpuid(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
1527{
1528 uint32_t size;
1529 PATCHGEN_PROLOG(pVM, pPatch);
1530
1531 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCpuidRecord, 0, false);
1532
1533 PATCHGEN_EPILOG(pPatch, size);
1534 NOREF(pCurInstrGC);
1535 return VINF_SUCCESS;
1536}
1537
/**
 * Generate the jump from guest to patch code
 *
 * @returns VBox status code.
 * @param   pVM               Pointer to the VM.
 * @param   pPatch            Patch record
 * @param   pReturnAddrGC     Guest code target of the jump.
 * @param   fClearInhibitIRQs Clear inhibit irq flag
 */
int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fClearInhibitIRQs)
{
    int rc = VINF_SUCCESS;
    uint32_t size;

    if (fClearInhibitIRQs)
    {
        rc = patmPatchGenClearInhibitIRQ(pVM, pPatch, pReturnAddrGC);
        if (rc == VERR_NO_MEMORY)
            return rc;
        AssertRCReturn(rc, rc);
    }

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate code to jump to guest code if IF=1, else fault. */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
    PATCHGEN_EPILOG(pPatch, size);

    return rc;
}
1571
1572/*
1573 * Relative jump from patch code to patch code (no fixup required)
1574 */
1575int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RCPTRTYPE(uint8_t *) pPatchAddrGC, bool fAddLookupRecord)
1576{
1577 int32_t displ;
1578 int rc = VINF_SUCCESS;
1579
1580 Assert(PATMIsPatchGCAddr(pVM, pPatchAddrGC));
1581 PATCHGEN_PROLOG(pVM, pPatch);
1582
1583 if (fAddLookupRecord)
1584 {
1585 /* Add lookup record for patch to guest address translation */
1586 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
1587 }
1588
1589 pPB[0] = 0xE9; //JMP
1590
1591 displ = pPatchAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + SIZEOF_NEARJUMP32);
1592
1593 *(uint32_t *)&pPB[1] = displ;
1594
1595 PATCHGEN_EPILOG(pPatch, SIZEOF_NEARJUMP32);
1596
1597 return rc;
1598}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette