VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp@ 48568

Last change on this file since 48568 was 46150, checked in by vboxsync, 12 years ago

PATM: Patch symbols.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 52.8 KB
Line 
1/* $Id: PATMPatch.cpp 46150 2013-05-17 17:21:45Z vboxsync $ */
2/** @file
3 * PATMPatch - Dynamic Guest OS Instruction patches
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/trpm.h>
30#include <VBox/vmm/csam.h>
31#include "PATMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/param.h>
34
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/dis.h>
38#include <VBox/disopcode.h>
39
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43
44#include "PATMA.h"
45#include "PATMPatch.h"
46
/* internal structure for passing more information about call fixups to patmPatchGenCode */
typedef struct
{
    RTRCPTR pTargetGC;      /* Call/jump target address (0xDEADBEEF placeholder for indirect calls; see PATM_CALLTARGET). */
    RTRCPTR pCurInstrGC;    /* Guest address of the instruction being patched (PATM_CURINSTRADDR fixup). */
    RTRCPTR pNextInstrGC;   /* Guest address of the next instruction; may be 0 when several irq-inhibiting instructions follow each other (PATM_NEXTINSTRADDR fixup). */
    RTRCPTR pReturnGC;      /* Guest return address for call patches (PATM_RETURNADDR fixup). */
} PATMCALLINFO, *PPATMCALLINFO;
55
56int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType, RTRCPTR pSource, RTRCPTR pDest)
57{
58 PRELOCREC pRec;
59
60 Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
61
62 LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemGC , pSource, pDest));
63
64 pRec = (PRELOCREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
65 Assert(pRec);
66 pRec->Core.Key = (AVLPVKEY)pRelocHC;
67 pRec->pRelocPos = pRelocHC; /* @todo redundant. */
68 pRec->pSource = pSource;
69 pRec->pDest = pDest;
70 pRec->uType = uType;
71
72 bool ret = RTAvlPVInsert(&pPatch->FixupTree, &pRec->Core);
73 Assert(ret); NOREF(ret);
74 pPatch->nrFixups++;
75
76 return VINF_SUCCESS;
77}
78
79int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTRCPTR pTargetGC, uint32_t opcode)
80{
81 PJUMPREC pRec;
82
83 pRec = (PJUMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
84 Assert(pRec);
85
86 pRec->Core.Key = (AVLPVKEY)pJumpHC;
87 pRec->pJumpHC = pJumpHC; /* @todo redundant. */
88 pRec->offDispl = offset;
89 pRec->pTargetGC = pTargetGC;
90 pRec->opcode = opcode;
91
92 bool ret = RTAvlPVInsert(&pPatch->JumpTree, &pRec->Core);
93 Assert(ret); NOREF(ret);
94 pPatch->nrJumpRecs++;
95
96 return VINF_SUCCESS;
97}
98
/** Points the (already declared) local @c pPB at the current patch code write
 *  position.  NOTE: returns VERR_NO_MEMORY from the *enclosing function* when
 *  fewer than 256 bytes of patch memory remain, after setting fOutOfMemory. */
#define PATCHGEN_PROLOG_NODEF(pVM, pPatch) \
    pPB = PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset; \
    \
    if (pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) \
    { \
        pVM->patm.s.fOutOfMemory = true; \
        Assert(pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem); \
        return VERR_NO_MEMORY; \
    }

/** Declares the local patch buffer pointer @c pPB and initializes it;
 *  may return VERR_NO_MEMORY (see PATCHGEN_PROLOG_NODEF). */
#define PATCHGEN_PROLOG(pVM, pPatch) \
    uint8_t *pPB; \
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);


/** Commits @a size generated bytes by advancing the patch write offset. */
#define PATCHGEN_EPILOG(pPatch, size) \
    Assert(size <= 640); \
    pPatch->uCurPatchOffset += size;
118
/**
 * Copies a pre-assembled code template (PATMA.asm record) into the patch
 * buffer and resolves all fixup markers embedded in the template.
 *
 * @returns Size (bytes) of the generated code; the trailing near jump is
 *          excluded when the template has one but @a fGenJump is false.
 * @param   pVM             Pointer to the VM.
 * @param   pPatch          Patch record.
 * @param   pPB             Host context write position in the patch buffer.
 * @param   pAsmRecord      Template descriptor (code bytes + relocation list).
 * @param   pReturnAddrGC   Guest address to jump back to (used only when fGenJump is true).
 * @param   fGenJump        Whether to patch up the jump back to guest code.
 * @param   pCallInfo       Optional call information for call/jump related fixups.
 */
static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PPATCHASMRECORD pAsmRecord, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fGenJump,
                                 PPATMCALLINFO pCallInfo = 0)
{
    uint32_t i, j;

    Assert(fGenJump == false || pReturnAddrGC);
    Assert(fGenJump == false || pAsmRecord->offJump);
    Assert(pAsmRecord && pAsmRecord->size > sizeof(pAsmRecord->uReloc[0]));

    // Copy the code block
    memcpy(pPB, pAsmRecord->pFunction, pAsmRecord->size);

    // Process all fixups
    /* uReloc[] holds (marker, extra-info) pairs; markers are searched for in
     * template order, so j advances monotonically through the copied bytes. */
    for (j=0,i=0;i<pAsmRecord->nrRelocs*2; i+=2)
    {
        for (;j<pAsmRecord->size;j++)
        {
            if (*(uint32_t*)&pPB[j] == pAsmRecord->uReloc[i])
            {
                RCPTRTYPE(uint32_t *) dest;

#ifdef VBOX_STRICT
                /* Only PATM_FIXUP carries extra info in uReloc[i+1]. */
                if (pAsmRecord->uReloc[i] == PATM_FIXUP)
                    Assert(pAsmRecord->uReloc[i+1] != 0);
                else
                    Assert(pAsmRecord->uReloc[i+1] == 0);
#endif

                /**
                 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING A SAVED STATE WITH
                 * A DIFFERENT HYPERVISOR LAYOUT.
                 */
                switch (pAsmRecord->uReloc[i])
                {
                    case PATM_VMFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
                        break;

                    case PATM_PENDINGACTION:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
                        break;

                    case PATM_FIXUP:
                        /* Offset in uReloc[i+1] is from the base of the function. */
                        dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->uReloc[i+1] + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
                        break;
#ifdef VBOX_WITH_STATISTICS
                    case PATM_ALLPATCHCALLS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
                        break;

                    case PATM_IRETEFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
                        break;

                    case PATM_IRETCS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
                        break;

                    case PATM_IRETEIP:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
                        break;

                    case PATM_PERPATCHCALLS:
                        dest = patmPatchQueryStatAddress(pVM, pPatch);
                        break;
#endif
                    case PATM_STACKPTR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
                        break;

                    /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
                     * part to store the original return addresses.
                     */
                    case PATM_STACKBASE:
                        dest = pVM->patm.s.pGCStackGC;
                        break;

                    case PATM_STACKBASE_GUEST:
                        dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
                        break;

                    case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
                        Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);
                        dest = pCallInfo->pReturnGC;
                        break;

                    case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
                        Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                        /** @note hardcoded assumption that we must return to the instruction following this block */
                        dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->size;
                        break;

                    case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
                        Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                        /* Address must be filled in later. (see patmr3SetBranchTargets)  */
                        patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
                        dest = PATM_ILLEGAL_DESTINATION;
                        break;

                    case PATM_PATCHBASE:    /* Patch GC base address */
                        dest = pVM->patm.s.pPatchMemGC;
                        break;

                    case PATM_CPUID_STD_PTR:
                        /* @todo dirty hack when correcting this fixup (state restore) */
                        dest = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
                        break;

                    case PATM_CPUID_EXT_PTR:
                        /* @todo dirty hack when correcting this fixup (state restore) */
                        dest = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
                        break;

                    case PATM_CPUID_CENTAUR_PTR:
                        /* @todo dirty hack when correcting this fixup (state restore) */
                        dest = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
                        break;

                    case PATM_CPUID_DEF_PTR:
                        /* @todo dirty hack when correcting this fixup (state restore) */
                        dest = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
                        break;

                    case PATM_CPUID_STD_MAX:
                        dest = CPUMGetGuestCpuIdStdMax(pVM);
                        break;

                    case PATM_CPUID_EXT_MAX:
                        dest = CPUMGetGuestCpuIdExtMax(pVM);
                        break;

                    case PATM_CPUID_CENTAUR_MAX:
                        dest = CPUMGetGuestCpuIdCentaurMax(pVM);
                        break;

                    case PATM_INTERRUPTFLAG:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
                        break;

                    case PATM_INHIBITIRQADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
                        break;

                    case PATM_NEXTINSTRADDR:
                        Assert(pCallInfo);
                        /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
                        dest = pCallInfo->pNextInstrGC;
                        break;

                    case PATM_CURINSTRADDR:
                        Assert(pCallInfo);
                        dest = pCallInfo->pCurInstrGC;
                        break;

                    case PATM_VM_FORCEDACTIONS:
                        /* @todo dirty assumptions when correcting this fixup during saved state loading. */
                        dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                        break;

                    /* Scratch fields for saving/restoring guest registers around patch code. */
                    case PATM_TEMP_EAX:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
                        break;
                    case PATM_TEMP_ECX:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
                        break;
                    case PATM_TEMP_EDI:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
                        break;
                    case PATM_TEMP_EFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
                        break;
                    case PATM_TEMP_RESTORE_FLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
                        break;
                    case PATM_CALL_PATCH_TARGET_ADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
                        break;
                    case PATM_CALL_RETURN_ADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
                        break;

                    /* Relative address of global patm lookup and call function. */
                    case PATM_LOOKUP_AND_CALL_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperCallGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_RETURN_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperRetGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_IRET_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperIretGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_LOOKUP_AND_JUMP_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperJumpGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
                        break;
                    }

                    default:
                        dest = PATM_ILLEGAL_DESTINATION;
                        AssertRelease(0);
                        break;
                }

                *(RTRCPTR *)&pPB[j] = dest;
                /* Markers below PATM_NO_FIXUP need a relocation record for saved state restore. */
                if (pAsmRecord->uReloc[i] < PATM_NO_FIXUP)
                {
                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE);
                }
                break;
            }
        }
        Assert(j < pAsmRecord->size);
    }
    Assert(pAsmRecord->uReloc[i] == 0xffffffff);

    /* Add the jump back to guest code (if required) */
    if (fGenJump)
    {
        int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);

        /* Add lookup record for patch to guest address translation */
        Assert(pPB[pAsmRecord->offJump - 1] == 0xE9);
        patmR3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

        *(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
        patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
                            PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32,
                            pReturnAddrGC);
    }

    // Calculate the right size of this patch block
    if ((fGenJump && pAsmRecord->offJump) || (!fGenJump && !pAsmRecord->offJump))
    {
        return pAsmRecord->size;
    }
    else {
        // if a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32
        return pAsmRecord->size - SIZEOF_NEARJUMP32;
    }
}
391
392/* Read bytes and check for overwritten instructions. */
393static int patmPatchReadBytes(PVM pVM, uint8_t *pDest, RTRCPTR pSrc, uint32_t cb)
394{
395 int rc = PGMPhysSimpleReadGCPtr(&pVM->aCpus[0], pDest, pSrc, cb);
396 AssertRCReturn(rc, rc);
397 /*
398 * Could be patched already; make sure this is checked!
399 */
400 for (uint32_t i=0;i<cb;i++)
401 {
402 uint8_t temp;
403
404 int rc2 = PATMR3QueryOpcode(pVM, pSrc+i, &temp);
405 if (RT_SUCCESS(rc2))
406 {
407 pDest[i] = temp;
408 }
409 else
410 break; /* no more */
411 }
412 return VINF_SUCCESS;
413}
414
415int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
416{
417 int rc = VINF_SUCCESS;
418 PATCHGEN_PROLOG(pVM, pPatch);
419
420 uint32_t const cbInstrShutUpGcc = pCpu->cbInstr;
421 rc = patmPatchReadBytes(pVM, pPB, pCurInstrGC, cbInstrShutUpGcc);
422 AssertRC(rc);
423 PATCHGEN_EPILOG(pPatch, cbInstrShutUpGcc);
424 return rc;
425}
426
427int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, bool fSizeOverride)
428{
429 uint32_t size;
430 PATMCALLINFO callInfo;
431
432 PATCHGEN_PROLOG(pVM, pPatch);
433
434 AssertMsg(fSizeOverride == false, ("operand size override!!\n"));
435 callInfo.pCurInstrGC = pCurInstrGC;
436
437 if (EMIsRawRing1Enabled(pVM))
438 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRing1Record, 0, false, &callInfo);
439 else
440 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);
441
442 PATCHGEN_EPILOG(pPatch, size);
443 return VINF_SUCCESS;
444}
445
446int patmPatchGenCli(PVM pVM, PPATCHINFO pPatch)
447{
448 uint32_t size;
449 PATCHGEN_PROLOG(pVM, pPatch);
450
451 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCliRecord, 0, false);
452
453 PATCHGEN_EPILOG(pPatch, size);
454 return VINF_SUCCESS;
455}
456
457/*
458 * Generate an STI patch
459 */
460int patmPatchGenSti(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RTRCPTR pNextInstrGC)
461{
462 PATMCALLINFO callInfo;
463 uint32_t size;
464
465 Log(("patmPatchGenSti at %RRv; next %RRv\n", pCurInstrGC, pNextInstrGC));
466 PATCHGEN_PROLOG(pVM, pPatch);
467 callInfo.pNextInstrGC = pNextInstrGC;
468 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStiRecord, 0, false, &callInfo);
469 PATCHGEN_EPILOG(pPatch, size);
470
471 return VINF_SUCCESS;
472}
473
474
475int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
476{
477 uint32_t size;
478 PATMCALLINFO callInfo;
479
480 PATCHGEN_PROLOG(pVM, pPatch);
481
482 callInfo.pNextInstrGC = pReturnAddrGC;
483
484 Log(("patmPatchGenPopf at %RRv\n", pReturnAddrGC));
485
486 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
487 if (fSizeOverride == true)
488 {
489 Log(("operand size override!!\n"));
490 size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf16Record : &PATMPopf16Record_NoExit , pReturnAddrGC, fGenJumpBack, &callInfo);
491 }
492 else
493 {
494 size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf32Record : &PATMPopf32Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
495 }
496
497 PATCHGEN_EPILOG(pPatch, size);
498 STAM_COUNTER_INC(&pVM->patm.s.StatGenPopf);
499 return VINF_SUCCESS;
500}
501
502int patmPatchGenPushf(PVM pVM, PPATCHINFO pPatch, bool fSizeOverride)
503{
504 uint32_t size;
505 PATCHGEN_PROLOG(pVM, pPatch);
506
507 if (fSizeOverride == true)
508 {
509 Log(("operand size override!!\n"));
510 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf16Record, 0, false);
511 }
512 else
513 {
514 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf32Record, 0, false);
515 }
516
517 PATCHGEN_EPILOG(pPatch, size);
518 return VINF_SUCCESS;
519}
520
521int patmPatchGenPushCS(PVM pVM, PPATCHINFO pPatch)
522{
523 uint32_t size;
524 PATCHGEN_PROLOG(pVM, pPatch);
525 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushCSRecord, 0, false);
526 PATCHGEN_EPILOG(pPatch, size);
527 return VINF_SUCCESS;
528}
529
530int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
531{
532 uint32_t size = 0;
533 PPATCHASMRECORD pPatchAsmRec;
534
535 PATCHGEN_PROLOG(pVM, pPatch);
536
537 switch (opcode)
538 {
539 case OP_LOOP:
540 pPatchAsmRec = &PATMLoopRecord;
541 break;
542 case OP_LOOPNE:
543 pPatchAsmRec = &PATMLoopNZRecord;
544 break;
545 case OP_LOOPE:
546 pPatchAsmRec = &PATMLoopZRecord;
547 break;
548 case OP_JECXZ:
549 pPatchAsmRec = &PATMJEcxRecord;
550 break;
551 default:
552 AssertMsgFailed(("PatchGenLoop: invalid opcode %d\n", opcode));
553 return VERR_INVALID_PARAMETER;
554 }
555 Assert(pPatchAsmRec->offSizeOverride && pPatchAsmRec->offRelJump);
556
557 Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));
558
559 // Generate the patch code
560 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);
561
562 if (fSizeOverride)
563 {
564 pPB[pPatchAsmRec->offSizeOverride] = 0x66; // ecx -> cx or vice versa
565 }
566
567 *(RTRCPTR *)&pPB[pPatchAsmRec->offRelJump] = 0xDEADBEEF;
568
569 patmPatchAddJump(pVM, pPatch, &pPB[pPatchAsmRec->offRelJump - 1], 1, pTargetGC, opcode);
570
571 PATCHGEN_EPILOG(pPatch, size);
572 return VINF_SUCCESS;
573}
574
575int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
576{
577 uint32_t offset = 0;
578 PATCHGEN_PROLOG(pVM, pPatch);
579
580 // internal relative jumps from patch code to patch code; no relocation record required
581
582 Assert(PATMIsPatchGCAddr(pVM, pTargetGC) == false);
583
584 switch (opcode)
585 {
586 case OP_JO:
587 pPB[1] = 0x80;
588 break;
589 case OP_JNO:
590 pPB[1] = 0x81;
591 break;
592 case OP_JC:
593 pPB[1] = 0x82;
594 break;
595 case OP_JNC:
596 pPB[1] = 0x83;
597 break;
598 case OP_JE:
599 pPB[1] = 0x84;
600 break;
601 case OP_JNE:
602 pPB[1] = 0x85;
603 break;
604 case OP_JBE:
605 pPB[1] = 0x86;
606 break;
607 case OP_JNBE:
608 pPB[1] = 0x87;
609 break;
610 case OP_JS:
611 pPB[1] = 0x88;
612 break;
613 case OP_JNS:
614 pPB[1] = 0x89;
615 break;
616 case OP_JP:
617 pPB[1] = 0x8A;
618 break;
619 case OP_JNP:
620 pPB[1] = 0x8B;
621 break;
622 case OP_JL:
623 pPB[1] = 0x8C;
624 break;
625 case OP_JNL:
626 pPB[1] = 0x8D;
627 break;
628 case OP_JLE:
629 pPB[1] = 0x8E;
630 break;
631 case OP_JNLE:
632 pPB[1] = 0x8F;
633 break;
634
635 case OP_JMP:
636 /* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
637 /* Add lookup record for patch to guest address translation */
638 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);
639
640 pPB[0] = 0xE9;
641 break;
642
643 case OP_JECXZ:
644 case OP_LOOP:
645 case OP_LOOPNE:
646 case OP_LOOPE:
647 return patmPatchGenLoop(pVM, pPatch, pTargetGC, opcode, fSizeOverride);
648
649 default:
650 AssertMsg(0, ("Invalid jump opcode %d\n", opcode));
651 return VERR_PATCHING_REFUSED;
652 }
653 if (opcode != OP_JMP)
654 {
655 pPB[0] = 0xF;
656 offset += 2;
657 }
658 else offset++;
659
660 *(RTRCPTR *)&pPB[offset] = 0xDEADBEEF;
661
662 patmPatchAddJump(pVM, pPatch, pPB, offset, pTargetGC, opcode);
663
664 offset += sizeof(RTRCPTR);
665
666 PATCHGEN_EPILOG(pPatch, offset);
667 return VINF_SUCCESS;
668}
669
/**
 * Rewrite call to dynamic or currently unknown function (on-demand patching of function)
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPatch      Patch record.
 * @param   pCpu        Disassembly state of the call instruction.
 * @param   pCurInstrGC Guest address of the call instruction.
 * @param   pTargetGC   Guest call target (direct calls only).
 * @param   fIndirect   True for indirect (r/m32) calls.
 */
int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC, RTRCPTR pTargetGC, bool fIndirect)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /** @note Don't check for IF=1 here. The ret instruction will do this. */
    /** @note It's dangerous to do this for 'normal' patches. the jump target might be inside the generated patch jump. (seen this!) */

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    if (fIndirect)
    {
        Log(("patmPatchGenIndirectCall\n"));
        Assert(pCpu->Param1.cb == 4);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J);

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        /* include prefix byte to make sure we don't use the incorrect selector register. */
        if (pCpu->fPrefix & DISPREFIX_SEG)
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        pPB[offset++] = 0xFF;   // push r/m32
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
        i = 2;  /* standard offset of modrm bytes */
        if (pCpu->fPrefix & DISPREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->fPrefix & DISPREFIX_SEG)
            i++;    //skip segment prefix

        /* Copy the remaining displacement/SIB bytes of the original call operand. */
        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->cbInstr - i);
    }
    else
    {
        AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%RRv)?!?\n", pTargetGC));
        Assert(pTargetGC);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J);

        /** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */

        /* Relative call to patch code (patch to patch -> no fixup). */
        Log(("PatchGenCall from %RRv (next=%RRv) to %RRv\n", pCurInstrGC, pCurInstrGC + pCpu->cbInstr, pTargetGC));

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        pPB[offset++] = 0x68;   // push %Iv
        *(RTRCPTR *)&pPB[offset] = pTargetGC;
        offset += sizeof(RTRCPTR);
    }

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
    callInfo.pTargetGC = (fIndirect) ? 0xDEADBEEF : pTargetGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, (fIndirect) ? &PATMCallIndirectRecord : &PATMCallRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    /* Need to set PATM_INTERRUPTFLAG after the patched ret returns here. */
    rc = patmPatchGenSetPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenCall);
    return VINF_SUCCESS;
}
764
/**
 * Generate indirect jump to unknown destination
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Current instruction address
 */
int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    Log(("patmPatchGenIndirectJump\n"));
    Assert(pCpu->Param1.cb == 4);
    Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J);

    /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
     * a page fault. The assembly code restores the stack afterwards.
     */
    offset = 0;
    /* include prefix byte to make sure we don't use the incorrect selector register. */
    if (pCpu->fPrefix & DISPREFIX_SEG)
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);

    pPB[offset++] = 0xFF;   // push r/m32
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
    i = 2;  /* standard offset of modrm bytes */
    if (pCpu->fPrefix & DISPREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->fPrefix & DISPREFIX_SEG)
        i++;    //skip segment prefix

    /* Copy the remaining displacement/SIB bytes of the original jump operand. */
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->cbInstr - i);

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
    callInfo.pTargetGC = 0xDEADBEEF;    /* target comes off the stack; placeholder only */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpIndirectRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenJump);
    return VINF_SUCCESS;
}
835
/**
 * Generate return instruction
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPatch      Patch structure
 * @param   pCpu        Disassembly struct
 * @param   pCurInstrGC Current instruction pointer
 *
 */
int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int size = 0, rc;
    RTRCPTR pPatchRetInstrGC;

    /* Remember start of this patch for below. */
    pPatchRetInstrGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;

    Log(("patmPatchGenRet %RRv\n", pCurInstrGC));

    /** @note optimization: multiple identical ret instruction in a single patch can share a single patched ret. */
    if (    pPatch->pTempInfo->pPatchRetInstrGC
        &&  pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->Param1.uValue) /* nr of bytes popped off the stack should be identical of course! */
    {
        Assert(pCpu->pCurInstr->uOpcode == OP_RETN);
        STAM_COUNTER_INC(&pVM->patm.s.StatGenRetReused);

        /* Reuse the previously generated ret block for this patch. */
        return patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, pPatch->pTempInfo->pPatchRetInstrGC);
    }

    /* Jump back to the original instruction if IF is set again. */
    Assert(!patmFindActivePatchByEntrypoint(pVM, pCurInstrGC));
    rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
    AssertRCReturn(rc, rc);

    /* align this block properly to make sure the jump table will not be misaligned. */
    PATCHGEN_PROLOG(pVM, pPatch);
    size = (RTHCUINTPTR)pPB & 3;
    if (size)
        size = 4 - size;

    for (int i=0;i<size;i++)
        pPB[i] = 0x90;  /* nop */
    PATCHGEN_EPILOG(pPatch, size);

    /* Emit the ret helper template. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenRet);
    /* Duplicate the ret or ret n instruction; it will use the PATM return address */
    rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);

    if (rc == VINF_SUCCESS)
    {
        /* Cache this block so later identical rets in the same patch can reuse it. */
        pPatch->pTempInfo->pPatchRetInstrGC = pPatchRetInstrGC;
        pPatch->pTempInfo->uPatchRetParam1 = pCpu->Param1.uValue;
    }
    return rc;
}
896
/**
 * Generate all global patm functions
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pPatch  Patch structure
 *
 */
int patmPatchGenGlobalFunctions(PVM pVM, PPATCHINFO pPatch)
{
    int size = 0;

    /* Lookup-and-call helper; entry point recorded before generation. */
    pVM->patm.s.pfnHelperCallGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndCallRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Return helper. */
    pVM->patm.s.pfnHelperRetGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Lookup-and-jump helper. */
    pVM->patm.s.pfnHelperJumpGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndJumpRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    /* Iret helper. */
    pVM->patm.s.pfnHelperIretGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    Log(("pfnHelperCallGC %RRv\n", pVM->patm.s.pfnHelperCallGC));
    Log(("pfnHelperRetGC  %RRv\n", pVM->patm.s.pfnHelperRetGC));
    Log(("pfnHelperJumpGC %RRv\n", pVM->patm.s.pfnHelperJumpGC));
    Log(("pfnHelperIretGC %RRv\n", pVM->patm.s.pfnHelperIretGC));

    return VINF_SUCCESS;
}
945
946/**
947 * Generate illegal instruction (int 3)
948 *
949 * @returns VBox status code.
950 * @param pVM Pointer to the VM.
951 * @param pPatch Patch structure
952 *
953 */
954int patmPatchGenIllegalInstr(PVM pVM, PPATCHINFO pPatch)
955{
956 PATCHGEN_PROLOG(pVM, pPatch);
957
958 pPB[0] = 0xCC;
959
960 PATCHGEN_EPILOG(pPatch, 1);
961 return VINF_SUCCESS;
962}
963
964/**
965 * Check virtual IF flag and jump back to original guest code if set
966 *
967 * @returns VBox status code.
968 * @param pVM Pointer to the VM.
969 * @param pPatch Patch structure
970 * @param pCurInstrGC Guest context pointer to the current instruction
971 *
972 */
973int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
974{
975 uint32_t size;
976
977 PATCHGEN_PROLOG(pVM, pPatch);
978
979 /* Add lookup record for patch to guest address translation */
980 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
981
982 /* Generate code to check for IF=1 before executing the call to the duplicated function. */
983 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCheckIFRecord, pCurInstrGC, true);
984
985 PATCHGEN_EPILOG(pPatch, size);
986 return VINF_SUCCESS;
987}
988
989/**
990 * Set PATM interrupt flag
991 *
992 * @returns VBox status code.
993 * @param pVM Pointer to the VM.
994 * @param pPatch Patch structure
995 * @param pInstrGC Corresponding guest instruction
996 *
997 */
998int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
999{
1000 PATCHGEN_PROLOG(pVM, pPatch);
1001
1002 /* Add lookup record for patch to guest address translation */
1003 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1004
1005 int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
1006 PATCHGEN_EPILOG(pPatch, size);
1007 return VINF_SUCCESS;
1008}
1009
1010/**
1011 * Clear PATM interrupt flag
1012 *
1013 * @returns VBox status code.
1014 * @param pVM Pointer to the VM.
1015 * @param pPatch Patch structure
1016 * @param pInstrGC Corresponding guest instruction
1017 *
1018 */
1019int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1020{
1021 PATCHGEN_PROLOG(pVM, pPatch);
1022
1023 /* Add lookup record for patch to guest address translation */
1024 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1025
1026 int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
1027 PATCHGEN_EPILOG(pPatch, size);
1028 return VINF_SUCCESS;
1029}
1030
1031
1032/**
1033 * Clear PATM inhibit irq flag
1034 *
1035 * @returns VBox status code.
1036 * @param pVM Pointer to the VM.
1037 * @param pPatch Patch structure
1038 * @param pNextInstrGC Next guest instruction
1039 */
1040int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTRCPTR pNextInstrGC)
1041{
1042 int size;
1043 PATMCALLINFO callInfo;
1044
1045 PATCHGEN_PROLOG(pVM, pPatch);
1046
1047 Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));
1048
1049 /* Add lookup record for patch to guest address translation */
1050 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);
1051
1052 callInfo.pNextInstrGC = pNextInstrGC;
1053
1054 if (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1055 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQContIF0Record, 0, false, &callInfo);
1056 else
1057 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQFaultIF0Record, 0, false, &callInfo);
1058
1059 PATCHGEN_EPILOG(pPatch, size);
1060 return VINF_SUCCESS;
1061}
1062
1063/**
1064 * Generate an interrupt handler entrypoint
1065 *
1066 * @returns VBox status code.
1067 * @param pVM Pointer to the VM.
1068 * @param pPatch Patch record
1069 * @param pIntHandlerGC IDT handler address
1070 *
1071 ** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
1072 */
1073int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
1074{
1075 int rc = VINF_SUCCESS;
1076
1077 if (!EMIsRawRing1Enabled(pVM)) /* direct passthru of interrupts is not allowed in the ring-1 support case as we can't
1078 deal with the ring-1/2 ambiguity in the patm asm code and we don't need it either as
1079 TRPMForwardTrap takes care of the details. */
1080 {
1081 uint32_t size;
1082 PATCHGEN_PROLOG(pVM, pPatch);
1083
1084 /* Add lookup record for patch to guest address translation */
1085 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1086
1087 /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
1088 size = patmPatchGenCode(pVM, pPatch, pPB,
1089 (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
1090 0, false);
1091
1092 PATCHGEN_EPILOG(pPatch, size);
1093 }
1094
1095 // Interrupt gates set IF to 0
1096 rc = patmPatchGenCli(pVM, pPatch);
1097 AssertRCReturn(rc, rc);
1098
1099 return rc;
1100}
1101
1102/**
1103 * Generate a trap handler entrypoint
1104 *
1105 * @returns VBox status code.
1106 * @param pVM Pointer to the VM.
1107 * @param pPatch Patch record
1108 * @param pTrapHandlerGC IDT handler address
1109 */
1110int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTrapHandlerGC)
1111{
1112 uint32_t size;
1113
1114 Assert(!EMIsRawRing1Enabled(pVM));
1115
1116 PATCHGEN_PROLOG(pVM, pPatch);
1117
1118 /* Add lookup record for patch to guest address translation */
1119 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1120
1121 /* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
1122 size = patmPatchGenCode(pVM, pPatch, pPB,
1123 (pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE) ? &PATMTrapEntryRecordErrorCode : &PATMTrapEntryRecord,
1124 pTrapHandlerGC, true);
1125 PATCHGEN_EPILOG(pPatch, size);
1126
1127 return VINF_SUCCESS;
1128}
1129
1130#ifdef VBOX_WITH_STATISTICS
1131int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1132{
1133 uint32_t size;
1134
1135 PATCHGEN_PROLOG(pVM, pPatch);
1136
1137 /* Add lookup record for stats code -> guest handler. */
1138 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1139
1140 /* Generate code to keep calling statistics for this patch */
1141 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStatsRecord, pInstrGC, false);
1142 PATCHGEN_EPILOG(pPatch, size);
1143
1144 return VINF_SUCCESS;
1145}
1146#endif
1147
1148/**
1149 * Debug register moves to or from general purpose registers
1150 * mov GPR, DRx
1151 * mov DRx, GPR
1152 *
1153 * @todo: if we ever want to support hardware debug registers natively, then
1154 * this will need to be changed!
1155 */
1156int patmPatchGenMovDebug(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
1157{
1158 int rc = VINF_SUCCESS;
1159 unsigned reg, mod, rm, dbgreg;
1160 uint32_t offset;
1161
1162 PATCHGEN_PROLOG(pVM, pPatch);
1163
1164 mod = 0; //effective address (only)
1165 rm = 5; //disp32
1166 if (pCpu->pCurInstr->fParam1 == OP_PARM_Dd)
1167 {
1168 Assert(0); // You not come here. Illegal!
1169
1170 // mov DRx, GPR
1171 pPB[0] = 0x89; //mov disp32, GPR
1172 Assert(pCpu->Param1.fUse & DISUSE_REG_DBG);
1173 Assert(pCpu->Param2.fUse & DISUSE_REG_GEN32);
1174
1175 dbgreg = pCpu->Param1.Base.idxDbgReg;
1176 reg = pCpu->Param2.Base.idxGenReg;
1177 }
1178 else
1179 {
1180 // mov GPR, DRx
1181 Assert(pCpu->Param1.fUse & DISUSE_REG_GEN32);
1182 Assert(pCpu->Param2.fUse & DISUSE_REG_DBG);
1183
1184 pPB[0] = 0x8B; // mov GPR, disp32
1185 reg = pCpu->Param1.Base.idxGenReg;
1186 dbgreg = pCpu->Param2.Base.idxDbgReg;
1187 }
1188
1189 pPB[1] = MAKE_MODRM(mod, reg, rm);
1190
1191 AssertReturn(dbgreg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1192 offset = RT_OFFSETOF(CPUMCTX, dr[dbgreg]);
1193
1194 *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
1195 patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);
1196
1197 PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
1198 return rc;
1199}
1200
1201/*
1202 * Control register moves to or from general purpose registers
1203 * mov GPR, CRx
1204 * mov CRx, GPR
1205 */
1206int patmPatchGenMovControl(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
1207{
1208 int rc = VINF_SUCCESS;
1209 int reg, mod, rm, ctrlreg;
1210 uint32_t offset;
1211
1212 PATCHGEN_PROLOG(pVM, pPatch);
1213
1214 mod = 0; //effective address (only)
1215 rm = 5; //disp32
1216 if (pCpu->pCurInstr->fParam1 == OP_PARM_Cd)
1217 {
1218 Assert(0); // You not come here. Illegal!
1219
1220 // mov CRx, GPR
1221 pPB[0] = 0x89; //mov disp32, GPR
1222 ctrlreg = pCpu->Param1.Base.idxCtrlReg;
1223 reg = pCpu->Param2.Base.idxGenReg;
1224 Assert(pCpu->Param1.fUse & DISUSE_REG_CR);
1225 Assert(pCpu->Param2.fUse & DISUSE_REG_GEN32);
1226 }
1227 else
1228 {
1229 // mov GPR, CRx
1230 Assert(pCpu->Param1.fUse & DISUSE_REG_GEN32);
1231 Assert(pCpu->Param2.fUse & DISUSE_REG_CR);
1232
1233 pPB[0] = 0x8B; // mov GPR, disp32
1234 reg = pCpu->Param1.Base.idxGenReg;
1235 ctrlreg = pCpu->Param2.Base.idxCtrlReg;
1236 }
1237
1238 pPB[1] = MAKE_MODRM(mod, reg, rm);
1239
1240 /// @todo: make this an array in the context structure
1241 switch (ctrlreg)
1242 {
1243 case DISCREG_CR0:
1244 offset = RT_OFFSETOF(CPUMCTX, cr0);
1245 break;
1246 case DISCREG_CR2:
1247 offset = RT_OFFSETOF(CPUMCTX, cr2);
1248 break;
1249 case DISCREG_CR3:
1250 offset = RT_OFFSETOF(CPUMCTX, cr3);
1251 break;
1252 case DISCREG_CR4:
1253 offset = RT_OFFSETOF(CPUMCTX, cr4);
1254 break;
1255 default: /* Shut up compiler warning. */
1256 AssertFailed();
1257 offset = 0;
1258 break;
1259 }
1260 *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
1261 patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);
1262
1263 PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
1264 return rc;
1265}
1266
1267/*
1268 * mov GPR, SS
1269 */
1270int patmPatchGenMovFromSS(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
1271{
1272 uint32_t size, offset;
1273
1274 Log(("patmPatchGenMovFromSS %RRv\n", pCurInstrGC));
1275
1276 Assert(pPatch->flags & PATMFL_CODE32);
1277
1278 PATCHGEN_PROLOG(pVM, pPatch);
1279 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
1280 PATCHGEN_EPILOG(pPatch, size);
1281
1282 /* push ss */
1283 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
1284 offset = 0;
1285 if (pCpu->fPrefix & DISPREFIX_OPSIZE)
1286 pPB[offset++] = 0x66; /* size override -> 16 bits push */
1287 pPB[offset++] = 0x16;
1288 PATCHGEN_EPILOG(pPatch, offset);
1289
1290 /* checks and corrects RPL of pushed ss*/
1291 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
1292 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMMovFromSSRecord, 0, false);
1293 PATCHGEN_EPILOG(pPatch, size);
1294
1295 /* pop general purpose register */
1296 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
1297 offset = 0;
1298 if (pCpu->fPrefix & DISPREFIX_OPSIZE)
1299 pPB[offset++] = 0x66; /* size override -> 16 bits pop */
1300 pPB[offset++] = 0x58 + pCpu->Param1.Base.idxGenReg;
1301 PATCHGEN_EPILOG(pPatch, offset);
1302
1303
1304 PATCHGEN_PROLOG_NODEF(pVM, pPatch);
1305 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
1306 PATCHGEN_EPILOG(pPatch, size);
1307
1308 return VINF_SUCCESS;
1309}
1310
1311
1312/**
1313 * Generate an sldt or str patch instruction
1314 *
1315 * @returns VBox status code.
1316 * @param pVM Pointer to the VM.
1317 * @param pPatch Patch record
1318 * @param pCpu Disassembly state
1319 * @param pCurInstrGC Guest instruction address
1320 */
int patmPatchGenSldtStr(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    // sldt %Ew
    int rc = VINF_SUCCESS;
    uint32_t offset = 0;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->fPrefix == DISPREFIX_NONE || pCpu->fPrefix == DISPREFIX_OPSIZE);

    PATCHGEN_PROLOG(pVM, pPatch);

    if (pCpu->Param1.fUse == DISUSE_REG_GEN32 || pCpu->Param1.fUse == DISUSE_REG_GEN16)
    {
        /* Register operand: load the shadowed TR/LDTR selector from CPUMCTX
           straight into the destination register. */
        // 8B 15 [32 bits addr]   mov edx, CPUMCTX.tr/ldtr

        if (pCpu->fPrefix == DISPREFIX_OPSIZE)
            pPB[offset++] = 0x66; /* size override -> 16 bits move */

        pPB[offset++] = 0x8B;              // mov       destreg, CPUMCTX.tr/ldtr
        /* Modify REG part according to destination of original instruction */
        pPB[offset++] = MAKE_MODRM(0, pCpu->Param1.Base.idxGenReg, 5);
        if (pCpu->pCurInstr->uOpcode == OP_STR)
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        /* The absolute disp32 must be fixed up when the patch is relocated. */
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTRCPTR);
    }
    else
    {
        /* Memory operand: recompute the effective address with a lea reusing
           the original instruction's addressing bytes, then store the 16-bit
           selector there. eax/edx are preserved around the sequence. */
        //50 push eax
        //52 push edx
        //8D 15 48 7C 42 00      lea edx, dword ptr [dest]
        //66 A1 48 7C 42 00      mov ax, CPUMCTX.tr/ldtr
        //66 89 02               mov word ptr [edx],ax
        //5A pop edx
        //58 pop eax

        pPB[offset++] = 0x50;              // push eax
        pPB[offset++] = 0x52;              // push edx

        /* NOTE(review): the assertion above only allows DISPREFIX_NONE/OPSIZE,
           which makes this DISPREFIX_SEG branch look unreachable - confirm. */
        if (pCpu->fPrefix == DISPREFIX_SEG)
        {
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        }
        pPB[offset++] = 0x8D;              // lea edx, dword ptr [dest]
        // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);

        i = 3; /* standard offset of modrm bytes */
        if (pCpu->fPrefix == DISPREFIX_OPSIZE)
            i++; //skip operand prefix
        if (pCpu->fPrefix == DISPREFIX_SEG)
            i++; //skip segment prefix

        /* Copy the remaining addressing bytes (SIB/displacement) of the
           original guest instruction so the lea resolves the same address. */
        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->cbInstr - i);

        pPB[offset++] = 0x66;              // mov ax, CPUMCTX.tr/ldtr
        pPB[offset++] = 0xA1;
        if (pCpu->pCurInstr->uOpcode == OP_STR)
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        /* The absolute disp32 must be fixed up when the patch is relocated. */
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTRCPTR);

        pPB[offset++] = 0x66;              // mov word ptr [edx],ax
        pPB[offset++] = 0x89;
        pPB[offset++] = 0x02;

        pPB[offset++] = 0x5A;              // pop edx
        pPB[offset++] = 0x58;              // pop eax
    }

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}
1412
1413/**
1414 * Generate an sgdt or sidt patch instruction
1415 *
1416 * @returns VBox status code.
1417 * @param pVM Pointer to the VM.
1418 * @param pPatch Patch record
1419 * @param pCpu Disassembly state
1420 * @param pCurInstrGC Guest instruction address
1421 */
int patmPatchGenSxDT(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    uint32_t offset = 0, offset_base, offset_limit;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->fPrefix == DISPREFIX_NONE);

    // sgdt %Ms
    // sidt %Ms

    /* Pick the CPUMCTX fields shadowing the guest's GDTR or IDTR. */
    switch (pCpu->pCurInstr->uOpcode)
    {
        case OP_SGDT:
            offset_base  = RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
            offset_limit = RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
            break;

        case OP_SIDT:
            offset_base  = RT_OFFSETOF(CPUMCTX, idtr.pIdt);
            offset_limit = RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }

/* Emitted sequence: resolve the destination address with lea, then store the
   16-bit limit followed by the 32-bit base (the s*dt memory layout), keeping
   eax/edx preserved:
   50                  push eax
   52                  push edx
   8D 15 48 7C 42 00   lea edx, dword ptr [dest]
   66 A1 48 7C 42 00   mov ax, CPUMCTX.gdtr.limit
   66 89 02            mov word ptr [edx],ax
   A1 48 7C 42 00      mov eax, CPUMCTX.gdtr.base
   89 42 02            mov dword ptr [edx+2],eax
   5A                  pop edx
   58                  pop eax
*/

    PATCHGEN_PROLOG(pVM, pPatch);
    pPB[offset++] = 0x50;              // push eax
    pPB[offset++] = 0x52;              // push edx

    /* NOTE(review): the assertion above only allows DISPREFIX_NONE, which makes
       the DISPREFIX_SEG/OPSIZE branches below look unreachable - confirm. */
    if (pCpu->fPrefix == DISPREFIX_SEG)
    {
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);
    }
    pPB[offset++] = 0x8D;              // lea edx, dword ptr [dest]
    // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);

    i = 3; /* standard offset of modrm bytes */
    if (pCpu->fPrefix == DISPREFIX_OPSIZE)
        i++; //skip operand prefix
    if (pCpu->fPrefix == DISPREFIX_SEG)
        i++; //skip segment prefix
    /* Copy the remaining addressing bytes (SIB/displacement) of the original
       guest instruction so the lea resolves the same address. */
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->cbInstr - i);

    pPB[offset++] = 0x66;              // mov ax, CPUMCTX.gdtr.limit
    pPB[offset++] = 0xA1;
    *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_limit;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTRCPTR);

    pPB[offset++] = 0x66;              // mov word ptr [edx],ax
    pPB[offset++] = 0x89;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0xA1;              // mov eax, CPUMCTX.gdtr.base
    *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_base;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTRCPTR);

    pPB[offset++] = 0x89;              // mov dword ptr [edx+2],eax
    pPB[offset++] = 0x42;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0x5A;              // pop edx
    pPB[offset++] = 0x58;              // pop eax

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}
1507
1508/**
1509 * Generate a cpuid patch instruction
1510 *
1511 * @returns VBox status code.
1512 * @param pVM Pointer to the VM.
1513 * @param pPatch Patch record
1514 * @param pCurInstrGC Guest instruction address
1515 */
1516int patmPatchGenCpuid(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
1517{
1518 uint32_t size;
1519 PATCHGEN_PROLOG(pVM, pPatch);
1520
1521 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCpuidRecord, 0, false);
1522
1523 PATCHGEN_EPILOG(pPatch, size);
1524 NOREF(pCurInstrGC);
1525 return VINF_SUCCESS;
1526}
1527
1528/**
1529 * Generate the jump from guest to patch code
1530 *
1531 * @returns VBox status code.
1532 * @param pVM Pointer to the VM.
1533 * @param pPatch Patch record
1534 * @param pTargetGC Guest target jump
1535 * @param fClearInhibitIRQs Clear inhibit irq flag
1536 */
1537int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fClearInhibitIRQs)
1538{
1539 int rc = VINF_SUCCESS;
1540 uint32_t size;
1541
1542 if (fClearInhibitIRQs)
1543 {
1544 rc = patmPatchGenClearInhibitIRQ(pVM, pPatch, pReturnAddrGC);
1545 if (rc == VERR_NO_MEMORY)
1546 return rc;
1547 AssertRCReturn(rc, rc);
1548 }
1549
1550 PATCHGEN_PROLOG(pVM, pPatch);
1551
1552 /* Add lookup record for patch to guest address translation */
1553 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
1554
1555 /* Generate code to jump to guest code if IF=1, else fault. */
1556 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
1557 PATCHGEN_EPILOG(pPatch, size);
1558
1559 return rc;
1560}
1561
1562/*
1563 * Relative jump from patch code to patch code (no fixup required)
1564 */
1565int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RCPTRTYPE(uint8_t *) pPatchAddrGC, bool fAddLookupRecord)
1566{
1567 int32_t displ;
1568 int rc = VINF_SUCCESS;
1569
1570 Assert(PATMIsPatchGCAddr(pVM, pPatchAddrGC));
1571 PATCHGEN_PROLOG(pVM, pPatch);
1572
1573 if (fAddLookupRecord)
1574 {
1575 /* Add lookup record for patch to guest address translation */
1576 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
1577 }
1578
1579 pPB[0] = 0xE9; //JMP
1580
1581 displ = pPatchAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + SIZEOF_NEARJUMP32);
1582
1583 *(uint32_t *)&pPB[1] = displ;
1584
1585 PATCHGEN_EPILOG(pPatch, SIZEOF_NEARJUMP32);
1586
1587 return rc;
1588}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette