Changeset 54763 in vbox
- Timestamp: Mar 15, 2015 3:15:58 AM
- svn:sync-xref-src-repo-rev: 98941
- Location: trunk
- Files: 1 added, 18 edited
trunk/include/VBox/vmm/cpum.h
r54738 r54763 1280 1280 1281 1281 # if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING) 1282 /** @name APIs for the CPUID raw-mode patch .1282 /** @name APIs for the CPUID raw-mode patch (legacy). 1283 1283 * @{ */ 1284 1284 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM); 1285 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayRCPtr(PVM pVM);1286 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(PVM pVM);1287 VMMR3_INT_DECL(CPUMUNKNOWNCPUID) CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(PVM pVM);1288 /* Legacy: */1289 1285 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM); 1290 1286 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM); -
trunk/include/VBox/vmm/patm.h
r53615 r54763 141 141 #define PATMIsEnabled(a_pVM) ((a_pVM)->fPATMEnabled) 142 142 143 VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR pAddr); 143 VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR uGCAddr); 144 VMMDECL(bool) PATMIsPatchGCAddrExclHelpers(PVM pVM, RTRCUINTPTR uGCAddr); 144 145 VMM_INT_DECL(int) PATMReadPatchCode(PVM pVM, RTGCPTR GCPtrPatchCode, void *pvDst, size_t cbToRead, size_t *pcbRead); 145 146 … … 182 183 VMMR3_INT_DECL(int) PATMR3Init(PVM pVM); 183 184 VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM); 184 VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM );185 VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta); 185 186 VMMR3_INT_DECL(int) PATMR3Term(PVM pVM); 186 187 VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM); 187 188 188 VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb);189 VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb);190 189 VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr); 191 190 VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC); 192 VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC);193 191 VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC); 194 192 VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM); -
trunk/include/iprt/asmdefs.mac
r54713 r54763 199 199 200 200 ;; 201 ; Gets the pointer to an imported object. 202 %ifdef ASM_FORMAT_PE 203 %ifdef RT_ARCH_AMD64 204 %define IMP_SEG(SegOverride, name) qword [SegOverride:IMPNAME(name) wrt rip] 205 %else 206 %define IMP_SEG(SegOverride, name) dword [SegOverride:IMPNAME(name)] 207 %endif 208 %else 209 %define IMP_SEG(SegOverride, name) IMPNAME(name) 210 %endif 211 212 ;; 201 213 ; Declares an imported object for use with IMP2. 202 214 ; @note May change the current section! -
trunk/src/VBox/VMM/Makefile.kmk
r54385 r54763 439 439 VMMRC/CPUMRC.cpp \ 440 440 VMMRC/CPUMRCA.asm \ 441 VMMRC/CPUMRCPatchHlp.asm \ 441 442 VMMRC/EMRCA.asm \ 442 443 VMMRC/IOMRC.cpp \ -
trunk/src/VBox/VMM/VMMAll/PATMAll.cpp
r47427 r54763 256 256 VMM_INT_DECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC) 257 257 { 258 return ( PATMIsEnabled(pVM) 259 && ((pAddrGC >= (RTRCPTR)pVM->patm.s.pPatchMemGC && pAddrGC < (RTRCPTR)((RTRCUINTPTR)pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem)))) ? true : false; 258 return PATMIsEnabled(pVM) 259 && ( (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem 260 || (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers); 260 261 } 261 262 … … 273 274 274 275 /** 275 * Checks whether the GC address is part of our patch region276 * Checks whether the GC address is part of our patch or helper regions. 276 277 * 277 278 * @returns VBox status code. 278 279 * @param pVM Pointer to the VM. 279 * @param pAddrGC Guest context address280 * @param uGCAddr Guest context address. 280 281 * @internal 281 282 */ 282 VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR pAddrGC) 283 { 284 return (PATMIsEnabled(pVM) && pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem) ? true : false; 283 VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR uGCAddr) 284 { 285 return PATMIsEnabled(pVM) 286 && ( uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem 287 || uGCAddr - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers); 288 } 289 290 /** 291 * Checks whether the GC address is part of our patch region. 292 * 293 * @returns VBox status code. 294 * @param pVM Pointer to the VM. 295 * @param uGCAddr Guest context address. 296 * @internal 297 */ 298 VMMDECL(bool) PATMIsPatchGCAddrExclHelpers(PVM pVM, RTRCUINTPTR uGCAddr) 299 { 300 return PATMIsEnabled(pVM) 301 && uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem; 285 302 } 286 303 … … 288 305 * Reads patch code. 289 306 * 290 * @returns291 307 * @retval VINF_SUCCESS on success. 292 308 * @retval VERR_PATCH_NOT_FOUND if the request is entirely outside the patch … … 307 323 Assert(!HMIsEnabled(pVM)); 308 324 309 RTGCPTR offPatchedInstr = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pPatchMemGC; 310 if (offPatchedInstr >= pVM->patm.s.cbPatchMem) 311 return VERR_PATCH_NOT_FOUND; 312 313 uint32_t cbMaxRead = pVM->patm.s.cbPatchMem - (uint32_t)offPatchedInstr; 314 if (cbToRead > cbMaxRead) 315 cbToRead = cbMaxRead; 316 325 /* 326 * Check patch code and patch helper code. We assume the requested bytes 327 * are not in either. 328 */ 329 RTGCPTR offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pPatchMemGC; 330 if (offPatchCode >= pVM->patm.s.cbPatchMem) 331 { 332 offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pbPatchHelpersRC; 333 if (offPatchCode >= pVM->patm.s.cbPatchHelpers) 334 return VERR_PATCH_NOT_FOUND; 335 336 /* 337 * Patch helper memory. 338 */ 339 uint32_t cbMaxRead = pVM->patm.s.cbPatchHelpers - (uint32_t)offPatchCode; 340 if (cbToRead > cbMaxRead) 341 cbToRead = cbMaxRead; 317 342 #ifdef IN_RC 318 memcpy(pvDst, pVM->patm.s.pPatchMemGC + (uint32_t)offPatchedInstr, cbToRead);343 memcpy(pvDst, pVM->patm.s.pbPatchHelpersRC + (uint32_t)offPatchCode, cbToRead); 319 344 #else 320 memcpy(pvDst, pVM->patm.s.pPatchMemHC + (uint32_t)offPatchedInstr, cbToRead);345 memcpy(pvDst, pVM->patm.s.pbPatchHelpersR3 + (uint32_t)offPatchCode, cbToRead); 321 346 #endif 347 } 348 else 349 { 350 /* 351 * Patch memory. 
352 */ 353 uint32_t cbMaxRead = pVM->patm.s.cbPatchMem - (uint32_t)offPatchCode; 354 if (cbToRead > cbMaxRead) 355 cbToRead = cbMaxRead; 356 #ifdef IN_RC 357 memcpy(pvDst, pVM->patm.s.pPatchMemGC + (uint32_t)offPatchCode, cbToRead); 358 #else 359 memcpy(pvDst, pVM->patm.s.pPatchMemHC + (uint32_t)offPatchCode, cbToRead); 360 #endif 361 } 362 322 363 if (pcbRead) 323 364 *pcbRead = cbToRead; -
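The rewritten address checks in PATMAll.cpp rely on a single unsigned comparison per region: subtracting the region base from the address wraps around for addresses below the base, so one "< size" test rejects both "below start" and "past end", and the patch helper segment simply becomes a second such test. A minimal sketch of the idiom, using illustrative names rather than the actual PATM fields:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if addr lies within [base, base + size). If addr < base the
     * unsigned subtraction wraps to a huge value, so one compare suffices. */
    static bool isInRegion(uint32_t addr, uint32_t base, uint32_t size)
    {
        return addr - base < size;
    }

    /* Mirrors the shape of the new PATMIsPatchGCAddr test: patch memory
     * proper, or the patch helper segment. The parameter names are made up. */
    static bool isPatchAddr(uint32_t addr,
                            uint32_t patchMemBase, uint32_t cbPatchMem,
                            uint32_t helpersBase,  uint32_t cbHelpers)
    {
        return isInRegion(addr, patchMemBase, cbPatchMem)
            || isInRegion(addr, helpersBase, cbHelpers);
    }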
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r54749 r54763 5057 5057 5058 5058 /** 5059 * Gets a pointer to the CPUID leaf array.5060 *5061 * @returns Raw-mode pointer to the CPUID leaf array.5062 * @param pVM Pointer to the VM.5063 * @remark Intended for PATM only.5064 */5065 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayRCPtr(PVM pVM)5066 {5067 Assert(MMHyperRCToR3(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesRC) == pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);5068 return pVM->cpum.s.GuestInfo.paCpuIdLeavesRC;5069 }5070 5071 5072 /**5073 * Gets a pointer to the CPUID leaf array.5074 *5075 * @returns Raw-mode pointer to the end of CPUID leaf array (exclusive).5076 * @param pVM Pointer to the VM.5077 * @remark Intended for PATM only.5078 */5079 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(PVM pVM)5080 {5081 Assert(MMHyperRCToR3(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesRC) == pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);5082 return pVM->cpum.s.GuestInfo.paCpuIdLeavesRC5083 + pVM->cpum.s.GuestInfo.cCpuIdLeaves * sizeof(CPUMCPUIDLEAF);5084 }5085 5086 5087 /**5088 * Gets the unknown CPUID leaf method.5089 *5090 * @returns Unknown CPUID leaf method.5091 * @param pVM Pointer to the VM.5092 * @remark Intended for PATM only.5093 */5094 VMMR3_INT_DECL(CPUMUNKNOWNCPUID) CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(PVM pVM)5095 {5096 return pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod;5097 }5098 5099 5100 5101 /**5102 5059 * Gets a number of standard CPUID leaves (PATM only). 5103 5060 * -
trunk/src/VBox/VMM/VMMR3/PATM.cpp
r54761 r54763 24 24 #include <VBox/vmm/patm.h> 25 25 #include <VBox/vmm/stam.h> 26 #include <VBox/vmm/pdmapi.h> 26 27 #include <VBox/vmm/pgm.h> 27 28 #include <VBox/vmm/cpum.h> … … 109 110 110 111 static int patmReinit(PVM pVM); 111 static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);112 static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam); 112 113 static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC); 113 114 static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch); … … 331 332 return VINF_SUCCESS; 332 333 333 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */ 334 /* 335 * The GC state, stack and statistics must be read/write for the guest 336 * (supervisor only of course). 337 * 338 * Remember, we run guest code at ring-1 and ring-2 levels, which are 339 * considered supervisor levels by the paging structures. We run the VMM 340 * in ring-0 with CR0.WP=0 and mapping all VMM structures as read-only 341 * pages. The following structures are exceptions and must be mapped with 342 * write access so the ring-1 and ring-2 code can modify them. 343 */ 334 344 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW); 335 if (RT_FAILURE(rc)) 336 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc)); 345 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCState accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc); 337 346 338 347 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW); 339 if (RT_FAILURE(rc)) 340 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc)); 348 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCStack accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc); 341 349 342 350 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW); 343 if (RT_FAILURE(rc)) 344 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc)); 345 351 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the stats struct accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc); 352 353 /* 354 * Find the patch helper segment so we can identify code running there as patch code. 355 */ 356 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpBegin", &pVM->patm.s.pbPatchHelpersRC); 357 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpBegin: %Rrc\n", rc), rc); 358 pVM->patm.s.pbPatchHelpersR3 = (uint8_t *)MMHyperRCToR3(pVM, pVM->patm.s.pbPatchHelpersRC); 359 AssertLogRelReturn(pVM->patm.s.pbPatchHelpersR3 != NULL, VERR_INTERNAL_ERROR_3); 360 361 RTRCPTR RCPtrEnd; 362 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpEnd", &RCPtrEnd); 363 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpEnd: %Rrc\n", rc), rc); 364 365 pVM->patm.s.cbPatchHelpers = RCPtrEnd - pVM->patm.s.pbPatchHelpersRC; 366 AssertLogRelMsgReturn(pVM->patm.s.cbPatchHelpers < _128K, 367 ("%RRv-%RRv => %#x\n", pVM->patm.s.pbPatchHelpersRC, RCPtrEnd, pVM->patm.s.cbPatchHelpers), 368 VERR_INTERNAL_ERROR_4); 346 369 return rc; 347 370 } … … 460 483 * The PATM will update the addresses used by the switcher. 461 484 * 462 * @param pVM The VM. 463 */ 464 VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM) 485 * @param pVM The VM. 486 * @param offDelta The relocation delta. 
487 */ 488 VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta) 465 489 { 466 490 if (HMIsEnabled(pVM)) … … 468 492 469 493 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC); 470 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;471 472 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));473 if ( delta)494 Assert((RTRCINTPTR)(GCPtrNew - pVM->patm.s.pGCStateGC) == offDelta); 495 496 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, offDelta)); 497 if (offDelta) 474 498 { 475 499 PCPUMCTX pCtx; 476 500 477 501 /* Update CPUMCTX guest context pointer. */ 478 pVM->patm.s.pCPUMCtxGC += delta; 479 480 pVM->patm.s.deltaReloc = delta; 481 482 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM); 483 502 pVM->patm.s.pCPUMCtxGC += offDelta; 503 504 pVM->patm.s.deltaReloc = offDelta; 505 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmR3RelocatePatches, (void *)pVM); 506 507 pVM->patm.s.pGCStateGC = GCPtrNew; 508 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC); 509 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC); 510 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC); 511 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC); 512 513 if (pVM->patm.s.pfnSysEnterPatchGC) 514 pVM->patm.s.pfnSysEnterPatchGC += offDelta; 515 516 /* If we are running patch code right now, then also adjust EIP. */ 484 517 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)); 485 486 /* If we are running patch code right now, then also adjust EIP. */487 518 if (PATMIsPatchGCAddr(pVM, pCtx->eip)) 488 pCtx->eip += delta; 489 490 pVM->patm.s.pGCStateGC = GCPtrNew; 491 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC); 492 493 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC); 494 495 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC); 496 497 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC); 498 499 if (pVM->patm.s.pfnSysEnterPatchGC) 500 pVM->patm.s.pfnSysEnterPatchGC += delta; 519 pCtx->eip += offDelta; 501 520 502 521 /* Deal with the global patch functions. */ 503 pVM->patm.s.pfnHelperCallGC += delta; 504 pVM->patm.s.pfnHelperRetGC += delta; 505 pVM->patm.s.pfnHelperIretGC += delta; 506 pVM->patm.s.pfnHelperJumpGC += delta; 507 508 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM); 522 pVM->patm.s.pfnHelperCallGC += offDelta; 523 pVM->patm.s.pfnHelperRetGC += offDelta; 524 pVM->patm.s.pfnHelperIretGC += offDelta; 525 pVM->patm.s.pfnHelperJumpGC += offDelta; 526 527 pVM->patm.s.pbPatchHelpersRC += offDelta; 528 529 patmR3RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM); 509 530 } 510 531 } … … 730 751 * @param pParam Pointer to the VM. 731 752 */ 732 static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)753 static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam) 733 754 { 734 755 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode; … … 917 938 } 918 939 940 case FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL: 919 941 case FIXUP_CONSTANT_IN_PATCH_ASM_TMPL: 920 942 /* Only applicable when loading state. 
*/ … … 1000 1022 1001 1023 #endif /* VBOX_WITH_DEBUGGER */ 1002 #ifdef UNUSED_FUNCTIONS1003 1004 /**1005 * Returns the host context pointer and size of the patch memory block1006 *1007 * @returns Host context pointer.1008 * @param pVM Pointer to the VM.1009 * @param pcb Size of the patch memory block1010 * @internal1011 */1012 VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)1013 {1014 AssertReturn(!HMIsEnabled(pVM), NULL);1015 if (pcb)1016 *pcb = pVM->patm.s.cbPatchMem;1017 return pVM->patm.s.pPatchMemHC;1018 }1019 1020 1021 /**1022 * Returns the guest context pointer and size of the patch memory block1023 *1024 * @returns Guest context pointer.1025 * @param pVM Pointer to the VM.1026 * @param pcb Size of the patch memory block1027 */1028 VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)1029 {1030 AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);1031 if (pcb)1032 *pcb = pVM->patm.s.cbPatchMem;1033 return pVM->patm.s.pPatchMemGC;1034 }1035 1036 #endif /* UNUSED_FUNCTIONS */1037 1024 1038 1025 /** … … 1047 1034 return pVM->patm.s.pGCStateHC; 1048 1035 } 1049 1050 1051 #ifdef UNUSED_FUNCTION1052 /**1053 * Checks whether the HC address is part of our patch region1054 *1055 * @returns true/false.1056 * @param pVM Pointer to the VM.1057 * @param pAddrHC Host context ring-3 address to check.1058 */1059 VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC)1060 {1061 return (uintptr_t)pAddrHC >= (uintptr_t)pVM->patm.s.pPatchMemHC1062 && (uintptr_t)pAddrHC < (uintptr_t)pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem;1063 }1064 #endif1065 1036 1066 1037 … … 1111 1082 { 1112 1083 AssertReturn(!HMIsEnabled(pVM), NULL); 1113 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC) 1114 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC); 1115 return NULL; 1084 RTRCUINTPTR offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; 1085 if (offPatch >= pVM->patm.s.cbPatchMem) 1086 { 1087 offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC; 1088 if (offPatch >= pVM->patm.s.cbPatchHelpers) 1089 return NULL; 1090 return pVM->patm.s.pbPatchHelpersR3 + offPatch; 1091 } 1092 return pVM->patm.s.pPatchMemHC + offPatch; 1116 1093 } 1117 1094 … … 1134 1111 uint32_t offset; 1135 1112 1136 if (PATMIsPatchGCAddr(pVM, pGCPtr)) 1137 { 1113 offset = (RTRCUINTPTR)pGCPtr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; 1114 if (offset < pVM->patm.s.cbPatchMem) 1115 { 1116 #ifdef VBOX_STRICT 1138 1117 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch; 1139 Assert(pPatch); 1140 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch)); 1141 } 1118 Assert(pPatch); Assert(offset - pPatch->pPatchBlockOffset < pPatch->cbPatchBlockSize); 1119 #endif 1120 return pVM->patm.s.pPatchMemHC + offset; 1121 } 1122 /* Note! We're _not_ including the patch helpers here. */ 1142 1123 1143 1124 offset = pGCPtr & PAGE_OFFSET_MASK; -
trunk/src/VBox/VMM/VMMR3/PATMA.asm
r54737 r54763 1723 1723 ; PATMCpuidReplacement 1724 1724 ; 1725 ; Calls a helper function that does the job. 1726 ; 1727 ; This way we can change the CPUID structures and how we organize them without 1728 ; breaking patches. It also saves a bit of memory for patch code and fixups. 1729 ; 1725 1730 BEGIN_PATCH g_patmCpuidRecord, PATMCpuidReplacement 1726 not dword [esp-16] ; probe stack before starting, just in case. 1727 not dword [esp-16] 1731 not dword [esp-32] ; probe stack before starting 1732 not dword [esp-32] 1733 1728 1734 mov dword [ss:PATM_INTERRUPTFLAG], 0 1729 1735 PATCH_FIXUP PATM_INTERRUPTFLAG 1730 1736 pushf 1731 1737 1732 ;; @todo We could put all this stuff in a CPUM assembly function can simply call it. 1733 1734 ; Save the registers we use for passthru and sub-leaf matching (eax is not used). 1735 push edx 1736 push ecx 1737 push ebx 1738 1739 ; 1740 ; Perform a linear search of the strictly sorted CPUID leaf array. 1741 ; 1742 ; (Was going to do a binary search, but that ended up being complicated if 1743 ; we want a flexible leaf size. Linear search is probably good enough.) 1744 ; 1745 mov ebx, PATM_CPUID_ARRAY_PTR 1746 PATCH_FIXUP PATM_CPUID_ARRAY_PTR 1747 mov edx, PATM_CPUID_ARRAY_END_PTR 1748 PATCH_FIXUP PATM_CPUID_ARRAY_END_PTR 1749 cmp ebx, edx 1750 jae cpuid_unknown 1751 1752 cpuid_lookup_leaf: 1753 cmp eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf] 1754 jbe cpuid_maybe_match_eax 1755 add ebx, PATM_CPUID_ARRAY_ENTRY_SIZE 1756 PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE 1757 cmp ebx, edx 1758 jb cpuid_lookup_leaf 1759 jmp cpuid_unknown 1760 1761 cpuid_maybe_match_eax: 1762 jne cpuid_unknown 1763 1764 ; Sub-leaf match too? 1765 mov ecx, [esp + 4] 1766 and ecx, [ss:ebx + CPUMCPUIDLEAF.fSubLeafMask] 1767 cmp ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf] 1768 je cpuid_fetch 1769 1770 ; Search forward until we've got a matching sub-leaf (or not). 1771 cpuid_subleaf_lookup: 1772 add ebx, PATM_CPUID_ARRAY_ENTRY_SIZE 1773 PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE 1774 cmp ebx, edx 1775 jae cpuid_subleaf_not_found_sub_ebx 1776 cmp eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf] 1777 jne cpuid_subleaf_not_found_sub_ebx 1778 cmp ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf] 1779 ja cpuid_subleaf_lookup 1780 je cpuid_fetch 1781 cpuid_subleaf_not_found_sub_ebx: 1782 sub ebx, PATM_CPUID_ARRAY_ENTRY_SIZE 1783 PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE 1784 1785 ; 1786 ; Out of range sub-leaves aren't quite as easy and pretty as we emulate them 1787 ; here, but we do an adequate job. 1788 ; 1789 cpuid_subleaf_not_found: 1790 xor ecx, ecx 1791 test dword [ss:ebx + CPUMCPUIDLEAF.fFlags], CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES 1792 jz cpuid_load_zeros_except_ecx 1793 mov ecx, [esp + 4] 1794 and ecx, 0ffh 1795 cpuid_load_zeros_except_ecx: 1796 xor edx, edx 1797 xor eax, eax 1798 xor ebx, ebx 1799 jmp cpuid_done 1800 1801 ; 1802 ; Different CPUs have different ways of dealing with unknown CPUID leaves. 1803 ; 1804 cpuid_unknown: 1805 mov edx, PATM_CPUID_UNKNOWN_METHOD 1806 PATCH_FIXUP PATM_CPUID_UNKNOWN_METHOD 1807 cmp edx, CPUMUNKNOWNCPUID_PASSTHRU 1808 je cpuid_unknown_passthru 1809 ; Load the default cpuid leaf. 1810 cpuid_unknown_def_leaf: 1811 mov ebx, PATM_CPUID_DEF_PTR 1812 PATCH_FIXUP PATM_CPUID_DEF_PTR 1813 mov edx, [ss:ebx + CPUMCPUID.uEdx] 1814 mov ecx, [ss:ebx + CPUMCPUID.uEcx] 1815 mov eax, [ss:ebx + CPUMCPUID.uEax] 1816 mov ebx, [ss:ebx + CPUMCPUID.uEbx] 1817 jmp cpuid_done 1818 ; Pass thru the input values unmodified (eax is still virgin). 
1819 cpuid_unknown_passthru: 1820 mov edx, [esp + 8] 1821 mov ecx, [esp + 4] 1822 mov ebx, [esp] 1823 jmp cpuid_done 1824 1825 ; 1826 ; Normal return. 1827 ; 1828 cpuid_fetch: 1829 mov edx, [ss:ebx + CPUMCPUIDLEAF.uEdx] 1830 mov ecx, [ss:ebx + CPUMCPUIDLEAF.uEcx] 1831 mov eax, [ss:ebx + CPUMCPUIDLEAF.uEax] 1832 mov ebx, [ss:ebx + CPUMCPUIDLEAF.uEbx] 1833 1834 cpuid_done: 1835 add esp, 12 1738 db 0e8h ; call 1739 dd PATM_ASMFIX_PATCH_HLP_CPUM_CPUID 1740 PATCH_FIXUP PATM_ASMFIX_PATCH_HLP_CPUM_CPUID 1741 1836 1742 popf 1837 1743 mov dword [ss:PATM_INTERRUPTFLAG], 1 -
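The two replacement lines emit a classic five-byte near call: the 0e8h opcode followed by a 32-bit displacement that is still the PATM_ASMFIX_PATCH_HLP_CPUM_CPUID placeholder at assembly time. When the patch is generated, that dword has to be rewritten as a displacement relative to the byte following the instruction. A hedged sketch of the arithmetic with illustrative names (not the actual fixup code, which lives in PATMPatch.cpp below):

    #include <stdint.h>
    #include <string.h>

    /* Resolve a near-call placeholder at patch generation time.
     * pbPatch/offDisp/rcPatchBase/rcHelper are illustrative, not VBox names. */
    static void fixupNearCall(uint8_t *pbPatch, uint32_t offDisp,
                              uint32_t rcPatchBase, uint32_t rcHelper)
    {
        /* x86 call rel32: the displacement is relative to the end of the
         * 5-byte instruction, i.e. the byte after the 4 displacement bytes. */
        uint32_t rcAfter = rcPatchBase + offDisp + 4;
        uint32_t disp    = rcHelper - rcAfter;
        memcpy(&pbPatch[offDisp], &disp, sizeof(disp));
    }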
trunk/src/VBox/VMM/VMMR3/PATMA.mac
r54714 r54763 53 53 %define PATM_CALL_RETURN_ADDR 0xF1ABCD19 54 54 %define PATM_CPUID_CENTAUR_PTR 0xF1ABCD1a 55 %define PATM_ CPUID_ARRAY_PTR0xF1ABCD1b56 %define PATM_ CPUID_ARRAY_END_PTR0xF1ABCD1c57 %define PATM_ CPUID_ARRAY_ENTRY_SIZE0xF1ABCD1d58 %define PATM_ CPUID_UNKNOWN_METHOD0xF1ABCD1e59 55 %define PATM_ASMFIX_REUSE_LATER_0 0xF1ABCD1b 56 %define PATM_ASMFIX_REUSE_LATER_1 0xF1ABCD1c 57 %define PATM_ASMFIX_REUSE_LATER_2 0xF1ABCD1d 58 %define PATM_ASMFIX_REUSE_LATER_3 0xF1ABCD1e 59 %define PATM_ASMFIX_PATCH_HLP_CPUM_CPUID 0xF1ABCD1f 60 60 61 61 ;/* Anything larger doesn't require a fixup */ -
trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp
r54762 r54763 23 23 #define LOG_GROUP LOG_GROUP_PATM 24 24 #include <VBox/vmm/patm.h> 25 #include <VBox/vmm/pdmapi.h> 25 26 #include <VBox/vmm/pgm.h> 26 27 #include <VBox/vmm/cpum.h> … … 104 105 105 106 Assert( uType == FIXUP_ABSOLUTE 106 || ( (uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL || uType == FIXUP_CONSTANT_IN_PATCH_ASM_TMPL) 107 || ( ( uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL 108 || uType == FIXUP_CONSTANT_IN_PATCH_ASM_TMPL 109 || uType == FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL) 107 110 && pSource == pDest 108 111 && PATM_IS_FIXUP_TYPE(pSource)) … … 361 364 dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions); 362 365 break; 363 case PATM_CPUID_DEF_PTR: 366 367 case PATM_CPUID_DEF_PTR: /* saved state only */ 364 368 dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM); 365 369 break; 366 case PATM_CPUID_ARRAY_PTR:367 dest = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);368 break;369 case PATM_CPUID_ARRAY_END_PTR:370 dest = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);371 break;372 373 370 case PATM_CPUID_STD_PTR: /* saved state only */ 374 371 dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM); … … 382 379 383 380 /* 384 * The following fixups are constants that needs to be corrected when385 * loading saved state as these may change between VBox versions.381 * The following fixups are constants and helper code calls that only 382 * needs to be corrected when loading saved state. 386 383 */ 387 case PATM_CPUID_ARRAY_ENTRY_SIZE: 388 dest = sizeof(CPUMCPUIDLEAF); 389 uRelocType = FIXUP_CONSTANT_IN_PATCH_ASM_TMPL; 390 break; 391 case PATM_CPUID_UNKNOWN_METHOD: 392 dest = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM); 393 uRelocType = FIXUP_CONSTANT_IN_PATCH_ASM_TMPL; 394 break; 384 case PATM_ASMFIX_HELPER_CPUM_CPUID: 385 { 386 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "CPUMPatchHlpCpuId", &dest); 387 AssertReleaseRCBreakStmt(rc, dest = PATM_ILLEGAL_DESTINATION); 388 uRelocType = FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL; 389 break; 390 } 395 391 396 392 /* 397 393 * Unknown fixup. 398 394 */ 395 case PATM_ASMFIX_REUSE_LATER_0: 396 case PATM_ASMFIX_REUSE_LATER_1: 397 case PATM_ASMFIX_REUSE_LATER_2: 398 case PATM_ASMFIX_REUSE_LATER_3: 399 399 default: 400 400 AssertReleaseMsgFailed(("Unknown fixup: %#x\n", pAsmRecord->aRelocs[i].uType)); … … 403 403 } 404 404 405 *(RTRCPTR *)&pPB[j] = dest; 405 if (uRelocType == FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL) 406 { 407 RTRCUINTPTR RCPtrAfter = pVM->patm.s.pPatchMemGC 408 + (RTRCUINTPTR)(&pPB[j + sizeof(RTRCPTR)] - pVM->patm.s.pPatchMemHC); 409 dest -= RCPtrAfter; 410 } 411 412 *(PRTRCPTR)&pPB[j] = dest; 413 406 414 if (pAsmRecord->aRelocs[i].uType < PATM_NO_FIXUP) 407 415 { -
trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp
r54761 r54763 7 7 8 8 /* 9 * Copyright (C) 2006-201 4Oracle Corporation9 * Copyright (C) 2006-2015 Oracle Corporation 10 10 * 11 11 * This file is part of VirtualBox Open Source Edition (OSE), as … … 23 23 #define LOG_GROUP LOG_GROUP_PATM 24 24 #include <VBox/vmm/patm.h> 25 #include <VBox/vmm/pdmapi.h> 25 26 #include <VBox/vmm/cpum.h> 26 27 #include <VBox/vmm/cpumctx-v1_6.h> … … 1361 1362 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM); 1362 1363 break; 1363 case PATM_CPUID_ARRAY_PTR:1364 *pFixup = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);1365 break;1366 case PATM_CPUID_ARRAY_END_PTR:1367 *pFixup = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);1368 break;1369 1364 case PATM_CPUID_STD_PTR: /* Saved again patches only. */ 1370 1365 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM); … … 1376 1371 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM); 1377 1372 break; 1373 case PATM_ASMFIX_REUSE_LATER_0: /* Was only used for a few days. Don't want to keep this legacy around. */ 1374 case PATM_ASMFIX_REUSE_LATER_1: 1375 AssertLogRelMsgFailedReturn(("Unsupported PATM fixup. You have to discard this saved state or snapshot."), 1376 VERR_INTERNAL_ERROR); 1377 break; 1378 1378 } 1379 1379 } … … 1387 1387 switch (pRec->pSource) 1388 1388 { 1389 case PATM_CPUID_ARRAY_ENTRY_SIZE: 1390 *pFixup = sizeof(CPUMCPUIDLEAF); 1391 break; 1392 case PATM_CPUID_UNKNOWN_METHOD: 1393 *pFixup = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM); 1389 case PATM_ASMFIX_REUSE_LATER_2: /* Was only used for a few days. Don't want to keep this legacy around. */ 1390 case PATM_ASMFIX_REUSE_LATER_3: 1391 AssertLogRelMsgFailedReturn(("Unsupported PATM fixup. You have to discard this saved state or snapshot."), 1392 VERR_INTERNAL_ERROR); 1394 1393 break; 1395 1394 default: … … 1397 1396 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 1398 1397 } 1398 } 1399 /* 1400 * Relative fixups for calling or jumping to helper functions inside VMMRC. 1401 * (The distance between the helper function and the patch is subject to 1402 * new code being added to VMMRC as well as VM configurations influencing 1403 * heap allocations and so on and so forth.) 1404 */ 1405 else if (pRec->uType == FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL) 1406 { 1407 AssertLogRelReturn(uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED); 1408 Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_FIXUP_TYPE(pRec->pSource)); 1409 int rc; 1410 RTRCPTR uRCPtrDest; 1411 switch (pRec->pSource) 1412 { 1413 case PATM_ASMFIX_HELPER_CPUM_CPUID: 1414 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "CPUMPatchHlpCpuId", &uRCPtrDest); 1415 AssertLogRelRCReturn(rc, rc); 1416 break; 1417 default: 1418 AssertLogRelMsgFailed(("Unknown FIXUP_REL_HLP_CALL_IN_PATCH_ASM_TMPL fixup: %#x\n", pRec->pSource)); 1419 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 1420 } 1421 RTRCPTR uRCPtrAfter = pVM->patm.s.pPatchMemGC + ((uintptr_t)&pFixup[1] - (uintptr_t)pVM->patm.s.pPatchMemHC); 1422 *pFixup = uRCPtrDest - uRCPtrAfter; 1399 1423 } 1400 1424 -
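The new FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL handling above re-resolves the call displacement on every saved-state load because the helper's raw-mode address generally differs from the one recorded when the state was saved (VMMRC load address, heap allocations and VM configuration all move it). A worked example with made-up addresses; only the arithmetic mirrors the code above:

    /* Hypothetical numbers, for illustration only. */
    RTRCPTR uRCPtrDest  = 0xc0a01000;    /* CPUMPatchHlpCpuId in this VM run       */
    RTRCPTR uRCPtrAfter = 0xa0002014;    /* RC address just after the rel32 field  */
    *pFixup = uRCPtrDest - uRCPtrAfter;  /* 0x209fefec: adding it to uRCPtrAfter
                                            lands on the helper again              */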
trunk/src/VBox/VMM/VMMR3/VM.cpp
r54065 r54763 1250 1250 TRPMR3Relocate(pVM, offDelta); 1251 1251 #ifdef VBOX_WITH_RAW_MODE 1252 PATMR3Relocate(pVM );1252 PATMR3Relocate(pVM, (RTRCINTPTR)offDelta); 1253 1253 CSAMR3Relocate(pVM, offDelta); 1254 1254 #endif -
trunk/src/VBox/VMM/VMMRC/VMMRC.mac
r44528 r54763 26 26 ; @param %1 The segment name. 27 27 ; @remark Use BEGINCODE to switch back to the code segment. 28 29 ;; @def VMMR0_SEG_CODE 30 ; Set the output segment to one of the special VMMR0 code segments. 31 ; @param %1 The segment name. 28 32 %ifdef ASM_FORMAT_OMF 29 33 %macro VMMR0_SEG 1 30 segment VMMR0.%1 public CLASS=CONST align=1 use32 34 segment VMMR0.%1 public CLASS=CONST align=1 use32 flat 31 35 %endmacro 32 %define VMMR0_SEG_DEFINED 36 37 %macro VMMR0_CODE_SEG 1 38 segment VMMR0.%1 public CLASS=CODE align=16 use32 flat 39 %endmacro 33 40 %endif 34 41 … … 42 49 %endif 43 50 %endmacro 44 %define VMMR0_SEG_DEFINED 51 52 %macro VMMR0_CODE_SEG 1 53 %ifndef DEFINED_VMMR0_CODE_SEG.%1 54 %define DEFINED_VMMR0_CODE_SEG.%1 1 55 [section .VMMR0.%1 progbits alloc exec nowrite align=16 ] 56 %else 57 [section .VMMR0.%1 ] 58 %endif 59 %endmacro 45 60 %endif 46 61 … … 60 75 %endmacro 61 76 %endif 62 %define VMMR0_SEG_DEFINED 77 78 %ifdef __YASM__ 79 %macro VMMR0_CODE_SEG 1 80 %ifndef DEFINED_VMMR0_CODE_SEG.%1 81 %define DEFINED_VMMR0_CODE_SEG.%1 1 82 [section VMMR0 %1 exec align=16 ] 83 %else 84 [section VMMR0 %1 ] 85 %endif 86 %endmacro 87 %else 88 %macro VMMR0_CODE_SEG 1 89 [section VMMR0.%1 exec align=16 ] 90 %endmacro 91 %endif 63 92 %endif 64 93 … … 67 96 [section .rdata$VMMR0.%1 align=1 ] 68 97 %endmacro 69 %define VMMR0_SEG_DEFINED 98 99 %macro VMMR0_CODE_SEG 1 100 [section .text$VMMR0.%1 align=16 ] 101 %endmacro 70 102 %endif 71 103 72 %ifndef VMMR0_SEG_DEFINED 73 %error "VMMR0_SEG / ASM_FORMAT_xxx" 104 %ifnmacro VMMR0_SEG 105 %error "VMMR0_CODE_SEG / ASM_FORMAT_xxx" 106 %endif 107 %ifnmacro VMMR0_CODE_SEG 108 %error "VMMR0_CODE_SEG / ASM_FORMAT_xxx" 74 109 %endif 75 110 … … 138 173 139 174 175 176 ;; @def PATCH_HLP_SEG 177 ; Set the output segment a special code segment for patch helpers (runs in ring-1 or ring-2). 178 ; @param %1 The segment name. 179 ; @remark Use BEGINCODE to switch back to the code segment. 180 %macro BEGIN_PATCH_HLP_SEG 0 181 VMMR0_CODE_SEG PatchHlp 182 %endmacro 183 140 184 %endif -
trunk/src/VBox/VMM/VMMRC/VMMRC0.asm
r44528 r54763 1 1 ; $Id$ 2 2 ;; @file 3 ; VMM GC0 - The first object module in the link.3 ; VMMRC0 - The first object module in the link. 4 4 ; 5 5 6 6 ; 7 ; Copyright (C) 2006-201 0Oracle Corporation7 ; Copyright (C) 2006-2015 Oracle Corporation 8 8 ; 9 9 ; This file is part of VirtualBox Open Source Edition (OSE), as … … 34 34 GLOBALNAME g_aTrap0eHandlers 35 35 36 ;; 37 ; Start the patch helper segment 38 BEGIN_PATCH_HLP_SEG 39 EXPORTEDNAME g_PatchHlpBegin 36 40 -
trunk/src/VBox/VMM/VMMRC/VMMRC99.asm
r44528 r54763 1 1 ; $Id$ 2 2 ;; @file 3 ; VMM GC99 - The last object module in the link.3 ; VMMRC99 - The last object module in the link. 4 4 ; 5 5 6 ; Copyright (C) 2006-201 0Oracle Corporation6 ; Copyright (C) 2006-2015 Oracle Corporation 7 7 ; 8 8 ; This file is part of VirtualBox Open Source Edition (OSE), as … … 22 22 VMMR0_SEG Trap0b 23 23 GLOBALNAME g_aTrap0bHandlersEnd 24 dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 024 dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 25 25 26 26 … … 29 29 VMMR0_SEG Trap0d 30 30 GLOBALNAME g_aTrap0dHandlersEnd 31 dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 031 dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 32 32 33 33 … … 36 36 VMMR0_SEG Trap0e 37 37 GLOBALNAME g_aTrap0eHandlersEnd 38 dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 038 dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 39 39 40 41 ;; 42 ; End the patch helper segment 43 BEGIN_PATCH_HLP_SEG 44 EXPORTEDNAME g_PatchHlpEnd 45 dd 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 46 -
trunk/src/VBox/VMM/include/CPUMInternal.mac
r54737 r54763 17 17 18 18 %include "VBox/asmdefs.mac" 19 20 ;; 21 ; CPU info 22 struc CPUMINFO 23 .cMsrRanges resd 1 ; uint32_t 24 .fMsrMask resd 1 ; uint32_t 25 .cCpuIdLeaves resd 1 ; uint32_t 26 .iFirstExtCpuIdLeaf resd 1 ; uint32_t 27 .uPadding resd 1 ; uint32_t 28 .enmUnknownCpuIdMethod resd 1 ; CPUMUNKNOWNCPUID 29 .DefCpuId resb CPUMCPUID_size ; CPUMCPUID 30 .uScalableBusFreq resq 1 ; uint64_t 31 .paMsrRangesR0 RTR0PTR_RES 1 ; R0PTRTYPE(PCPUMMSRRANGE) 32 .paCpuIdLeavesR0 RTR0PTR_RES 1 ; R0PTRTYPE(PCPUMCPUIDLEAF) 33 .paMsrRangesR3 RTR3PTR_RES 1 ; R3PTRTYPE(PCPUMMSRRANGE) 34 .paCpuIdLeavesR3 RTR3PTR_RES 1 ; R3PTRTYPE(PCPUMCPUIDLEAF) 35 .paMsrRangesRC RTRCPTR_RES 1 ; RCPTRTYPE(PCPUMMSRRANGE) 36 .paCpuIdLeavesRC RTRCPTR_RES 1 ; RCPTRTYPE(PCPUMCPUIDLEAF) 37 endstruc 19 38 20 39 -
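The new CPUMINFO struc lets assembly code index into the C structure of the same name. Judging purely from the field comments in the .mac above, the C side would look roughly like the sketch below; field order and types are inferred from the assembly, not taken from CPUMInternal.h:

    /* Inferred C mirror of the assembly struc; pointer widths follow the
     * R0/R3/RC pointer reservation macros used above. */
    typedef struct CPUMINFO
    {
        uint32_t                    cMsrRanges;
        uint32_t                    fMsrMask;
        uint32_t                    cCpuIdLeaves;
        uint32_t                    iFirstExtCpuIdLeaf;
        uint32_t                    uPadding;
        CPUMUNKNOWNCPUID            enmUnknownCpuIdMethod;
        CPUMCPUID                   DefCpuId;
        uint64_t                    uScalableBusFreq;
        R0PTRTYPE(PCPUMMSRRANGE)    paMsrRangesR0;
        R0PTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesR0;
        R3PTRTYPE(PCPUMMSRRANGE)    paMsrRangesR3;
        R3PTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesR3;
        RCPTRTYPE(PCPUMMSRRANGE)    paMsrRangesRC;
        RCPTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesRC;
    } CPUMINFO;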
trunk/src/VBox/VMM/include/PATMA.h
r54714 r54763 37 37 #define PATM_CPUID_STD_PTR 0xF1ABCD09 /**< Legacy, saved state only. */ 38 38 #define PATM_CPUID_EXT_PTR 0xF1ABCD0a /**< Legacy, saved state only. */ 39 #define PATM_CPUID_DEF_PTR 0xF1ABCD0b 39 #define PATM_CPUID_DEF_PTR 0xF1ABCD0b /**< Legacy, saved state only. */ 40 40 #define PATM_STACKBASE 0xF1ABCD0c /**< Stack to store our private patch return addresses */ 41 41 #define PATM_STACKBASE_GUEST 0xF1ABCD0d /**< Stack to store guest return addresses */ … … 53 53 #define PATM_CALL_RETURN_ADDR 0xF1ABCD19 54 54 #define PATM_CPUID_CENTAUR_PTR 0xF1ABCD1a /**< Legacy, saved state only. */ 55 #define PATM_CPUID_ARRAY_PTR 0xF1ABCD1b 56 #define PATM_CPUID_ARRAY_END_PTR 0xF1ABCD1c 57 #define PATM_CPUID_ARRAY_ENTRY_SIZE 0xF1ABCD1d 58 #define PATM_CPUID_UNKNOWN_METHOD 0xF1ABCD1e 55 #define PATM_ASMFIX_REUSE_LATER_0 0xF1ABCD1b 56 #define PATM_ASMFIX_REUSE_LATER_1 0xF1ABCD1c 57 #define PATM_ASMFIX_REUSE_LATER_2 0xF1ABCD1d 58 #define PATM_ASMFIX_REUSE_LATER_3 0xF1ABCD1e 59 #define PATM_ASMFIX_HELPER_CPUM_CPUID 0xF1ABCD1f 59 60 60 61 /* Anything larger doesn't require a fixup */ -
trunk/src/VBox/VMM/include/PATMInternal.h
r54761 r54763 32 32 /** @name Saved state version numbers. 33 33 * @{ */ 34 /** New concept of helper code (for CPUID). */ 35 #define PATM_SAVED_STATE_VERSION 58 34 36 /** New fixup type FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL. */ 35 #define PATM_SAVED_STATE_VERSION 37 #define PATM_SAVED_STATE_VERSION_FORGET_THIS_ONE 57 36 38 /** Uses normal structure serialization with markers and everything. */ 37 39 #define PATM_SAVED_STATE_VERSION_NO_RAW_MEM 56 … … 118 120 * like for FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL. */ 119 121 #define FIXUP_CONSTANT_IN_PATCH_ASM_TMPL 4 122 /** Relative call to a patch helper routine in VMMRC. The source and destination 123 * address are set like for FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL. */ 124 #define FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL 5 120 125 /** @} */ 121 126 … … 446 451 * Used only during PATMR3Relocate(). */ 447 452 int32_t deltaReloc; 453 454 /** The ring-3 address of the PatchHlp segment (for PATMReadPatchCode). */ 455 R3PTRTYPE(uint8_t *) pbPatchHelpersR3; 456 /** The raw-mode address of the PatchHlp segment. */ 457 RCPTRTYPE(uint8_t *) pbPatchHelpersRC; 458 /** Size of the PatchHlp segment containing the callable helper code. */ 459 uint32_t cbPatchHelpers; 460 448 461 /** GC PATM state pointer - HC pointer. */ 449 462 R3PTRTYPE(PPATMGCSTATE) pGCStateHC; 450 463 /** GC PATM state pointer - RC pointer. */ 451 464 RCPTRTYPE(PPATMGCSTATE) pGCStateGC; 465 452 466 /** PATM stack page for call instruction execution. 453 467 * 2 parts: one for our private stack and one to store the original return … … 458 472 /** GC pointer to CPUMCTX structure. */ 459 473 RCPTRTYPE(PCPUMCTX) pCPUMCtxGC; 474 460 475 /** GC statistics pointer. */ 461 476 RCPTRTYPE(PSTAMRATIOU32) pStatsGC; 462 477 /** HC statistics pointer. */ 463 478 R3PTRTYPE(PSTAMRATIOU32) pStatsHC; 464 /* Current free index value (uPatchRun/uPatchTrap arrays). */ 479 480 /** Current free index value (uPatchRun/uPatchTrap arrays). */ 465 481 uint32_t uCurrentPatchIdx; 466 /* Temporary counter for patch installation call depth. (in order not to go on forever) */482 /** Temporary counter for patch installation call depth. (in order not to go on forever) */ 467 483 uint32_t ulCallDepth; 468 484 /** Number of page lookup records. */