Changeset 42193 in vbox for trunk/src/VBox/VMM
Timestamp: Jul 17, 2012 2:34:30 PM
Location:  trunk/src/VBox/VMM
Files:     4 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r42188 → r42193

 
 
-#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
-
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
 
 /**
…
 }
 
-#endif
+#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
 
 
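For context, VBOX_WITH_RAW_MODE_NOT_R0 reads like a convenience define standing in for the compound condition it replaces here. A minimal sketch of such a definition, assuming it lives in a central config header (the exact location and definition are not shown in this changeset):

    /* Hypothetical sketch: one define for the compound test, so call sites
       need a single #ifdef instead of repeating the defined()/!defined() pair. */
    #if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
    # define VBOX_WITH_RAW_MODE_NOT_R0
    #endif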
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r42165 → r42193

 #define LOG_GROUP LOG_GROUP_IEM
 #include <VBox/vmm/iem.h>
+#include <VBox/vmm/cpum.h>
 #include <VBox/vmm/pgm.h>
 #include <internal/pgm.h>
…
 #include <VBox/vmm/tm.h>
 #include <VBox/vmm/dbgf.h>
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+# include <VBox/vmm/patm.h>
+#endif
 #ifdef IEM_VERIFICATION_MODE
 # include <VBox/vmm/rem.h>
…
     PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
 
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
     CPUMGuestLazyLoadHiddenCsAndSs(IEMCPU_TO_VMCPU(pIemCpu));
+#endif
     pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu));
     IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
…
     if (RT_FAILURE(rc))
     {
+#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE)
+        /* Allow interpretation of patch manager code blocks since they can for
+           instance throw #PFs for perfectly good reasons. */
+        if (   (pCtx->cs.Sel & X86_SEL_RPL) == 1
+            && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), GCPtrPC))
+        {
+            uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
+            if (cbToTryRead > cbLeftOnPage)
+                cbToTryRead = cbLeftOnPage;
+            if (cbToTryRead > sizeof(pIemCpu->abOpcode))
+                cbToTryRead = sizeof(pIemCpu->abOpcode);
+            memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
+            pIemCpu->cbOpcode = cbToTryRead;
+            return VINF_SUCCESS;
+        }
+#endif
         Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
         return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
…
  * @param   iSegReg     The segment register.
  */
-static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
-{
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
+{
+    PCPUMCTX    pCtx = pIemCpu->CTX_SUFF(pCtx);
+    PCPUMSELREG pSReg;
     switch (iSegReg)
     {
-        case X86_SREG_ES: return &pCtx->es;
-        case X86_SREG_CS: return &pCtx->cs;
-        case X86_SREG_SS: return &pCtx->ss;
-        case X86_SREG_DS: return &pCtx->ds;
-        case X86_SREG_FS: return &pCtx->fs;
-        case X86_SREG_GS: return &pCtx->gs;
-    }
-    AssertFailedReturn(NULL);
+        case X86_SREG_ES: pSReg = &pCtx->es; break;
+        case X86_SREG_CS: pSReg = &pCtx->cs; break;
+        case X86_SREG_SS: pSReg = &pCtx->ss; break;
+        case X86_SREG_DS: pSReg = &pCtx->ds; break;
+        case X86_SREG_FS: pSReg = &pCtx->fs; break;
+        case X86_SREG_GS: pSReg = &pCtx->gs; break;
+        default:
+            AssertFailedReturn(NULL);
+    }
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+        CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
+#else
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
+#endif
+    return pSReg;
 }
…
  * @param   fAccess             The intended access.
  * @param   ppvMem              Where to return the mapping address.
- */
-static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
+ * @param   pLock               The PGM lock.
+ */
+static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
 {
 #ifdef IEM_VERIFICATION_MODE
…
     /** @todo need some better API. */
 #ifdef IN_RING3
+    RT_ZERO(*pLock);
     return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
                                   GCPhysMem,
…
                                   ppvMem);
 #else
-//# error "Implement me"
     if (fAccess & IEM_ACCESS_TYPE_WRITE)
         return PGMPhysGCPhys2CCPtr(IEMCPU_TO_VM(pIemCpu),
                                    GCPhysMem,
                                    ppvMem,
-                                   /** @todo pLock */ NULL);
+                                   pLock);
     return PGMPhysGCPhys2CCPtrReadOnly(IEMCPU_TO_VM(pIemCpu),
                                        GCPhysMem,
                                        (void const **)ppvMem,
-                                       /** @todo pLock */ NULL);
+                                       pLock);
 #endif
 }
…
  * @param   fAccess             The intended access.
  * @param   pvMem               What iemMemPageMap returned.
- */
-DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem)
+ * @param   pLock               The PGM lock.
+ */
+DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
 {
     NOREF(pIemCpu);
…
     NOREF(fAccess);
     NOREF(pvMem);
+#ifndef IN_RING3
+    PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
+#endif
 }
…
 
     void *pvMem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
     if (rcStrict != VINF_SUCCESS)
         return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
…
             == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
         return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
+
+#ifndef IN_RING3
+    /* Unlock it. */
+    PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
+#endif
 
     /* Free the entry. */
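The pattern that runs through the rest of the changeset: iemMemPageMap now takes a PPGMPAGEMAPLOCK that it fills in non-ring-3 contexts (ring-3 just zeroes it), and the matching iemMemPageUnmap releases the page reference via PGMPhysReleasePageMappingLock. A minimal caller sketch of that pairing; the local variable names here are illustrative, not taken from the source:

    /* Sketch of the map/unmap pairing introduced in this changeset. */
    PGMPAGEMAPLOCK PgLock;
    void          *pvMem;
    int rc = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvMem, &PgLock);
    if (rc == VINF_SUCCESS)
    {
        /* ... read guest memory through pvMem ... */
        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvMem, &PgLock);
    }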
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r41906 → r42193

      * until the end of the current page.
      */
+    PGMPAGEMAPLOCK PgLockSrc2Mem;
     OP_TYPE const *puSrc2Mem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
     if (rcStrict == VINF_SUCCESS)
     {
+        PGMPAGEMAPLOCK PgLockSrc1Mem;
         OP_TYPE const *puSrc1Mem;
-        rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem);
+        rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
         if (rcStrict == VINF_SUCCESS)
         {
…
             pCtx->eflags.u = uEFlags;
 
-            iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem);
-            iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem);
+            iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
+            iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
             continue;
         }
     }
-    iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem);
+    iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
…
      */
     OP_TYPE const *puSrc2Mem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem);
+    PGMPAGEMAPLOCK PgLockSrc2Mem;
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
     if (rcStrict == VINF_SUCCESS)
     {
         OP_TYPE const *puSrc1Mem;
-        rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem);
+        PGMPAGEMAPLOCK PgLockSrc1Mem;
+        rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
         if (rcStrict == VINF_SUCCESS)
         {
…
             pCtx->eflags.u = uEFlags;
 
-            iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem);
-            iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem);
+            iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
+            iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
             continue;
         }
-        iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem);
+        iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
     }
…
      * until the end of the current page.
      */
+    PGMPAGEMAPLOCK PgLockMem;
     OP_TYPE const *puMem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
     if (rcStrict == VINF_SUCCESS)
     {
…
         pCtx->eflags.u = uEFlags;
         Assert(!(uEFlags & X86_EFL_ZF) == (i < cLeftPage));
-        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
+        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
         if (fQuit)
             break;
…
      * until the end of the current page.
      */
+    PGMPAGEMAPLOCK PgLockMem;
     OP_TYPE const *puMem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
     if (rcStrict == VINF_SUCCESS)
     {
…
         pCtx->eflags.u = uEFlags;
         Assert((!(uEFlags & X86_EFL_ZF) != (i < cLeftPage)) || (i == cLeftPage));
-        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
+        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
         if (fQuit)
             break;
…
      * until the end of the current page.
      */
+    PGMPAGEMAPLOCK PgLockDstMem;
     OP_TYPE *puDstMem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
     if (rcStrict == VINF_SUCCESS)
     {
+        PGMPAGEMAPLOCK PgLockSrcMem;
         OP_TYPE const *puSrcMem;
-        rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem);
+        rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
         if (rcStrict == VINF_SUCCESS)
         {
…
             pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
 
-            iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem);
-            iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem);
+            iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
+            iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
             continue;
         }
-        iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem);
+        iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
     }
…
      * until the end of the current page.
      */
+    PGMPAGEMAPLOCK PgLockMem;
     OP_TYPE *puMem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
     if (rcStrict == VINF_SUCCESS)
     {
…
 #endif
 
-        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem);
+        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
 
         /* If unaligned, we drop thru and do the page crossing access
…
      * just reading the last value on the page.
      */
+    PGMPAGEMAPLOCK PgLockMem;
     OP_TYPE const *puMem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
     if (rcStrict == VINF_SUCCESS)
     {
…
         pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
         pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
-        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
+        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
 
         /* If unaligned, we drop thru and do the page crossing access
…
      * mapped buffers instead of leaving those bits to the
      * device implementation?
      */
+    PGMPAGEMAPLOCK PgLockMem;
     OP_TYPE *puMem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
     if (rcStrict == VINF_SUCCESS)
     {
…
                 if (uCounterReg == 0)
                     iemRegAddToRip(pIemCpu, cbInstr);
-                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem);
+                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
                 return rcStrict;
             }
             off++;
         }
-        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem);
+        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
 
         /* If unaligned, we drop thru and do the page crossing access
…
      * mapped buffers instead of leaving those bits to the
      * device implementation? */
+    PGMPAGEMAPLOCK PgLockMem;
     OP_TYPE const *puMem;
-    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
     if (rcStrict == VINF_SUCCESS)
     {
…
                 if (uCounterReg == 0)
                     iemRegAddToRip(pIemCpu, cbInstr);
-                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
+                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                 return rcStrict;
             }
             off++;
         }
-        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
+        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
 
         /* If unaligned, we drop thru and do the page crossing access
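Every worker in this template file follows the same shape, which is why the hunks repeat: declare a lock next to the page pointer, map as much of the current page as the count allows, operate on the mapping directly, and unmap (now also releasing the lock) before advancing or falling back to the slow per-item path. A condensed sketch of that fast path, simplified from the read-only cases above (OP_TYPE, cLeftPage and rcStrict come from the template context; the loop body is elided):

    /* Condensed sketch of the per-page rep-string fast path (simplified). */
    PGMPAGEMAPLOCK PgLockMem;
    OP_TYPE const *puMem;
    rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
    if (rcStrict == VINF_SUCCESS)
    {
        /* ... process up to cLeftPage elements straight off the mapping ... */
        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
    }
    /* On mapping failure or a page crossing, the code above drops to the
       one-element-at-a-time path instead. */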
trunk/src/VBox/VMM/include/IEMInternal.h
r41829 → r42193

 #endif
     } aMemMappings[3];
+
+    /** Locking records for the mapped memory. */
+    union
+    {
+        PGMPAGEMAPLOCK          Lock;
+        uint64_t                au64Padding[2];
+    } aMemMappingLocks[3];
 
     /** Bounce buffer info.
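Wrapping the lock in a union with two uint64_t of padding presumably keeps the IEMCPU layout stable even if PGMPAGEMAPLOCK is sized differently per build context (ring-3, ring-0, raw mode). A hypothetical compile-time guard for that assumption, not part of this change:

    /* Hypothetical: the padding must cover the lock in every build context. */
    AssertCompile(sizeof(PGMPAGEMAPLOCK) <= 2 * sizeof(uint64_t));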