Changeset 94838 in vbox for trunk/src/VBox
- Timestamp: May 5, 2022 10:29:49 AM (3 years ago)
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r94800 → r94838)

@@ 605-609 @@ (whitespace-only change to the @note line)
  * @param   pVCpu   The cross context virtual CPU structure of the calling
  *                  thread.
- * @note Currently not used. 
+ * @note Currently not used.
  */
 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)

@@ 4138-4148 @@
     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
 }
+
+#ifdef IEM_WITH_SETJMP
+/** \#AC(0) - 11, longjmp. */
+DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+    longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
+}
+#endif

@@ 5911-5923 @@
      */
     /** @todo testcase: check when A and D bits are actually set by the CPU. */
-    uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PG_NO_ACCESSED;
+    uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
     if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
     {

@@ 6175-6186 @@
         && (fAccess & IEM_ACCESS_TYPE_WRITE)
         && (   (   pVCpu->iem.s.uCpl == 3
-                && !(fAccess & IEM_ACCESS_WHAT_SYS))
+                && !(fAccess & IEM_ACCESS_WHAT_SYS)) /** @todo check this. Not sure WP applies to all SYS writes... */
             || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
     {

@@ 6205-6217 @@
      */
     /** @todo testcase: check when A and D bits are actually set by the CPU. */
-    uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PG_NO_ACCESSED;
+    uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
     if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
     {

@@ 6600-6665 @@
 uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
 {
-# if 0 //def IEM_WITH_DATA_TLB
-    RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
-    if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
-    {
-        /// @todo more soon...
-    }
-
-    return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
+    /*
+     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
+     */
+    RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
+    if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t uTag = ((GCPtrEff << 16) >> (X86_PAGE_SHIFT + 16));
+        Assert(!(uTag >> (48 - X86_PAGE_SHIFT)));
+        uTag |= pVCpu->iem.s.DataTlb.uTlbRevision;
+        AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
+        PIEMTLBENTRY const pTlbe = &pVCpu->iem.s.DataTlb.aEntries[(uint8_t)uTag];
+        if (pTlbe->uTag == uTag)
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
+            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
+                                               | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
+                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
+            {
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+
+                /*
+                 * Alignment check:
+                 */
+                /** @todo check priority \#AC vs \#PF */
+                if (   !(GCPtrEff & (sizeof(uint32_t) - 1))
+                    || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
+                    || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
+                    || pVCpu->iem.s.uCpl != 3)
+                {
+                    /*
+                     * Fetch and return the dword.
+                     */
+                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                    return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
+                }
+                Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
+       outdated page pointer, or other troubles. */
+    Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
+    return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
+
 # else
     /* The lazy approach. */
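The new fast path above is a direct-mapped, 256-entry data TLB: the tag is the guest virtual page number with a global revision counter folded into its high bits, and the low eight tag bits index the entry array. Below is a minimal standalone C sketch of that tag/index arithmetic and of the #AC gating, assuming 4 KiB pages and 48-bit addresses; the SimpleTlb types and all helper names here are illustrative stand-ins, not VBox APIs.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MY_PAGE_SHIFT 12                /* assuming 4 KiB guest pages */

typedef struct SimpleTlbEntry
{
    uint64_t uTag;          /* virtual page number | TLB revision */
    uint8_t *pbMappingR3;   /* page-aligned host mapping of the guest page */
} SimpleTlbEntry;

typedef struct SimpleTlb
{
    uint64_t       uTlbRevision;   /* kept in the high tag bits; bumping it orphans every entry */
    SimpleTlbEntry aEntries[256];
} SimpleTlb;

/* Same arithmetic as the changeset: shift left 16 then right (PAGE_SHIFT+16)
   to keep address bits 12..47 (a 36-bit page number), then fold in the
   revision so a stale entry can never compare equal after a flush. */
static uint64_t tlbCalcTag(SimpleTlb const *pTlb, uint64_t uFlatAddr)
{
    uint64_t const uVpn = (uFlatAddr << 16) >> (MY_PAGE_SHIFT + 16);
    return uVpn | pTlb->uTlbRevision;
}

/* Direct-mapped lookup: the low 8 bits of the tag index the 256-entry array. */
static SimpleTlbEntry *tlbLookup(SimpleTlb *pTlb, uint64_t uFlatAddr)
{
    uint64_t const  uTag  = tlbCalcTag(pTlb, uFlatAddr);
    SimpleTlbEntry *pTlbe = &pTlb->aEntries[(uint8_t)uTag];
    return pTlbe->uTag == uTag ? pTlbe : NULL;  /* NULL = miss, caller takes the slow path */
}

/* Mirrors the #AC gating in the fetch path: a misaligned access (cbAccess must
   be a power of two) only faults when CR0.AM, EFLAGS.AC and CPL==3 all hold. */
static bool raisesAlignmentCheck(uint64_t uFlatAddr, size_t cbAccess,
                                 bool fCr0Am, bool fEflAc, unsigned uCpl)
{
    return (uFlatAddr & (cbAccess - 1)) != 0 && fCr0Am && fEflAc && uCpl == 3;
}
```

With this layout, invalidating the whole TLB is O(1): bump uTlbRevision and every stored tag stops matching on the next lookup, which appears to be why the revision is baked into the tag rather than kept as a per-entry valid bit.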
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp (r94800 → r94838)

@@ 64-68 @@
 # define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { a_u64Disp = 0; RT_NOREF(a_offDisp); } while (0)
 # define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)  do { a_u64Disp = 0; RT_NOREF(a_offDisp); } while (0)
-# if 1
+# if 0
 #  error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
 # endif
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp (r94801 → r94838)

@@ 232-236 @@
                 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC     ? "NX" : " X",
                 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE    ? "RO" : "RW",
-                pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_ACCESSED ? "-" : "A",
+                pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
                 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY    ? "-" : "D",
                 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE    ? "-" : "w",
trunk/src/VBox/VMM/include/IEMInline.h (r94800 → r94838)

@@ 2339-2405 @@
     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
     {
-        if (iSegReg >= X86_SREG_FS)
+        if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
         {
             IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
…
         if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
             return GCPtrMem;
+        iemRaiseGeneralProtectionFault0Jmp(pVCpu);
     }
     /*
      * 16-bit and 32-bit segmentation.
      */
-    else
-    {
+    else if (iSegReg != UINT8_MAX)
+    {
+        /** @todo Does this apply to segments with 4G-1 limit? */
         uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
         if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
         {
             IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
             PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
-            switch (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
+            switch (pSel->Attr.u & (  X86DESCATTR_P     | X86DESCATTR_UNUSABLE
+                                    | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
+                                    | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF  /* same as down */
+                                    | X86_SEL_TYPE_CODE))
             {
-                case X86DESCATTR_P:                                          /* data, expand up */
+                case X86DESCATTR_P:                                          /* readonly data, expand up */
+                case X86DESCATTR_P | X86_SEL_TYPE_WRITE:                     /* writable data, expand up */
                 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ:  /* code, read-only */
+                case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
                     /* expand up */
                     if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
                         return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
+                    Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
+                           (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
                     break;

-                case X86DESCATTR_P | X86_SEL_TYPE_DOWN:                      /* data, expand down */
+                case X86DESCATTR_P | X86_SEL_TYPE_DOWN:                      /* readonly data, expand down */
+                case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
                     /* expand down */
                     if (RT_LIKELY(   (uint32_t)GCPtrMem > pSel->u32Limit
                                   && (   pSel->Attr.n.u1DefBig
                                       || GCPtrLast32 <= UINT32_C(0xffff)) ))
                         return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
+                    Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
+                           (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
                     break;

                 default:
+                    Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
                     iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
                     break;
             }
         }
+        Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n", (uint32_t)GCPtrMem, GCPtrLast32));
         iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
     }
-    iemRaiseGeneralProtectionFault0Jmp(pVCpu);
+    /*
+     * 32-bit flat address.
+     */
+    else
+        return GCPtrMem;
 }
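The widened switch mask works because descriptor type bit 1 means "writable" for data segments but "readable" for code segments, and type bit 2 means "expand-down" for data but "conforming" for code, so X86_SEL_TYPE_WRITE/X86_SEL_TYPE_READ and X86_SEL_TYPE_DOWN/X86_SEL_TYPE_CONF are the same bits (hence the "same as read" / "same as down" comments). The limit rules the cases implement: an expand-up segment accepts offsets in [0, limit]; an expand-down segment accepts (limit, 0xFFFF], or (limit, 0xFFFFFFFF] when the D/B bit is set. A condensed standalone check of just that rule (segLimitOk is a made-up helper, not VBox code):

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper condensing the limit checks above: does an access of
   cbMem bytes at offset off fit the segment?  fExpandDown models
   X86_SEL_TYPE_DOWN, fDefBig the D/B attribute bit. */
static bool segLimitOk(uint32_t off, uint32_t cbMem, uint32_t uLimit,
                       bool fExpandDown, bool fDefBig)
{
    uint32_t const offLast = off + cbMem - 1;
    if (offLast < off)              /* 32-bit wraparound is always out of bounds */
        return false;
    if (!fExpandDown)
        return offLast <= uLimit;   /* expand up: valid offsets are [0, limit] */
    /* Expand down: valid offsets are (limit, 0xffff], or (limit, 0xffffffff]
       when D/B is set (in which case offLast cannot overflow uint32_t anyway). */
    return off > uLimit
        && (fDefBig || offLast <= UINT32_C(0xffff));
}
```

The wraparound test corresponds to the GCPtrLast32 >= (uint32_t)GCPtrMem guard the function performs before dispatching on the descriptor type.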
trunk/src/VBox/VMM/include/IEMInternal.h (r94800 → r94838)

@@ 320-324 @@
 #define IEMTLBE_F_PG_NO_WRITE       RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
 #define IEMTLBE_F_PG_NO_READ        RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
-#define IEMTLBE_F_PG_NO_ACCESSED    RT_BIT_64(5) /**< Phys tables: Not accessed (need to be marked accessed). */
+#define IEMTLBE_F_PT_NO_ACCESSED    RT_BIT_64(5) /**< Phys tables: Not accessed (need to be marked accessed). */
 #define IEMTLBE_F_PT_NO_DIRTY       RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
 #define IEMTLBE_F_NO_MAPPINGR3      RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pMappingR3 member is invalid. */

@@ 2547-2553 @@
 VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu);
 VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
+#ifdef IEM_WITH_SETJMP
+DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT;
+#endif

 IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
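These flag definitions enable the single-compare validity test used in the iemMemFetchDataU32Jmp hot path: uTlbPhysRev appears to carry bits only inside IEMTLBE_F_PHYS_REV, so masking an entry's fFlagsAndPhysRev with the revision mask plus every disqualifying flag and comparing against uTlbPhysRev answers "revision is current" and "no bad flags are set" with one AND and one CMP. A sketch of the idiom with made-up bit values (only the trick, not VBox's actual bit layout, is being asserted):

```c
#include <stdbool.h>
#include <stdint.h>

/* Made-up bit layout (the real IEMTLBE_F_* values differ): low bits are
   per-entry "no" flags, everything above them is the physical revision. */
#define TLBE_F_NO_READ      (UINT64_C(1) << 0)
#define TLBE_F_NO_ACCESSED  (UINT64_C(1) << 1)
#define TLBE_F_NO_MAPPING   (UINT64_C(1) << 2)
#define TLBE_F_NO_USER      (UINT64_C(1) << 3)
#define TLBE_F_PHYS_REV     UINT64_C(0xffffffffffffff00)

/* uTlbPhysRev only ever holds bits inside TLBE_F_PHYS_REV, so the masked
   compare below is true iff the revision matches exactly AND every listed
   disqualifying flag is clear: one AND and one CMP instead of two tests. */
static bool tlbEntryReadable(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev, bool fUserMode)
{
    uint64_t const fNoUser = fUserMode ? TLBE_F_NO_USER : 0; /* only CPL==3 cares about U/S */
    return (fFlagsAndPhysRev & (  TLBE_F_PHYS_REV | TLBE_F_NO_READ
                                | TLBE_F_NO_ACCESSED | TLBE_F_NO_MAPPING | fNoUser))
        == uTlbPhysRev;
}
```

Bumping uTlbPhysRev after a physical-state change (say, an access handler being installed) then invalidates the physical side of every entry at once, mirroring what uTlbRevision does for the virtual side.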