VirtualBox

Changeset 95410 in vbox for trunk/src/VBox/VMM/include


Ignore:
Timestamp:
Jun 28, 2022 6:33:26 PM (3 years ago)
Author:
vboxsync
Message:

VMM/IEM: Alignment checks (#AC(0)/#GP(0)). bugref:9898

Location:
trunk/src/VBox/VMM/include
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/include/IEMInline.h

    r94838 r95410  
    2198 2198  */
    2199 2199
         2200
         2201 /**
         2202  * Checks whether alignment checks are enabled or not.
         2203  *
         2204  * @returns true if enabled, false if not.
         2205  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
         2206  */
         2207 DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu)
         2208 {
         2209     AssertCompile(X86_CR0_AM == X86_EFL_AC);
         2210     return pVCpu->iem.s.uCpl == 3
         2211         && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
         2212 }
         2213
    2200 2214 /**
    2201 2215  * Checks if the given segment can be written to, raise the appropriate
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r95403 r95410  
    2811 2811 /** @name   Memory access.
    2812 2812  * @{ */
    2813      VBOXSTRICTRC    iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT;
         2813
         2814 /** Report a \#GP instead of \#AC and do not restrict to ring-3 */
         2815 #define IEM_MEMMAP_F_ALIGN_GP       RT_BIT_32(16)
         2816 /** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1
         2817  *  when it works like normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */
         2818 #define IEM_MEMMAP_F_ALIGN_SSE      RT_BIT_32(17)
         2819 /** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP.
         2820  * Users include FXSAVE & FXRSTOR. */
         2821 #define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
         2822
         2823 VBOXSTRICTRC    iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
         2824                           uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
    2814 2825 VBOXSTRICTRC    iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
    2815 2826 #ifndef IN_RING3
     
    2899 2910 #endif
    2900 2911
    2901      VBOXSTRICTRC    iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
         2912 VBOXSTRICTRC    iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
         2913                                             void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
    2902 2914 VBOXSTRICTRC    iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;
    2903 2915 VBOXSTRICTRC    iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
     
    2908 2920 VBOXSTRICTRC    iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    2909 2921 VBOXSTRICTRC    iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
    2910      VBOXSTRICTRC    iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
         2922 VBOXSTRICTRC    iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
         2923                                            void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
    2911 2924 VBOXSTRICTRC    iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
    2912 2925 VBOXSTRICTRC    iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT;
  • trunk/src/VBox/VMM/include/IEMMc.h

    r95403 r95410  
    883 883  */
    884 884 #define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
    885          IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
        885     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), \
        886                                        (a_GCPtrMem), (a_fAccess), sizeof(*(a_pMem)) - 1))
    886 887
    887 888 /** Maps guest memory for direct or bounce buffered access.
     
    890 891  */
    891 892 #define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
    892          IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
        893     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), \
        894                                        (a_GCPtrMem), (a_fAccess), (a_cbMem) - 1))
    893 895
    894 896 /** Commits the memory and unmaps the guest memory.
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette