VirtualBox

Changeset 107202 in vbox


Timestamp: Nov 30, 2024 1:00:29 AM
Author: vboxsync
Message:

VMM/IEM: Completely relaxed the VM::fGlobalForcedActions access optimization in iemNativeRecompFunc_BltIn_CheckTimersAndIrqsCommon to deal with any kind of address space randomization the VMCPU and VM structures may be subjected to. jiraref:VBP-1466
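In essence, the optimization computes the byte distance from pVCpu to VM::fGlobalForcedActions and only emits a pVCpu-relative access when that distance still fits a signed 32-bit displacement; otherwise the recompiled code now loads the absolute address into the temporary register and loads through it instead of asserting. Below is a minimal, self-contained sketch of that decision; the addresses are made up for illustration and are not taken from the changeset.

    /* Sketch of the near/far decision made by the emitter in the diff below;
       the two addresses stand in for pVCpu and &pVM->fGlobalForcedActions and
       are purely hypothetical. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t const uAddrVCpu                = UINT64_C(0x00007f1200000000);
        uint64_t const uAddrGlobalForcedActions = UINT64_C(0x00007f3300000040); /* > 2 GiB away */
        int64_t  const offGlobalForcedActions   = (int64_t)(uAddrGlobalForcedActions - uAddrVCpu);

        if ((int32_t)offGlobalForcedActions == offGlobalForcedActions)
            printf("near: access via 32-bit signed displacement off pVCpu (%+lld bytes)\n",
                   (long long)offGlobalForcedActions);
        else
            printf("far:  load absolute address %#llx into a temp register, then load through it\n",
                   (unsigned long long)uAddrGlobalForcedActions);
        return 0;
    }

In driverless configurations the VM and VMCPU structures share one userland allocation, so the displacement is a small negative value and the near path is taken; with the support driver the mappings may be randomized independently of each other, which is why the far fallback replaces the previous release assertion.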

File: 1 edited

Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added, and unprefixed lines are unchanged context.
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp

    r106465 → r107202

            variable, so we can't OR it directly on x86.

-           Note! We take a tiny liberty here and ASSUME that the VM and associated
-                 VMCPU mappings are less than 2 GiB away from one another, so we
-                 can access VM::fGlobalForcedActions via a 32-bit signed displacement.
-
-                 This is _only_ a potential issue with VMs using the _support_ _driver_
-                 for manging the structure, as it maps the individual bits separately
-                 and the mapping order differs between host platforms.  Linux may
-                 map the VM structure higher than the VMCPU ones, whereas windows may
-                 do put the VM structure in the lowest address.  On all hosts there
-                 is a chance that virtual memory fragmentation could cause the bits to
-                 end up at a greater distance from one another, but it is rather
-                 doubtful and we just ASSUME it won't happen for now...
-
-                 When the VM structure is allocated in userland, there is one
-                 allocation for it and all the associated VMCPU components, thus no
-                 problems. */
+           We try avoid loading the pVM address here by seeing if we can access
+           VM::fGlobalForcedActions via 32-bit pVCpu displacement.  For
+           driverless configs, this should go without problems (negative
+           offset), but when using the driver the layout is randomized and we
+           need to be flexible. */
         AssertCompile(VM_FF_ALL_MASK == UINT32_MAX);
-        intptr_t const offGlobalForcedActions = (intptr_t)&pReNative->pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions
-                                              - (intptr_t)pReNative->pVCpu;
+        uintptr_t const uAddrGlobalForcedActions = (uintptr_t)&pReNative->pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions;
+        intptr_t const  offGlobalForcedActions   = (intptr_t)uAddrGlobalForcedActions - (intptr_t)pReNative->pVCpu;
+
+# ifdef RT_ARCH_AMD64
         if (RT_LIKELY((int32_t)offGlobalForcedActions == offGlobalForcedActions))
-        { /* likely */ }
+        {
+            if (idxTmpReg2 >= 8)
+                pCodeBuf[off++] = X86_OP_REX_R;
+            pCodeBuf[off++] = 0x8b; /* mov */
+            off = iemNativeEmitGprByVCpuSignedDisp(pCodeBuf, off, idxTmpReg2, (int32_t)offGlobalForcedActions);
+        }
         else
         {
-            LogRelMax(16, ("!!WARNING!! offGlobalForcedActions=%#zx pVM=%p pVCpu=%p - CheckTimersAndIrqsCommon\n",
-                           offGlobalForcedActions, pReNative->pVCpu->CTX_SUFF(pVM), pReNative->pVCpu));
-# ifdef IEM_WITH_THROW_CATCH
-            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
-# else
-            AssertReleaseFailed();
-# endif
+            off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxTmpReg2, uAddrGlobalForcedActions);
+
+            if (idxTmpReg2 >= 8)
+                pCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
+            pCodeBuf[off++] = 0x8b; /* mov */
+            iemNativeEmitGprByGprDisp(pCodeBuf, off, idxTmpReg2, idxTmpReg2, 0);
         }
-
-# ifdef RT_ARCH_AMD64
-        if (idxTmpReg2 >= 8)
-            pCodeBuf[off++] = X86_OP_REX_R;
-        pCodeBuf[off++] = 0x8b; /* mov */
-        off = iemNativeEmitGprByVCpuSignedDisp(pCodeBuf, off, idxTmpReg2, (int32_t)offGlobalForcedActions);

         /* or reg1, reg2 */

[6 unchanged lines omitted]

 # elif defined(RT_ARCH_ARM64)
-        Assert(offGlobalForcedActions < 0);
-        off = iemNativeEmitGprBySignedVCpuLdStEx(pCodeBuf, off, idxTmpReg2, (int32_t)offGlobalForcedActions,
-                                                 kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
+        if (RT_LIKELY((int32_t)offGlobalForcedActions == offGlobalForcedActions))
+        {
+            if (offGlobalForcedActions < 0)
+                off = iemNativeEmitGprBySignedVCpuLdStEx(pCodeBuf, off, idxTmpReg2, (int32_t)offGlobalForcedActions,
+                                                         kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
+            else
+                off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, idxTmpReg2, (uint32_t)offGlobalForcedActions,
+                                                   kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
+        }
+        else
+        {
+            off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxTmpReg2, uAddrGlobalForcedActions);
+            pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxTmpReg2, idxTmpReg2, 0);
+        }
+
         off = iemNativeEmitOrGprByGprEx(pCodeBuf, off, idxTmpReg1, idxTmpReg2);
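One detail worth noting from the AMD64 hunk: the near path emits only X86_OP_REX_R for a high temporary register, because the load is made relative to the fixed pVCpu register, while the far path emits X86_OP_REX_R | X86_OP_REX_B, because there the same extended register is both the destination and the memory base of the 0x8b load. A small standalone sketch of that prefix selection, using local stand-in constants rather than the VBox definitions:

    /* Illustrates the REX bits chosen by the two AMD64 paths above; the REX
       byte is 0100WRXB, so bit R extends ModRM.reg (the destination) and bit B
       extends the base register in ModRM.rm.  The constants and the register
       index are made-up stand-ins for illustration only. */
    #include <stdio.h>

    enum { MY_REX_R = 0x44, MY_REX_B = 0x41 };

    int main(void)
    {
        unsigned const idxTmpReg2 = 10; /* hypothetical scratch register, r10 */

        /* Near path: mov r10d, [pVCpu + disp32] - only the destination is extended. */
        unsigned const fRexNear = idxTmpReg2 >= 8 ? MY_REX_R : 0;

        /* Far path: mov r10d, [r10] - destination and base are the same extended register. */
        unsigned const fRexFar  = idxTmpReg2 >= 8 ? MY_REX_R | MY_REX_B : 0;

        printf("near path REX prefix: %#x\n", fRexNear);
        printf("far  path REX prefix: %#x\n", fRexFar);
        return 0;
    }

The ARM64 hunk makes the same near/far split; in the near case it additionally picks the signed load emitter for negative displacements and the unsigned-offset emitter otherwise, and in the far case it loads the absolute address and dereferences it with a zero offset.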