VirtualBox

Changeset 39038 in vbox


Timestamp: Oct 19, 2011 2:36:27 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 74458
Message: VMM: -W4 warnings (MSC).

Location: trunk/src/VBox/VMM
Files: 20 edited

Legend: each diff line below carries line numbers from the two revisions. A line showing both numbers run together (e.g. 855855) is unchanged context; a line with a single number flush against the margin was removed, and a line whose single number is preceded by an extra space was added.
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r38816 r39038  
    855855        case MSR_IA32_THERM_STATUS:
    856856            /* CPU temperature relative to TCC, to actually activate, CPUID leaf 6 EAX[0] must be set */
    857             *puValue = ( 1 << 31) /* validity bit */
    858                      | (20 << 16) /* degrees till TCC */;
     857            *puValue = RT_BIT(31)          /* validity bit */
     858                     | (UINT64_C(20) << 16) /* degrees till TCC */;
    859859            break;
    860860
     
    23412341         * This only seems to apply to AMD-V; in the VT-x case we *do* need to look
    23422342         * at SS. (ACP2 regression during install after a far call to ring 2)
    2343          * 
     2343         *
    23442344         * Seems it isn't necessiarly true for newer AMD-V CPUs even, we have
    23452345         * to move the VMCB.guest.u8CPL into Attr.n.u2Dpl to make this (and
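
The first CPUMAllRegs.cpp hunk fixes a signed-shift warning: 1 << 31 shifts into the sign bit of a 32-bit int (undefined behavior, and flagged at -W4), and the result then sign-extends when widened for the 64-bit *puValue. RT_BIT() and UINT64_C() force unsigned/64-bit arithmetic; the second hunk only strips trailing whitespace. A minimal standalone sketch of the same fix, using plain <stdint.h> macros in place of the IPRT RT_BIT macro:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Broken form: (1 << 31) is a signed int shift that overflows,
           then sign-extends to 0xffffffff80000000 when widened. */
        /* uint64_t uBad = (1 << 31) | (20 << 16); */

        /* Fixed form: make the operands unsigned and 64-bit first. */
        uint64_t uValue = (UINT64_C(1) << 31)    /* validity bit */
                        | (UINT64_C(20) << 16);  /* degrees till TCC */

        printf("%#llx\n", (unsigned long long)uValue);
        return 0;
    }
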
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r39034 r39038  
    630630        }
    631631    }
    632 #endif
    633632    return VERR_EM_INTERPRETER;
     633#endif
    634634}
    635635
     
    699699        }
    700700    }
    701 #endif
    702701    return VERR_EM_INTERPRETER;
     702#endif
    703703}
    704704
     
    779779        }
    780780    }
    781 #endif
    782781    return VERR_EM_INTERPRETER;
     782#endif
    783783}
    784784
     
    13271327#ifdef IN_RC
    13281328    }
    1329 #endif
    13301329    return VERR_EM_INTERPRETER;
     1330#endif
    13311331}
    13321332
     
    22092209
    22102210    AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
    2211     return VERR_EM_INTERPRETER;
    22122211}
    22132212
     
    30663065#undef INTERPRET_CASE
    30673066    } /* switch (opcode) */
    3068     AssertFailed();
    3069     return VERR_INTERNAL_ERROR;
     3067    /* not reached */
    30703068}
    30713069
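
Four of the EMAll.cpp hunks make the same move: the fall-back return VERR_EM_INTERPRETER used to sit after the #endif, but in the build where the conditional block above it ends with an unconditional return, MSC flags the trailing statement as unreachable code (C4702), so the return moves inside the conditional. The last two hunks drop statements that were dead in every build: AssertMsgFailedReturn already returns VERR_EM_INTERPRETER itself, and the switch above the deleted AssertFailed() returns on every path. A condensed sketch of the #endif move (the names mimic the VBox ones; the error value is a stand-in):

    #define VERR_EM_INTERPRETER (-1101) /* stand-in; the real value lives in VBox/err.h */

    int emInterpretSketch(int fHandled)
    {
    #ifdef IN_RC
        if (fHandled)
            return 0;               /* handled in the raw-mode context */
        return VERR_EM_INTERPRETER; /* reachable fall-back, kept inside the #ifdef */
    #else
        (void)fHandled;
        return VERR_EM_INTERPRETER; /* the !IN_RC build always returns here, so a
                                       second return after the #endif would be
                                       unreachable and trigger C4702 */
    #endif
    }
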
  • trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp

    r37955 r39038  
    7575    Assert(pVM->hwaccm.s.svm.fSupported);
    7676    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
     77
     78#else
     79    hwaccmQueueInvlPage(pVCpu, GCVirt);
     80    return VINF_SUCCESS;
    7781#endif
    78 
    79     hwaccmQueueInvlPage(pVCpu, GCVirt);
    80     return VINF_SUCCESS;
    8182}
    8283
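
Same unreachable-code theme in HWACCMAll.cpp, solved the other way around: instead of moving a return, the ring-3 path becomes a proper #else branch, so in ring-0 builds (where the #ifdef arm ends with an unconditional return) the queueing code is not compiled at all. A sketch with hypothetical helpers standing in for SVMR0InvalidatePage and hwaccmQueueInvlPage:

    int  ring0InvalidateSketch(void) { return 0; } /* hypothetical helper */
    void queueInvalidateSketch(void) { }           /* hypothetical helper */

    int invalidatePageSketch(void)
    {
    #ifdef IN_RING0
        return ring0InvalidateSketch();
    #else
        queueInvalidateSketch();
        return 0;
        /* Before the change these two lines followed the #endif; in ring-0
           builds they sat after an unconditional return and MSC -W4
           reported them as unreachable (C4702). */
    #endif
    }
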
  • trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp

    r39034 r39038  
    5858static const unsigned g_aSize2Shift[] =
    5959{
    60     ~0,    /* 0 - invalid */
     60    ~0U,   /* 0 - invalid */
    6161    0,     /* *1 == 2^0 */
    6262    1,     /* *2 == 2^1 */
    63     ~0,    /* 3 - invalid */
     63    ~0U,   /* 3 - invalid */
    6464    2,     /* *4 == 2^2 */
    65     ~0,    /* 5 - invalid */
    66     ~0,    /* 6 - invalid */
    67     ~0,    /* 7 - invalid */
     65    ~0U,   /* 5 - invalid */
     66    ~0U,   /* 6 - invalid */
     67    ~0U,   /* 7 - invalid */
    6868    3      /* *8 == 2^3 */
    6969};
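
Here ~0, which is a signed int with value -1, initialized elements of an array of unsigned; MSC -W4 reports that as warning C4245 (conversion from signed to unsigned). ~0U is the same all-bits-set pattern with the correct type. The fixed table compiles cleanly on its own (the accessor is a hypothetical addition for illustration):

    static const unsigned g_aSize2Shift[] =
    {
        ~0U,   /* 0 - invalid */
        0,     /* *1 == 2^0 */
        1,     /* *2 == 2^1 */
        ~0U,   /* 3 - invalid */
        2,     /* *4 == 2^2 */
        ~0U,   /* 5 - invalid */
        ~0U,   /* 6 - invalid */
        ~0U,   /* 7 - invalid */
        3      /* *8 == 2^3 */
    };

    unsigned size2ShiftSketch(unsigned cb) /* hypothetical accessor */
    {
        return cb < 9 ? g_aSize2Shift[cb] : ~0U;
    }
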
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r39034 r39038  
    17281728        default:
    17291729            AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
    1730             return ~0;
     1730            return NIL_RTHCPHYS;
    17311731    }
    17321732}
     
    17601760        default:
    17611761            AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
    1762             return ~0;
     1762            return NIL_RTHCPHYS;
    17631763    }
    17641764}
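
The same signed ~0 issue, this time in return position: these functions return the 64-bit RTHCPHYS type, so return ~0 converts the signed -1 to unsigned (C4245). The named nil constant has the right type and states the intent. A sketch using a local typedef in place of the IPRT one:

    #include <stdint.h>

    typedef uint64_t RTHCPHYS;            /* host physical address, as in IPRT */
    #define NIL_RTHCPHYS (~(RTHCPHYS)0)   /* all bits set, correctly typed */

    RTHCPHYS getCr3Sketch(int enmMode)    /* hypothetical */
    {
        switch (enmMode)
        {
            case 0:  return 0x1000;       /* made-up valid address */
            default: return NIL_RTHCPHYS; /* 'return ~0;' drew C4245 here */
        }
    }
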
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r39034 r39038  
    2727 */
    2828
     29#ifdef _MSC_VER
     30/** @todo we're generating unnecessary code in nested/ept shadow mode and for
     31 *        real/prot-guest+RC mode. */
     32# pragma warning(disable: 4505)
     33#endif
    2934
    3035/*******************************************************************************
     
    3742static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
    3843static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
    39 #if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     44# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    4045static void PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst);
    41 #else
     46# else
    4247static void PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, RTGCPHYS GCPhysPage, PPGMPOOLPAGE pShwPage, unsigned iPTDst);
    4348#endif
     
    24412446                if (SHW_PTE_IS_TRACK_DIRTY(*pPteDst))
    24422447                {
    2443                     PPGMPAGE pPage  = pgmPhysGetPage(pVM, GST_GET_PTE_GCPHYS(*pPteSrc));
     2448                    PPGMPAGE pPage  = pgmPhysGetPage(pVM, GST_GET_PTE_GCPHYS(PteSrc));
    24442449                    SHWPTE   PteDst = *pPteDst;
    24452450
     
    24472452                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtyPageTrap));
    24482453
    2449                     Assert(pPteSrc->n.u1Write);
     2454                    Assert(PteSrc.n.u1Write);
    24502455
    24512456                    /* Note: No need to invalidate this entry on other VCPUs as a stale TLB
     
    24672472                                && PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM)
    24682473                            {
    2469                                 rc = pgmPhysPageMakeWritable(pVM, pPage, GST_GET_PTE_GCPHYS(*pPteSrc));
     2474                                rc = pgmPhysPageMakeWritable(pVM, pPage, GST_GET_PTE_GCPHYS(PteSrc));
    24702475                                AssertRC(rc);
    24712476                            }
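
PGMAllBth.h is a template-style header included once per guest/shadow paging mode combination, so in some instantiations whole static helpers end up unreferenced and MSC removes them with warning C4505; the new pragma silences that for MSC only. The PteSrc hunks switch from dereferencing the old pointer parameter to the by-value copy the function now receives, and the # if/# else nesting gains a space per the file's preprocessor style. A sketch of the pragma pattern:

    #ifdef _MSC_VER
    /* Unreferenced static functions are expected in some instantiations
       of a multiply-included template header. */
    # pragma warning(disable: 4505)
    #endif

    static int usedHelperSketch(void)   { return 1; }
    static int unusedHelperSketch(void) { return 2; } /* would draw C4505 */

    int instantiationEntrySketch(void)
    {
        return usedHelperSketch();
    }
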
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r39034 r39038  
    34903490        rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
    34913491        if (RT_SUCCESS(rc))
     3492        {
    34923493            rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
    3493         if (RT_SUCCESS(rc))
    3494         {
    3495             /** @todo we should check reserved bits ... */
    3496             AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
    3497             PGMPAGEMAPLOCK PgMpLck;
    3498             void const *pvSrc1;
    3499             rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
    3500             switch (rc)
     3494            if (RT_SUCCESS(rc))
    35013495            {
    3502                 case VINF_SUCCESS:
    3503                     memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
    3504                     PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
    3505                     break;
    3506                 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
    3507                     memset(pvDst, 0xff, cb1);
    3508                     break;
    3509                 default:
    3510                     Assert(RT_FAILURE_NP(rc));
    3511                     return rc;
     3496                /** @todo we should check reserved bits ... */
     3497                AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
     3498                PGMPAGEMAPLOCK PgMpLck;
     3499                void const *pvSrc1;
     3500                rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
     3501                switch (rc)
     3502                {
     3503                    case VINF_SUCCESS:
     3504                        memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
     3505                        PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
     3506                        break;
     3507                    case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
     3508                        memset(pvDst, 0xff, cb1);
     3509                        break;
     3510                    default:
     3511                        Assert(RT_FAILURE_NP(rc));
     3512                        return rc;
     3513                }
     3514
     3515                void const *pvSrc2;
     3516                rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
     3517                switch (rc)
     3518                {
     3519                    case VINF_SUCCESS:
     3520                        memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
     3521                        PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
     3522                        break;
     3523                    case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
     3524                        memset((uint8_t *)pvDst + cb1, 0xff, cb2);
     3525                        break;
     3526                    default:
     3527                        Assert(RT_FAILURE_NP(rc));
     3528                        return rc;
     3529                }
     3530
     3531                if (!(fFlags1 & X86_PTE_A))
     3532                {
     3533                    rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
     3534                    AssertRC(rc);
     3535                }
     3536                if (!(fFlags2 & X86_PTE_A))
     3537                {
     3538                    rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
     3539                    AssertRC(rc);
     3540                }
     3541                return VINF_SUCCESS;
    35123542            }
    3513 
    3514             void const *pvSrc2;
    3515             rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
    3516             switch (rc)
    3517             {
    3518                 case VINF_SUCCESS:
    3519                     memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
    3520                     PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
    3521                     break;
    3522                 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
    3523                     memset((uint8_t *)pvDst + cb1, 0xff, cb2);
    3524                     break;
    3525                 default:
    3526                     Assert(RT_FAILURE_NP(rc));
    3527                     return rc;
    3528             }
    3529 
    3530             if (!(fFlags1 & X86_PTE_A))
    3531             {
    3532                 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
    3533                 AssertRC(rc);
    3534             }
    3535             if (!(fFlags2 & X86_PTE_A))
    3536             {
    3537                 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
    3538                 AssertRC(rc);
    3539             }
    3540             return VINF_SUCCESS;
    35413543        }
    35423544    }
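
This large PGMAllPhys.cpp hunk is almost pure re-indentation: the second GetPage call and everything depending on it move inside the braces of the first RT_SUCCESS check instead of relying on rc carrying a failure through a flat sequence of ifs. The behavior is identical, but the nested form lets the compiler's flow analysis prove that fFlags2 and GCPhys2 are initialized wherever they are read; the flat form tends to draw MSC's C4701 ("potentially uninitialized local variable"). A condensed sketch with hypothetical names:

    #include <stdint.h>

    /* Hypothetical stand-in: fills *pValue on success (rc == 0). */
    int getValueSketch(int fFail, uint32_t *pValue)
    {
        if (fFail)
            return -1;
        *pValue = 42;
        return 0;
    }

    uint32_t readTwoSketch(int fFail)
    {
        uint32_t u1, u2;
        int rc = getValueSketch(0, &u1);
        if (rc == 0)
        {
            rc = getValueSketch(fFail, &u2);
            if (rc == 0)
                return u1 + u2;  /* u2 is provably initialized on this path */
        }
        return 0;
    }
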
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r39034 r39038  
    31793179#endif
    31803180            AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u32=%RX32 poolkind=%x\n", pPage->iFirstPresent, pPage->cPresent, u32, pPage->enmKind));
    3181             PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
     3181            /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);*/
    31823182            break;
    31833183        }
     
    32533253#endif
    32543254            AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u64=%RX64 poolkind=%x iPte=%d PT=%RX64\n", pPage->iFirstPresent, pPage->cPresent, u64, pPage->enmKind, iPte, PGMSHWPTEPAE_GET_LOG(pPT->a[iPte])));
    3255             PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
     3255            /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);*/
    32563256            break;
    32573257        }
     
    32883288# endif
    32893289            AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
    3290             PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);
     3290            /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);*/
    32913291            break;
    32923292        }
     
    33213321# endif
    33223322            AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
    3323             PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);
     3323            /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);*/
    33243324            break;
    33253325        }
     
    33293329            AssertFatalMsgFailed(("enmKind=%d iShw=%d\n", pPage->enmKind, iShw));
    33303330    }
     3331
     3332    /* not reached. */
     3333#ifndef _MSC_VER
    33313334    return fRet;
     3335#endif
    33323336}
    33333337
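
AssertFatalMsgFailed never returns, so the statements that used to follow it were dead; they are commented out rather than deleted to document what would run if the assertion were ever downgraded. The trailing return gets the opposite treatment from the EMAll.cpp hunks: gcc cannot always prove the end of the function is unreachable and wants a return there, while MSC can and flags the return as unreachable, hence the #ifndef _MSC_VER. A sketch with a hypothetical noreturn helper:

    #include <stdlib.h>

    #ifdef _MSC_VER
    __declspec(noreturn) void fatalSketch(void) { abort(); }
    #else
    void fatalSketch(void) __attribute__((noreturn));
    void fatalSketch(void) { abort(); }
    #endif

    int checkKindSketch(int enmKind)
    {
        switch (enmKind)
        {
            case 0:  return 1;
            case 1:  return 0;
            default: fatalSketch();  /* never returns */
        }
        /* not reached */
    #ifndef _MSC_VER
        return 0;  /* gcc may still demand a return here; for MSC the same
                      line would be unreachable code (C4702) */
    #endif
    }
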
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r37354 r39038  
    300300     || PGM_SHW_TYPE == PGM_TYPE_EPT
    301301        AssertFailed(); /* can't happen */
     302        pPT = NULL;     /* shut up MSC */
    302303# else
    303304        Assert(pgmMapAreMappingsEnabled(pVM));
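
A can't-happen branch that only asserts leaves the local pPT uninitialized on that path as far as MSC's flow analysis is concerned, producing C4701 ("potentially uninitialized local variable used"). The dummy assignment costs nothing and documents itself. A standalone sketch of the shape:

    #include <assert.h>
    #include <stddef.h>

    int readEntrySketch(int enmType)
    {
        static int s_aTable[4] = { 7, 8, 9, 10 };
        int *pPT;

        if (enmType == 0)
            pPT = &s_aTable[0];
        else
        {
            assert(!"can't happen");
            pPT = NULL;  /* shut up MSC (C4701) */
        }
        return pPT ? *pPT : -1;
    }
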
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r37527 r39038  
    7373    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
    7474    AssertFatalMsgFailed(("pGip=%p u32Magic=%#x\n", pGip, VALID_PTR(pGip) ? pGip->u32Magic : 0));
     75#ifndef _MSC_VER
    7576    return 0; /* gcc false positive warning */
     77#endif
    7678}
    7779
  • trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp

    r37955 r39038  
    187187    pVCpu->trpm.s.uActiveVector               = u8TrapNo;
    188188    pVCpu->trpm.s.enmActiveType               = enmType;
    189     pVCpu->trpm.s.uActiveErrorCode            = ~0;
     189    pVCpu->trpm.s.uActiveErrorCode            = ~(RTGCUINT)0;
    190190    pVCpu->trpm.s.uActiveCR2                  = 0xdeadface;
    191191    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

    r39034 r39038  
    629629 * @param   pGMM    The name of the pGMM variable.
    630630 */
    631 #if defined(VBOX_STRICT) && 0
     631#if defined(VBOX_STRICT) && defined(GMMR0_WITH_SANITY_CHECK) && 0
    632632# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)   (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
    633633#else
     
    643643 * @param   pGMM    The name of the pGMM variable.
    644644 */
    645 #if defined(VBOX_STRICT) && 0
     645#if defined(VBOX_STRICT) && defined(GMMR0_WITH_SANITY_CHECK) && 0
    646646# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM)    (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
    647647#else
     
    657657 * @param   pGMM    The name of the pGMM variable.
    658658 */
    659 #if defined(VBOX_STRICT) && 0
     659#if defined(VBOX_STRICT) && defined(GMMR0_WITH_SANITY_CHECK) && 0
    660660# define GMM_CHECK_SANITY_IN_LOOPS(pGMM)        (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
    661661#else
     
    672672DECLINLINE(void)            gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
    673673DECLINLINE(void)            gmmR0SelectSetAndLinkChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
     674#ifdef GMMR0_WITH_SANITY_CHECK
    674675static uint32_t             gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
     676#endif
    675677static bool                 gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem);
    676678DECLINLINE(void)            gmmR0FreePrivatePage(PGMM pGMM, PGVM pGVM, uint32_t idPage, PGMMPAGE pPage);
     
    16191621}
    16201622
     1623#ifdef GMMR0_WITH_SANITY_CHECK
    16211624
    16221625/**
     
    16791682}
    16801683
     1684#endif /* GMMR0_WITH_SANITY_CHECK */
    16811685
    16821686/**
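
The GMM sanity-check macros were already compiled out (note the "&& 0"), but the static gmmR0SanityCheck function itself still got compiled, and an always-unreferenced static is exactly what C4505 complains about. The fix introduces GMMR0_WITH_SANITY_CHECK and compiles the declaration, the definition, and the macro users out together. A sketch of the pattern:

    /* #define GMMR0_WITH_SANITY_CHECK */ /* feature switch, off by default */

    #ifdef GMMR0_WITH_SANITY_CHECK
    static unsigned sanityCheckSketch(void)
    {
        return 0;  /* number of inconsistencies found */
    }
    # define CHECK_SANITY() (sanityCheckSketch() == 0)
    #else
    # define CHECK_SANITY() (1) /* constant when disabled; no dead static left behind */
    #endif

    int enterSketch(void)
    {
        return CHECK_SANITY() ? 0 : -1;
    }
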
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    r37465 r39038  
    256256
    257257/** The GVMM::u32Magic value (Charlie Haden). */
    258 #define GVMM_MAGIC      0x19370806
     258#define GVMM_MAGIC      UINT32_C(0x19370806)
    259259
    260260
     
    12981298    ASMAtomicWriteNullPtr(&pHandle->pSession);
    12991299    ASMAtomicWriteHandle(&pHandle->hEMT0,        NIL_RTNATIVETHREAD);
    1300     ASMAtomicWriteSize(&pHandle->ProcId,         NIL_RTPROCESS);
     1300    ASMAtomicWriteU32(&pHandle->ProcId,          NIL_RTPROCESS);
    13011301
    13021302    gvmmR0UsedUnlock(pGVMM);
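
Two small type-exactness fixes in GVMMR0.cpp: the magic constant gets an explicit UINT32_C so its type no longer depends on the platform's int, and the ProcId store uses the width-specific ASMAtomicWriteU32 rather than the size-generic ASMAtomicWriteSize, which had to infer the width from the operand and could warn doing so. A sketch of the constant half:

    #include <stdint.h>

    #define GVMM_MAGIC_SKETCH UINT32_C(0x19370806) /* local stand-in for GVMM_MAGIC */

    int isValidMagicSketch(uint32_t u32Magic)
    {
        return u32Magic == GVMM_MAGIC_SKETCH;
    }
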
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r38816 r39038  
    964964    uint64_t    exitCode = (uint64_t)SVM_EXIT_INVALID;
    965965    SVM_VMCB   *pVMCB;
    966     bool        fSyncTPR = false;
    967966    unsigned    cResume = 0;
    968     uint8_t     u8LastTPR;
    969967    PHMGLOBLCPUINFO pCpu = 0;
    970968    RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
     
    10921090     * Note! Interrupts must be disabled done *before* we check for TLB flushes; TLB
    10931091     *       shootdowns rely on this.
    1094      */                                               
     1092     */
    10951093    uOldEFlags = ASMIntDisableFlags();
    10961094    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     
    11111109    /* TPR caching using CR8 is only available in 64 bits mode or with 32 bits guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is supported. */
    11121110    /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! (no longer true)
    1113      * @todo query and update the TPR only when it could have been changed (mmio access)
    11141111     */
     1112    /** @todo query and update the TPR only when it could have been changed (mmio access)
     1113     */
     1114    bool    fSyncTPR  = false;
     1115    uint8_t u8LastTPR = 0; /* Initialized for potentially stupid compilers. */
    11151116    if (pVM->hwaccm.s.fHasIoApic)
    11161117    {
     1118        /* TPR caching in CR8 */
    11171119        bool fPending;
    1118 
    1119         /* TPR caching in CR8 */
    11201120        rc2 = PDMApicGetTPR(pVCpu, &u8LastTPR, &fPending);
    11211121        AssertRC(rc2);
     
    15871587        else
    15881588        {
    1589             if ((u8LastTPR >> 4) != pVMCB->ctrl.IntCtrl.n.u8VTPR)
     1589            if ((uint8_t)(u8LastTPR >> 4) != pVMCB->ctrl.IntCtrl.n.u8VTPR)
    15901590            {
    15911591                rc2 = PDMApicSetTPR(pVCpu, pVMCB->ctrl.IntCtrl.n.u8VTPR << 4);   /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
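
Besides whitespace cleanup and turning a stray comment into a proper @todo doxygen tag, the HWSVMR0.cpp change moves fSyncTPR and u8LastTPR down to first use and initializes u8LastTPR, because MSC cannot prove it is written on every path before the comparison at the bottom (C4701 again). That comparison also gains a cast: the shift promotes u8LastTPR to int, and comparing that against the u8VTPR bitfield draws a -W4 conversion complaint. A sketch of both points:

    #include <stdbool.h>
    #include <stdint.h>

    struct IntCtlSketch { uint32_t u8VTPR : 8; uint32_t uReserved : 24; };

    bool tprNeedsWritebackSketch(bool fHasIoApic, struct IntCtlSketch Ctl)
    {
        uint8_t u8LastTPR = 0; /* initialized for potentially stupid compilers */
        if (fHasIoApic)
            u8LastTPR = 0x80;  /* pretend the APIC reported this TPR */

        /* cr8 bits 3-0 correspond to bits 7-4 of the TPR register, hence
           the >> 4; cast back to uint8_t before the bitfield comparison. */
        return (uint8_t)(u8LastTPR >> 4) != Ctl.u8VTPR;
    }
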
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r39034 r39038  
    7777static void hmR0VmxFlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPTR GCPtr);
    7878static void hmR0VmxUpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    79 #ifdef VBOX_STRICT
    80 static bool hmR0VmxIsValidReadField(uint32_t idxField);
    81 static bool hmR0VmxIsValidWriteField(uint32_t idxField);
    82 #endif
    8379static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);
    8480
     
    46304626
    46314627#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     4628
    46324629/**
    46334630 * Prepares for and executes VMLAUNCH (64 bits guest mode)
     
    46974694}
    46984695
    4699 /**
    4700  * Executes the specified handler in 64 mode
    4701  *
    4702  * @returns VBox status code.
    4703  * @param   pVM         The VM to operate on.
    4704  * @param   pVCpu       The VMCPU to operate on.
    4705  * @param   pCtx        Guest context
    4706  * @param   pfnHandler  RC handler
    4707  * @param   cbParam     Number of parameters
    4708  * @param   paParam     Array of 32 bits parameters
    4709  */
    4710 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam)
    4711 {
    4712     int             rc, rc2;
    4713     PHMGLOBLCPUINFO pCpu;
    4714     RTHCPHYS        HCPhysCpuPage;
    4715     RTHCUINTREG     uOldEFlags;
    4716 
    4717     AssertReturn(pVM->hwaccm.s.pfnHost32ToGuest64R0, VERR_INTERNAL_ERROR);
    4718     Assert(pfnHandler);
    4719     Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField));
    4720     Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField));
    4721 
    4722 #ifdef VBOX_STRICT
    4723     for (unsigned i=0;i<pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries;i++)
    4724         Assert(hmR0VmxIsValidWriteField(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField[i]));
    4725 
    4726     for (unsigned i=0;i<pVCpu->hwaccm.s.vmx.VMCSCache.Read.cValidEntries;i++)
    4727         Assert(hmR0VmxIsValidReadField(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField[i]));
    4728 #endif
    4729 
    4730     /* Disable interrupts. */
    4731     uOldEFlags = ASMIntDisableFlags();
    4732 
    4733     pCpu = HWACCMR0GetCurrentCpu();
    4734     HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    4735 
    4736     /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
    4737     VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
    4738 
    4739     /* Leave VMX Root Mode. */
    4740     VMXDisable();
    4741 
    4742     ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
    4743 
    4744     CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
    4745     CPUMSetHyperEIP(pVCpu, pfnHandler);
    4746     for (int i=(int)cbParam-1;i>=0;i--)
    4747         CPUMPushHyper(pVCpu, paParam[i]);
    4748 
    4749     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
    4750     /* Call switcher. */
    4751     rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
    4752     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
    4753 
    4754     /* Make sure the VMX instructions don't cause #UD faults. */
    4755     ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
    4756 
    4757     /* Enter VMX Root Mode */
    4758     rc2 = VMXEnable(HCPhysCpuPage);
    4759     if (RT_FAILURE(rc2))
    4760     {
    4761         ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
    4762         ASMSetFlags(uOldEFlags);
    4763         return VERR_VMX_VMXON_FAILED;
    4764     }
    4765 
    4766     rc2 = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
    4767     AssertRC(rc2);
    4768     Assert(!(ASMGetFlags() & X86_EFL_IF));
    4769     ASMSetFlags(uOldEFlags);
    4770     return rc;
    4771 }
    4772 
    4773 #endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
    4774 
    4775 
    4776 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    4777 /**
    4778  * Executes VMWRITE
    4779  *
    4780  * @returns VBox status code
    4781  * @param   pVCpu           The VMCPU to operate on.
    4782  * @param   idxField        VMCS index
    4783  * @param   u64Val          16, 32 or 64 bits value
    4784  */
    4785 VMMR0DECL(int) VMXWriteVMCS64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
    4786 {
    4787     int rc;
    4788 
    4789     switch (idxField)
    4790     {
    4791     case VMX_VMCS_CTRL_TSC_OFFSET_FULL:
    4792     case VMX_VMCS_CTRL_IO_BITMAP_A_FULL:
    4793     case VMX_VMCS_CTRL_IO_BITMAP_B_FULL:
    4794     case VMX_VMCS_CTRL_MSR_BITMAP_FULL:
    4795     case VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL:
    4796     case VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL:
    4797     case VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL:
    4798     case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL:
    4799     case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL:
    4800     case VMX_VMCS_GUEST_LINK_PTR_FULL:
    4801     case VMX_VMCS_GUEST_PDPTR0_FULL:
    4802     case VMX_VMCS_GUEST_PDPTR1_FULL:
    4803     case VMX_VMCS_GUEST_PDPTR2_FULL:
    4804     case VMX_VMCS_GUEST_PDPTR3_FULL:
    4805     case VMX_VMCS_GUEST_DEBUGCTL_FULL:
    4806     case VMX_VMCS_GUEST_EFER_FULL:
    4807     case VMX_VMCS_CTRL_EPTP_FULL:
    4808         /* These fields consist of two parts, which are both writable in 32 bits mode. */
    4809         rc  = VMXWriteVMCS32(idxField, u64Val);
    4810         rc |= VMXWriteVMCS32(idxField + 1, (uint32_t)(u64Val >> 32ULL));
    4811         AssertRC(rc);
    4812         return rc;
    4813 
    4814     case VMX_VMCS64_GUEST_LDTR_BASE:
    4815     case VMX_VMCS64_GUEST_TR_BASE:
    4816     case VMX_VMCS64_GUEST_GDTR_BASE:
    4817     case VMX_VMCS64_GUEST_IDTR_BASE:
    4818     case VMX_VMCS64_GUEST_SYSENTER_EIP:
    4819     case VMX_VMCS64_GUEST_SYSENTER_ESP:
    4820     case VMX_VMCS64_GUEST_CR0:
    4821     case VMX_VMCS64_GUEST_CR4:
    4822     case VMX_VMCS64_GUEST_CR3:
    4823     case VMX_VMCS64_GUEST_DR7:
    4824     case VMX_VMCS64_GUEST_RIP:
    4825     case VMX_VMCS64_GUEST_RSP:
    4826     case VMX_VMCS64_GUEST_CS_BASE:
    4827     case VMX_VMCS64_GUEST_DS_BASE:
    4828     case VMX_VMCS64_GUEST_ES_BASE:
    4829     case VMX_VMCS64_GUEST_FS_BASE:
    4830     case VMX_VMCS64_GUEST_GS_BASE:
    4831     case VMX_VMCS64_GUEST_SS_BASE:
    4832         /* Queue a 64 bits value as we can't set it in 32 bits host mode. */
    4833         if (u64Val >> 32ULL)
    4834             rc = VMXWriteCachedVMCSEx(pVCpu, idxField, u64Val);
    4835         else
    4836             rc = VMXWriteVMCS32(idxField, (uint32_t)u64Val);
    4837 
    4838         return rc;
    4839 
    4840     default:
    4841         AssertMsgFailed(("Unexpected field %x\n", idxField));
    4842         return VERR_INVALID_PARAMETER;
    4843     }
    4844 }
    4845 
    4846 /**
    4847  * Cache VMCS writes for performance reasons (Darwin) and for running 64 bits guests on 32 bits hosts.
    4848  *
    4849  * @param   pVCpu       The VMCPU to operate on.
    4850  * @param   idxField    VMCS field
    4851  * @param   u64Val      Value
    4852  */
    4853 VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
    4854 {
    4855     PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
    4856 
    4857     AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1, ("entries=%x\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
    4858 
    4859     /* Make sure there are no duplicates. */
    4860     for (unsigned i=0;i<pCache->Write.cValidEntries;i++)
    4861     {
    4862         if (pCache->Write.aField[i] == idxField)
    4863         {
    4864             pCache->Write.aFieldVal[i] = u64Val;
    4865             return VINF_SUCCESS;
    4866         }
    4867     }
    4868 
    4869     pCache->Write.aField[pCache->Write.cValidEntries]    = idxField;
    4870     pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
    4871     pCache->Write.cValidEntries++;
    4872     return VINF_SUCCESS;
    4873 }
    4874 
    4875 #endif /* HC_ARCH_BITS == 32 && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    4876 
    4877 #ifdef VBOX_STRICT
     4696# ifdef VBOX_STRICT
     4697
    48784698static bool hmR0VmxIsValidReadField(uint32_t idxField)
    48794699{
     
    49714791}
    49724792
    4973 #endif
    4974 
     4793# endif /* VBOX_STRICT */
     4794
     4795/**
     4796 * Executes the specified handler in 64 mode
     4797 *
     4798 * @returns VBox status code.
     4799 * @param   pVM         The VM to operate on.
     4800 * @param   pVCpu       The VMCPU to operate on.
     4801 * @param   pCtx        Guest context
     4802 * @param   pfnHandler  RC handler
     4803 * @param   cbParam     Number of parameters
     4804 * @param   paParam     Array of 32 bits parameters
     4805 */
     4806VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam)
     4807{
     4808    int             rc, rc2;
     4809    PHMGLOBLCPUINFO pCpu;
     4810    RTHCPHYS        HCPhysCpuPage;
     4811    RTHCUINTREG     uOldEFlags;
     4812
     4813    AssertReturn(pVM->hwaccm.s.pfnHost32ToGuest64R0, VERR_INTERNAL_ERROR);
     4814    Assert(pfnHandler);
     4815    Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField));
     4816    Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField));
     4817
     4818#ifdef VBOX_STRICT
     4819    for (unsigned i=0;i<pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries;i++)
     4820        Assert(hmR0VmxIsValidWriteField(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField[i]));
     4821
     4822    for (unsigned i=0;i<pVCpu->hwaccm.s.vmx.VMCSCache.Read.cValidEntries;i++)
     4823        Assert(hmR0VmxIsValidReadField(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField[i]));
     4824#endif
     4825
     4826    /* Disable interrupts. */
     4827    uOldEFlags = ASMIntDisableFlags();
     4828
     4829    pCpu = HWACCMR0GetCurrentCpu();
     4830    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
     4831
     4832    /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
     4833    VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     4834
     4835    /* Leave VMX Root Mode. */
     4836    VMXDisable();
     4837
     4838    ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
     4839
     4840    CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
     4841    CPUMSetHyperEIP(pVCpu, pfnHandler);
     4842    for (int i=(int)cbParam-1;i>=0;i--)
     4843        CPUMPushHyper(pVCpu, paParam[i]);
     4844
     4845    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     4846    /* Call switcher. */
     4847    rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
     4848    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     4849
     4850    /* Make sure the VMX instructions don't cause #UD faults. */
     4851    ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
     4852
     4853    /* Enter VMX Root Mode */
     4854    rc2 = VMXEnable(HCPhysCpuPage);
     4855    if (RT_FAILURE(rc2))
     4856    {
     4857        ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
     4858        ASMSetFlags(uOldEFlags);
     4859        return VERR_VMX_VMXON_FAILED;
     4860    }
     4861
     4862    rc2 = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     4863    AssertRC(rc2);
     4864    Assert(!(ASMGetFlags() & X86_EFL_IF));
     4865    ASMSetFlags(uOldEFlags);
     4866    return rc;
     4867}
     4868
     4869#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
     4870
     4871
     4872#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     4873/**
     4874 * Executes VMWRITE
     4875 *
     4876 * @returns VBox status code
     4877 * @param   pVCpu           The VMCPU to operate on.
     4878 * @param   idxField        VMCS index
     4879 * @param   u64Val          16, 32 or 64 bits value
     4880 */
     4881VMMR0DECL(int) VMXWriteVMCS64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
     4882{
     4883    int rc;
     4884
     4885    switch (idxField)
     4886    {
     4887    case VMX_VMCS_CTRL_TSC_OFFSET_FULL:
     4888    case VMX_VMCS_CTRL_IO_BITMAP_A_FULL:
     4889    case VMX_VMCS_CTRL_IO_BITMAP_B_FULL:
     4890    case VMX_VMCS_CTRL_MSR_BITMAP_FULL:
     4891    case VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL:
     4892    case VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL:
     4893    case VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL:
     4894    case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL:
     4895    case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL:
     4896    case VMX_VMCS_GUEST_LINK_PTR_FULL:
     4897    case VMX_VMCS_GUEST_PDPTR0_FULL:
     4898    case VMX_VMCS_GUEST_PDPTR1_FULL:
     4899    case VMX_VMCS_GUEST_PDPTR2_FULL:
     4900    case VMX_VMCS_GUEST_PDPTR3_FULL:
     4901    case VMX_VMCS_GUEST_DEBUGCTL_FULL:
     4902    case VMX_VMCS_GUEST_EFER_FULL:
     4903    case VMX_VMCS_CTRL_EPTP_FULL:
     4904        /* These fields consist of two parts, which are both writable in 32 bits mode. */
     4905        rc  = VMXWriteVMCS32(idxField, u64Val);
     4906        rc |= VMXWriteVMCS32(idxField + 1, (uint32_t)(u64Val >> 32ULL));
     4907        AssertRC(rc);
     4908        return rc;
     4909
     4910    case VMX_VMCS64_GUEST_LDTR_BASE:
     4911    case VMX_VMCS64_GUEST_TR_BASE:
     4912    case VMX_VMCS64_GUEST_GDTR_BASE:
     4913    case VMX_VMCS64_GUEST_IDTR_BASE:
     4914    case VMX_VMCS64_GUEST_SYSENTER_EIP:
     4915    case VMX_VMCS64_GUEST_SYSENTER_ESP:
     4916    case VMX_VMCS64_GUEST_CR0:
     4917    case VMX_VMCS64_GUEST_CR4:
     4918    case VMX_VMCS64_GUEST_CR3:
     4919    case VMX_VMCS64_GUEST_DR7:
     4920    case VMX_VMCS64_GUEST_RIP:
     4921    case VMX_VMCS64_GUEST_RSP:
     4922    case VMX_VMCS64_GUEST_CS_BASE:
     4923    case VMX_VMCS64_GUEST_DS_BASE:
     4924    case VMX_VMCS64_GUEST_ES_BASE:
     4925    case VMX_VMCS64_GUEST_FS_BASE:
     4926    case VMX_VMCS64_GUEST_GS_BASE:
     4927    case VMX_VMCS64_GUEST_SS_BASE:
     4928        /* Queue a 64 bits value as we can't set it in 32 bits host mode. */
     4929        if (u64Val >> 32ULL)
     4930            rc = VMXWriteCachedVMCSEx(pVCpu, idxField, u64Val);
     4931        else
     4932            rc = VMXWriteVMCS32(idxField, (uint32_t)u64Val);
     4933
     4934        return rc;
     4935
     4936    default:
     4937        AssertMsgFailed(("Unexpected field %x\n", idxField));
     4938        return VERR_INVALID_PARAMETER;
     4939    }
     4940}
     4941
     4942/**
     4943 * Cache VMCS writes for performance reasons (Darwin) and for running 64 bits guests on 32 bits hosts.
     4944 *
     4945 * @param   pVCpu       The VMCPU to operate on.
     4946 * @param   idxField    VMCS field
     4947 * @param   u64Val      Value
     4948 */
     4949VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
     4950{
     4951    PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     4952
     4953    AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1, ("entries=%x\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
     4954
     4955    /* Make sure there are no duplicates. */
     4956    for (unsigned i=0;i<pCache->Write.cValidEntries;i++)
     4957    {
     4958        if (pCache->Write.aField[i] == idxField)
     4959        {
     4960            pCache->Write.aFieldVal[i] = u64Val;
     4961            return VINF_SUCCESS;
     4962        }
     4963    }
     4964
     4965    pCache->Write.aField[pCache->Write.cValidEntries]    = idxField;
     4966    pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
     4967    pCache->Write.cValidEntries++;
     4968    return VINF_SUCCESS;
     4969}
     4970
     4971#endif /* HC_ARCH_BITS == 32 && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     4972
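
The HWVMXR0.cpp change, the bulk of the changeset, is pure code motion: VMXR0Execute64BitsHandler, VMXWriteVMCS64Ex, and VMXWriteCachedVMCSEx move below the hmR0VmxIsValidReadField/WriteField pair, so the VBOX_STRICT-only forward declarations at the top of the file can be deleted, and each function now sits inside the one #if block for the configuration that actually uses it, so no build sees an unreferenced static. A sketch of the ordering idea:

    /* Defining the checker before its only caller removes the need for a
       forward declaration, and keeping both under the same #ifdef means
       no configuration compiles an unreferenced static (C4505). */
    #ifdef VBOX_STRICT
    static int isValidFieldSketch(unsigned idxField)
    {
        return idxField < 16; /* made-up validity rule */
    }
    #endif

    int cachedWriteSketch(unsigned idxField)
    {
    #ifdef VBOX_STRICT
        if (!isValidFieldSketch(idxField))
            return -1;
    #endif
        return 0;
    }
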
  • trunk/src/VBox/VMM/VMMR0/TRPMR0.cpp

    r35346 r39038  
    4444    PVMCPU pVCpu = VMMGetCpu0(pVM);
    4545    RTUINT uActiveVector = pVCpu->trpm.s.uActiveVector;
    46     pVCpu->trpm.s.uActiveVector = ~0;
     46    pVCpu->trpm.s.uActiveVector = UINT32_MAX;
    4747    AssertMsgReturnVoid(uActiveVector < 256, ("uActiveVector=%#x is invalid! (More assertions to come, please enjoy!)\n", uActiveVector));
    4848
  • trunk/src/VBox/VMM/VMMR3/PDMBlkCache.cpp

    r39034 r39038  
    361361                LogFlow(("Evicting entry %#p (%u bytes)\n", pCurr, pCurr->cbData));
    362362
    363                 if (fReuseBuffer && (pCurr->cbData == cbData))
     363                if (fReuseBuffer && pCurr->cbData == cbData)
    364364                {
    365365                    STAM_COUNTER_INC(&pCache->StatBuffersReused);
     
    382382
    383383                    /* We have to remove the last entries from the paged out list. */
    384                     while (   ((pGhostListDst->cbCached + pCurr->cbData) > pCache->cbRecentlyUsedOutMax)
     384                    while (   pGhostListDst->cbCached + pCurr->cbData > pCache->cbRecentlyUsedOutMax
    385385                           && pGhostEntFree)
    386386                    {
     
    825825
    826826        RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
    827         SSMR3PutU32(pSSM, strlen(pBlkCache->pszId));
     827        SSMR3PutU32(pSSM, (uint32_t)strlen(pBlkCache->pszId));
    828828        SSMR3PutStrZ(pSSM, pBlkCache->pszId);
    829829
     
    16101610                                               uint64_t off, size_t cbData, uint8_t *pbBuffer)
    16111611{
     1612    AssertReturn(cbData <= UINT32_MAX, NULL);
    16121613    PPDMBLKCACHEENTRY pEntryNew = (PPDMBLKCACHEENTRY)RTMemAllocZ(sizeof(PDMBLKCACHEENTRY));
    16131614
     
    16211622    pEntryNew->cRefs         = 1; /* We are using it now. */
    16221623    pEntryNew->pList         = NULL;
    1623     pEntryNew->cbData        = cbData;
     1624    pEntryNew->cbData        = (uint32_t)cbData;
    16241625    pEntryNew->pWaitingHead  = NULL;
    16251626    pEntryNew->pWaitingTail  = NULL;
     
    17401741
    17411742/**
    1742  * Calculate aligned offset and size for a new cache entry
    1743  * which do not intersect with an already existing entry and the
    1744  * file end.
     1743 * Calculate aligned offset and size for a new cache entry which do not
     1744 * intersect with an already existing entry and the file end.
    17451745 *
    17461746 * @returns The number of bytes the entry can hold of the requested amount
    17471747 *          of byte.
    1748  * @param   pEndpoint        The endpoint.
    1749  * @param   pBlkCache   The endpoint cache.
    1750  * @param   off              The start offset.
    1751  * @param   cb               The number of bytes the entry needs to hold at least.
    1752  * @param   uAlignment       Alignment of the boundary sizes.
    1753  * @param   poffAligned      Where to store the aligned offset.
    1754  * @param   pcbAligned       Where to store the aligned size of the entry.
    1755  */
    1756 static size_t pdmBlkCacheEntryBoundariesCalc(PPDMBLKCACHE pBlkCache,
    1757                                              uint64_t off, size_t cb,
    1758                                              unsigned uAlignment,
    1759                                              uint64_t *poffAligned, size_t *pcbAligned)
    1760 {
    1761     size_t cbAligned;
    1762     size_t cbInEntry = 0;
    1763     uint64_t offAligned;
     1748 * @param   pEndpoint       The endpoint.
     1749 * @param   pBlkCache       The endpoint cache.
     1750 * @param   off             The start offset.
     1751 * @param   cb              The number of bytes the entry needs to hold at
     1752 *                          least.
     1753 * @param   uAlignment      Alignment of the boundary sizes.
     1754 * @param   poffAligned     Where to store the aligned offset.
     1755 * @param   pcbAligned      Where to store the aligned size of the entry.
     1756 */
     1757static uint32_t pdmBlkCacheEntryBoundariesCalc(PPDMBLKCACHE pBlkCache,
     1758                                               uint64_t off, uint32_t cb,
     1759                                               unsigned uAlignment,
     1760                                               uint64_t *poffAligned, uint32_t *pcbAligned)
     1761{
     1762    /* Get the best fit entries around the offset */
    17641763    PPDMBLKCACHEENTRY pEntryAbove = NULL;
    1765 
    1766     /* Get the best fit entries around the offset */
    17671764    pdmBlkCacheGetCacheBestFitEntryByOffset(pBlkCache, off, &pEntryAbove);
    17681765
     
    17751772             pEntryAbove ? pEntryAbove->cbData : 0));
    17761773
    1777     offAligned = off;
    1778 
     1774    /** @todo r=bird: Why is uAlignment disregarded here? */
     1775    uint64_t offAligned = off;
     1776
     1777    uint32_t cbAligned;
     1778    uint32_t cbInEntry;
    17791779    if (    pEntryAbove
    17801780        &&  off + cb > pEntryAbove->Core.Key)
    17811781    {
    1782         cbInEntry = pEntryAbove->Core.Key - off;
    1783         cbAligned = pEntryAbove->Core.Key - offAligned;
     1782        cbInEntry = (uint32_t)(pEntryAbove->Core.Key - off);
     1783        cbAligned = (uint32_t)(pEntryAbove->Core.Key - offAligned);
    17841784    }
    17851785    else
    17861786    {
     1787        cbInEntry = cb;
    17871788        cbAligned = cb;
    1788         cbInEntry = cb;
    17891789    }
    17901790
    17911791    /* A few sanity checks */
    1792     AssertMsg(!pEntryAbove || (offAligned + cbAligned) <= pEntryAbove->Core.Key,
     1792    AssertMsg(!pEntryAbove || offAligned + cbAligned <= pEntryAbove->Core.Key,
    17931793              ("Aligned size intersects with another cache entry\n"));
    17941794    Assert(cbInEntry <= cbAligned);
     
    18241824                                                size_t *pcbData)
    18251825{
     1826    AssertReturn(cb <= UINT32_MAX, NULL);
     1827
    18261828    uint64_t offStart = 0;
    1827     size_t cbEntry = 0;
     1829    uint32_t cbEntry  = 0;
     1830    *pcbData = pdmBlkCacheEntryBoundariesCalc(pBlkCache, off, (uint32_t)cb, uAlignment,
     1831                                              &offStart, &cbEntry);
     1832
     1833    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
     1834    pdmBlkCacheLockEnter(pCache);
     1835
    18281836    PPDMBLKCACHEENTRY pEntryNew = NULL;
    1829     PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
    1830     uint8_t *pbBuffer = NULL;
    1831 
    1832     *pcbData = pdmBlkCacheEntryBoundariesCalc(pBlkCache, off, cb, uAlignment,
    1833                                               &offStart, &cbEntry);
    1834 
    1835     pdmBlkCacheLockEnter(pCache);
     1837    uint8_t          *pbBuffer  = NULL;
    18361838    bool fEnough = pdmBlkCacheReclaim(pCache, cbEntry, true, &pbBuffer);
    1837 
    18381839    if (fEnough)
    18391840    {
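
The PDMBlkCache hunks are 64-bit truncation fixes, plus doc-comment rewrapping and a new @todo questioning why uAlignment is disregarded: cbData shrinks from size_t to uint32_t in the internal header (below), strlen results and entry sizes get explicit (uint32_t) casts, and new AssertReturn guards make the narrowing checked instead of silent. On 64-bit hosts MSC -W4 reports every implicit size_t-to-32-bit store as C4267 ("conversion from size_t, possible loss of data"). A sketch of the check-then-cast pattern:

    #include <stdint.h>
    #include <string.h>

    /* Validate the range once, then narrow explicitly: the cast keeps
       C4267 quiet without being able to hide a real truncation. */
    int storeIdLengthSketch(const char *pszId, uint32_t *pcchId)
    {
        uint64_t cch = strlen(pszId);
        if (cch > UINT32_MAX)
            return -1;           /* cannot happen on 32-bit hosts */
        *pcchId = (uint32_t)cch;
        return 0;
    }
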
  • trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp

    r37955 r39038  
    142142    if (    rc != VINF_EM_RAW_GUEST_TRAP
    143143        &&  rc != VINF_EM_RAW_RING_SWITCH_INT)
    144         pVCpu->trpm.s.uActiveVector = ~0;
     144        pVCpu->trpm.s.uActiveVector = UINT32_MAX;
    145145
    146146#ifdef VBOX_HIGH_RES_TIMERS_HACK
     
    694694             * starting from the instruction which caused the trap.
    695695             */
    696             pTrpmCpu->uActiveVector = ~0;
     696            pTrpmCpu->uActiveVector = UINT32_MAX;
    697697            Log6(("TRPMGC0b: %Rrc (%04x:%08x) (CG)\n", VINF_EM_RAW_RING_SWITCH, pRegFrame->cs, pRegFrame->eip));
    698698            PGMRZDynMapReleaseAutoSet(pVCpu);
     
    869869        case OP_BOUND:
    870870        case OP_INTO:
    871             pVCpu->trpm.s.uActiveVector = ~0;
     871            pVCpu->trpm.s.uActiveVector = UINT32_MAX;
    872872            return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_RING_SWITCH, pRegFrame);
    873873
  • trunk/src/VBox/VMM/include/PDMBlkCacheInternal.h

    r38885 r39038  
    6565    volatile uint32_t               cRefs;
    6666    /** Size of the entry. */
    67     size_t                          cbData;
     67    uint32_t                        cbData;
    6868    /** Pointer to the memory containing the data. */
    6969    uint8_t                        *pbData;
  • trunk/src/VBox/VMM/include/TRPMInternal.h

    r38867 r39038  
    205205
    206206    /** Active Interrupt or trap vector number.
    207      * If not ~0U this indicates that we're currently processing
    208      * a interrupt, trap, fault, abort, whatever which have arrived
    209      * at that vector number.
     207     * If not UINT32_MAX this indicates that we're currently processing a
     208     * interrupt, trap, fault, abort, whatever which have arrived at that
     209     * vector number.
    210210     */
    211     RTUINT                  uActiveVector;
     211    uint32_t                uActiveVector;
    212212
    213213    /** Active trap type. */
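
The TRPMInternal.h change makes the nil convention explicit: uActiveVector becomes a fixed-width uint32_t instead of RTUINT, and its "nothing active" value is spelled UINT32_MAX rather than ~0, which is why the TRPMAll.cpp, TRPMR0.cpp, and TRPMRCHandlers.cpp hunks above rewrite the assignments the same way. A sketch of the resulting shape:

    #include <stdint.h>

    typedef struct TRPMCPUSKETCH
    {
        uint32_t uActiveVector; /* UINT32_MAX when nothing is being processed */
    } TRPMCPUSKETCH;

    static inline int trpmIsProcessingSketch(const TRPMCPUSKETCH *pThis)
    {
        return pThis->uActiveVector != UINT32_MAX;
    }
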