VirtualBox

Changeset 80161 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Aug 6, 2019 6:10:51 PM (6 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 132602
Message: VMM,REM: Kicking out raw-mode. bugref:9517
Location: trunk/src/VBox/VMM/VMMAll
Files: 13 edited
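
The change is largely mechanical: blocks guarded by IN_RC or VBOX_WITH_RAW_MODE are deleted, and the surviving ring-0/ring-3 preprocessor logic is flattened by one nesting level, in places trading the old silent ring-3 fallback for an explicit #error guard. The before/after sketch below illustrates the pattern; the widgetLookup* helpers are made-up names used for illustration only, not real VMM APIs.

    /* Hypothetical helpers, one per compilation context. */
    void *widgetLookupRC(void *pVM, void *pv);   /* raw-mode context worker (dropped by r80161) */
    void *widgetLookupR0(void *pVM, void *pv);   /* ring-0 worker */
    void *widgetLookupR3(void *pVM, void *pv);   /* ring-3 worker */

    /* Before: three compilation contexts, ring-3 as the implicit fallback. */
    static void *widgetLookupOld(void *pVM, void *pv)
    {
    #ifdef IN_RC
        return widgetLookupRC(pVM, pv);
    #elif defined(IN_RING0)
        return widgetLookupR0(pVM, pv);
    #else
        return widgetLookupR3(pVM, pv);
    #endif
    }

    /* After: raw-mode is gone, the remaining contexts are named explicitly,
     * and an unexpected context breaks the build (cf. MMAll.cpp below). */
    static void *widgetLookupNew(void *pVM, void *pv)
    {
    #ifdef IN_RING0
        return widgetLookupR0(pVM, pv);
    #elif defined(IN_RING3)
        return widgetLookupR3(pVM, pv);
    #else
    # error "Neither IN_RING0 nor IN_RING3!"
    #endif
    }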

  • trunk/src/VBox/VMM/VMMAll/AllPdbTypeHack.cpp

    r76553 → r80161

     #include "../include/NEMInternal.h"
     #include "../include/REMInternal.h"
    -#ifndef IN_RC
    -# include "../VMMR0/GMMR0Internal.h"
    -# include "../VMMR0/GVMMR0Internal.h"
    -#endif
    -#ifdef VBOX_WITH_RAW_MODE
    -# include "../include/CSAMInternal.h"
    -# include "../include/PATMInternal.h"
    -#endif
    +#include "../VMMR0/GMMR0Internal.h"
    +#include "../VMMR0/GVMMR0Internal.h"
     #include <VBox/vmm/vm.h>
     #ifdef IN_RING3
     # include <VBox/vmm/uvm.h>
     #endif
    -#ifndef IN_RC
    -# include <VBox/vmm/gvm.h>
    -#endif
    +#include <VBox/vmm/gvm.h>
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r80055 → r80161

     }

    -#ifndef IN_RC

     /**
    …
     }

    -#endif /* !IN_RC */

     /**
    …
         pHistEntry->idxSlot       = UINT32_MAX;

    -#ifndef IN_RC
         /*
          * If common exit type, we will insert/update the exit into the exit record hash table.
          */
         if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
    -# ifdef IN_RING0
    +#ifdef IN_RING0
             && pVCpu->em.s.fExitOptimizationEnabledR0
             && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
    -# else
    +#else
             && pVCpu->em.s.fExitOptimizationEnabled
    -# endif
    +#endif
             && uFlatPC != UINT64_MAX
            )
             return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
    -#endif
         return NULL;
     }
    -
    -
    -#ifdef IN_RC
    -/**
    - * Special raw-mode interface for adding an exit to the history.
    - *
    - * Currently this is only for recording, not optimizing, so no return value.  If
    - * we start seriously caring about raw-mode again, we may extend it.
    - *
    - * @param   pVCpu           The cross context virtual CPU structure.
    - * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
    - * @param   uCs             The CS.
    - * @param   uEip            The EIP.
    - * @param   uTimestamp      The TSC value for the exit, 0 if not available.
    - * @thread  EMT(0)
    - */
    -VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
    -{
    -    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
    -    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
    -    pHistEntry->uFlatPC       = ((uint64_t)uCs << 32) |  uEip;
    -    pHistEntry->uTimestamp    = uTimestamp;
    -    pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
    -    pHistEntry->idxSlot       = UINT32_MAX;
    -}
    -#endif
    …
         pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));

    -#ifndef IN_RC
         /*
          * If common exit type, we will insert/update the exit into the exit record hash table.
          */
         if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
    -# ifdef IN_RING0
    +#ifdef IN_RING0
             && pVCpu->em.s.fExitOptimizationEnabledR0
             && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
    -# else
    +#else
             && pVCpu->em.s.fExitOptimizationEnabled
    -# endif
    +#endif
             && pHistEntry->uFlatPC != UINT64_MAX
            )
             return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
    -#endif
         return NULL;
     }
    …
         pHistEntry->uFlatPC       = uFlatPC;

    -#ifndef IN_RC
         /*
          * If common exit type, we will insert/update the exit into the exit record hash table.
          */
         if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
    -# ifdef IN_RING0
    +#ifdef IN_RING0
             && pVCpu->em.s.fExitOptimizationEnabledR0
             && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
    -# else
    +#else
             && pVCpu->em.s.fExitOptimizationEnabled
    -# endif
    +#endif
            )
             return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
    -#endif
         return NULL;
     }
    …
             if (RT_FAILURE(rc))
             {
    -#ifndef IN_RC
                 /*
                  * If we fail to find the page via the guest's page tables
    …
                         HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
                 }
    -#endif
             }
         }
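
The EMAll.cpp hunks above all concern the exit history: each exit is written into a fixed 256-entry ring buffer, and "common" EM-kind exits are additionally fed into an exit-record hash table when exit optimization is enabled (now gated only on IN_RING0 vs. ring-3, with the raw-mode case gone). As a standalone sketch of the ring-buffer indexing used there, with simplified stand-in types rather than the real EM structures:

    #include <stdint.h>

    #define HIST_SIZE 256                          /* must stay a power of two for the mask below */

    typedef struct HISTENTRY
    {
        uint64_t uFlatPC;                          /* flat PC, or CS:EIP packed into 64 bits */
        uint64_t uTimestamp;                       /* TSC value, 0 if unavailable */
        uint32_t uFlagsAndType;                    /* combined exit flags and type */
        uint32_t idxSlot;                          /* hash-table slot, UINT32_MAX if none */
    } HISTENTRY;

    typedef struct HIST
    {
        uint64_t  iNextExit;                       /* monotonically increasing exit counter */
        HISTENTRY aEntries[HIST_SIZE];
    } HIST;

    /* Record one exit; once the buffer is full the oldest entry is overwritten. */
    static HISTENTRY *histAddExit(HIST *pHist, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
    {
        HISTENTRY *pEntry = &pHist->aEntries[(uintptr_t)(pHist->iNextExit++) & (HIST_SIZE - 1)];
        pEntry->uFlatPC       = uFlatPC;
        pEntry->uTimestamp    = uTimestamp;
        pEntry->uFlagsAndType = uFlagsAndType;
        pEntry->idxSlot       = UINT32_MAX;        /* not (yet) linked to an exit record */
        return pEntry;
    }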
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r79118 → r80161

    -#ifndef IN_RC

     /**
    …
     }

    -# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     /**
      * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
    …
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     }
    -# endif
    +#endif

     /**
    …
     }

    -#endif /* !IN_RC */
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r80034 → r80161

     VMM_INT_DECL(void) HMDumpHwvirtVmxState(PVMCPU pVCpu)
     {
    -#ifndef IN_RC
         /* The string width of -4 used in the macros below to cover 'LDTR', 'GDTR', 'IDTR. */
    -# define HMVMX_DUMP_HOST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
    +#define HMVMX_DUMP_HOST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
         do { \
             LogRel(("  %s%-4s                       = {base=%016RX64}\n", \
                 (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Host##a_Seg##Base.u)); \
         } while (0)
    -# define HMVMX_DUMP_HOST_FS_GS_TR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
    +#define HMVMX_DUMP_HOST_FS_GS_TR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
         do { \
             LogRel(("  %s%-4s                       = {%04x base=%016RX64}\n", \
                     (a_pszPrefix), (a_SegName), (a_pVmcs)->Host##a_Seg, (a_pVmcs)->u64Host##a_Seg##Base.u)); \
         } while (0)
    -# define HMVMX_DUMP_GUEST_SEGREG(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
    +#define HMVMX_DUMP_GUEST_SEGREG(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
         do { \
             LogRel(("  %s%-4s                       = {%04x base=%016RX64 limit=%08x flags=%04x}\n", \
    …
                     (a_pVmcs)->u32Guest##a_Seg##Limit, (a_pVmcs)->u32Guest##a_Seg##Attr)); \
         } while (0)
    -# define HMVMX_DUMP_GUEST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
    +#define HMVMX_DUMP_GUEST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
         do { \
             LogRel(("  %s%-4s                       = {base=%016RX64 limit=%08x}\n", \
    …
         }

    -# undef HMVMX_DUMP_HOST_XDTR
    -# undef HMVMX_DUMP_HOST_FS_GS_TR
    -# undef HMVMX_DUMP_GUEST_SEGREG
    -# undef HMVMX_DUMP_GUEST_XDTR
    -#else
    -    NOREF(pVCpu);
    -#endif /* !IN_RC */
    +#undef HMVMX_DUMP_HOST_XDTR
    +#undef HMVMX_DUMP_HOST_FS_GS_TR
    +#undef HMVMX_DUMP_GUEST_SEGREG
    +#undef HMVMX_DUMP_GUEST_XDTR
     }

    …

    -#ifndef IN_RC
    -# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     /**
      * Notification callback for when a VM-exit happens outside VMX R0 code (e.g. in
    …
     }

    -# endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    -#endif /* IN_RC */
    -
    +#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    +
  • trunk/src/VBox/VMM/VMMAll/MMAll.cpp

    r76553 → r80161

     DECLINLINE(PMMLOOKUPHYPER) mmHyperLookupCC(PVM pVM, void *pv, uint32_t *poff)
     {
    -#ifdef IN_RC
    -    return mmHyperLookupRC(pVM, (RTRCPTR)pv, poff);
    -#elif defined(IN_RING0)
    +#ifdef IN_RING0
         return mmHyperLookupR0(pVM, pv, poff);
    +#elif defined(IN_RING3)
    +    return mmHyperLookupR3(pVM, pv, poff);
     #else
    -    return mmHyperLookupR3(pVM, pv, poff);
    +# error "Neither IN_RING0 nor IN_RING3!"
     #endif
     }
    …
     DECLINLINE(void *) mmHyperLookupCalcCC(PVM pVM, PMMLOOKUPHYPER pLookup, uint32_t off)
     {
    -#ifdef IN_RC
    -    return (void *)mmHyperLookupCalcRC(pVM, pLookup, off);
    -#elif defined(IN_RING0)
    +#ifdef IN_RING0
         return mmHyperLookupCalcR0(pVM, pLookup, off);
    -#else
    +#elif defined(IN_RING3)
         NOREF(pVM);
         return mmHyperLookupCalcR3(pLookup, off);
    +#else
    +# error "Neither IN_RING0 nor IN_RING3!"
     #endif
     }
    …
     }

    -#ifndef IN_RC
    +
     /**
      * Converts a raw-mode context address in the Hypervisor memory region to a current context address.
    …
         return NULL;
     }
    -#endif
    +

     #ifndef IN_RING3
    …

    -#ifndef IN_RC
     /**
      * Converts a current context address in the Hypervisor memory region to a raw-mode context address.
    …
         return NIL_RTRCPTR;

    -#endif
  • trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp

    r76553 → r80161

     #endif
         int rc = PDMCritSectEnter(&pHeap->Lock, VERR_SEM_BUSY);
    -#if defined(IN_RC) || defined(IN_RING0)
    +#ifdef IN_RING0
         if (rc == VERR_SEM_BUSY)
             rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_MMHYPER_LOCK, 0);
  • trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp

    r76553 → r80161

    -#if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(IN_RC)
    +#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

     /**
    …
     void *mmPagePoolPhys2Ptr(PMMPAGEPOOL pPool, RTHCPHYS HCPhys)
     {
    -#if 0 /** @todo have to fix the debugger, but until then this is going on my nerves. */
    -#ifdef IN_RING3
    +# if 0 /** @todo have to fix the debugger, but until then this is going on my nerves. */
    +#  ifdef IN_RING3
         VM_ASSERT_EMT(pPool->pVM);
    -#endif
    -#endif
    +#  endif
    +# endif
  • trunk/src/VBox/VMM/VMMAll/REMAll.cpp

    r76553 → r80161

     #endif /* !IN_RING3 */

    -#ifdef IN_RC
    -/**
    - * Flushes the physical handler notifications if the queue is almost full.
    - *
    - * This is for avoiding trouble in RC when changing CR3.
    - *
    - * @param   pVM         The cross context VM structure.
    - * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    - */
    -VMMDECL(void) REMNotifyHandlerPhysicalFlushIfAlmostFull(PVM pVM, PVMCPU pVCpu)
    -{
    -    Assert(pVM->cCpus == 1); NOREF(pVCpu);
    -
    -    /*
    -     * Less than 48 items means we should flush.
    -     */
    -    uint32_t cFree = 0;
    -    for (uint32_t idx = pVM->rem.s.idxFreeList;
    -         idx != UINT32_MAX;
    -         idx = pVM->rem.s.aHandlerNotifications[idx].idxNext)
    -    {
    -        Assert(idx < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
    -        if (++cFree >= 48)
    -            return;
    -    }
    -    AssertRelease(VM_FF_IS_SET(pVM, VM_FF_REM_HANDLER_NOTIFY));
    -    AssertRelease(pVM->rem.s.idxPendingList != UINT32_MAX);
    -
    -    /* Ok, we gotta flush them. */
    -    VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS, 0);
    -
    -    AssertRelease(pVM->rem.s.idxPendingList == UINT32_MAX);
    -    AssertRelease(pVM->rem.s.idxFreeList != UINT32_MAX);
    -}
    -#endif /* IN_RC */
    -

     /**
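
The removed REMNotifyHandlerPhysicalFlushIfAlmostFull above counted the remaining entries on an index-linked free list (chained through idxNext and terminated by UINT32_MAX) and forced a ring-3 replay once fewer than 48 free slots were left. A standalone sketch of that free-list walk, using simplified stand-in types rather than the real REM structures:

    #include <stdbool.h>
    #include <stdint.h>

    #define FLUSH_THRESHOLD 48                     /* same limit as the removed code above */

    typedef struct NOTIFYENTRY
    {
        uint32_t idxNext;                          /* index of the next free entry, UINT32_MAX ends the chain */
        /* ...notification payload omitted... */
    } NOTIFYENTRY;

    /* Returns true when fewer than FLUSH_THRESHOLD free entries remain,
     * i.e. when the pending notifications should be flushed. */
    static bool notifyQueueAlmostFull(NOTIFYENTRY const *paEntries, uint32_t idxFreeList)
    {
        uint32_t cFree = 0;
        for (uint32_t idx = idxFreeList; idx != UINT32_MAX; idx = paEntries[idx].idxNext)
            if (++cFree >= FLUSH_THRESHOLD)
                return false;                      /* enough slack left, no flush needed */
        return true;
    }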
  • trunk/src/VBox/VMM/VMMAll/SELMAll.cpp

    r80055 → r80161

    -/*********************************************************************************************************************************
    -*   Global Variables                                                                                                             *
    -*********************************************************************************************************************************/
    -#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
    -/** Segment register names. */
    -static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
    -#endif
    -

     /**
    …
         }

    -#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    -    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    -    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
    -        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    -    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
    -        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
    -#else
         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
    -#endif

         /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
    …
         }

    -#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    -    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
    -        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    -    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
    -        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
    -#else
         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
    -#endif

         /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
  • trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp

    r76553 → r80161

                     case TMTSCMODE_NATIVE_API:
                     {
    -#ifndef IN_RC
                         int rc = NEMHCResumeCpuTickOnAll(pVM, pVCpu, pVM->tm.s.u64LastPausedTSC);
                         AssertRCReturn(rc, rc);
                         pVCpu->tm.s.offTSCRawSrc = offTSCRawSrcOld = 0;
    -#else
    -                    AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    -#endif
                         break;
                     }
    …
                     u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
                     break;
    -#ifndef IN_RC
                 case TMTSCMODE_NATIVE_API:
                 {
    …
                     break;
                 }
    -#endif
                 default:
                     AssertFailedBreakStmt(u64 = SUPReadTsc());
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r76553 → r80161

             case SUPGIPMODE_SYNC_TSC:
             case SUPGIPMODE_INVARIANT_TSC:
    -#if defined(IN_RC) || defined(IN_RING0)
    +#ifdef IN_RING0
                 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
                     pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta    : RTTimeNanoTSLegacySyncInvarNoDelta;
    …

             case SUPGIPMODE_ASYNC_TSC:
    -#if defined(IN_RC) || defined(IN_RING0)
    +#ifdef IN_RING0
                 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
     #else
  • trunk/src/VBox/VMM/VMMAll/VMAll.cpp

    r76553 → r80161

     #include <iprt/assert.h>
     #include <iprt/string.h>
    -#ifndef IN_RC
    -# include <iprt/thread.h>
    -#endif
    +#include <iprt/thread.h>
  • trunk/src/VBox/VMM/VMMAll/VMMAll.cpp

    r80003 → r80161

    -#ifndef IN_RC
     /**
      * Counterpart to vmmInitFormatTypes, called by VMMR3Term and VMMR0Term.
    …
             RTStrFormatTypeDeregister("vmcpuset");
         }
    -#endif