VirtualBox

Changeset 80050 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Jul 29, 2019 8:04:35 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 132487
Message:
Main: Kicking out raw-mode - CPUM*Hyper*(). bugref:9517

File: 1 edited

Legend: unmodified lines are shown as plain context, added lines are prefixed with "+", removed lines with "-".
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

--- trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r80024)
+++ trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r80050)

@@ -71 +71 @@
  * Lazily loads the hidden parts of a selector register when using raw-mode.
  */
-#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
-# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
-    do \
-    { \
-        if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
-            cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
-    } while (0)
-#else
-# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
-#endif
+#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))
 
 /** @def CPUM_INT_ASSERT_NOT_EXTRN
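The removed raw-mode variant wraps its body in do { ... } while (0) so that the multi-statement macro expands to exactly one statement and composes safely with unbraced if/else. A minimal stand-alone sketch of the idiom (LAZY_LOAD and its arguments are hypothetical, not VirtualBox code):

    #include <stdio.h>

    /* The do { } while (0) wrapper makes this multi-statement macro behave
     * as a single statement, mirroring the shape of the removed raw-mode
     * CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS variant. */
    #define LAZY_LOAD(valid, load) \
        do \
        { \
            if (!(valid)) \
                (load); \
        } while (0)

    int main(void)
    {
        int fValid = 0, cLoads = 0;
        if (!fValid)
            LAZY_LOAD(fValid, (fValid = 1, cLoads++)); /* one statement, so the else still binds correctly */
        else
            printf("already valid\n");
        printf("loads=%d valid=%d\n", cLoads, fValid);
        return 0;
    }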
     
@@ -92 +83 @@
     AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
               ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
-
-
-
-
-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-
-/**
- * Does the lazy hidden selector register loading.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   pSReg       The selector register to lazily load hidden parts of.
- */
-static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
-{
-    Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
-    Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
-    Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
-
-    if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
-    {
-        /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
-        pSReg->Attr.u               = 0;
-        pSReg->Attr.n.u4Type        = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
-        pSReg->Attr.n.u1DescType    = 1; /* code/data segment */
-        pSReg->Attr.n.u2Dpl         = 3;
-        pSReg->Attr.n.u1Present     = 1;
-        pSReg->u32Limit             = 0x0000ffff;
-        pSReg->u64Base              = (uint32_t)pSReg->Sel << 4;
-        pSReg->ValidSel             = pSReg->Sel;
-        pSReg->fFlags               = CPUMSELREG_FLAGS_VALID;
-        /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
-    }
-    else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
-    {
-        /* Real mode - leave the limit and flags alone here, at least for now. */
-        pSReg->u64Base              = (uint32_t)pSReg->Sel << 4;
-        pSReg->ValidSel             = pSReg->Sel;
-        pSReg->fFlags               = CPUMSELREG_FLAGS_VALID;
-    }
-    else
-    {
-        /* Protected mode - get it from the selector descriptor tables. */
-        if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
-        {
-            Assert(!CPUMIsGuestInLongMode(pVCpu));
-            pSReg->Sel              = 0;
-            pSReg->u64Base          = 0;
-            pSReg->u32Limit         = 0;
-            pSReg->Attr.u           = 0;
-            pSReg->ValidSel         = 0;
-            pSReg->fFlags           = CPUMSELREG_FLAGS_VALID;
-            /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
-        }
-        else
-            SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
-    }
-}
-
-
-/**
- * Makes sure the hidden CS and SS selector registers are valid, loading them if
- * necessary.
- *
- * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
-{
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
-}
-
-
-/**
- * Loads the hidden parts of a selector register.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   pSReg       The selector register to lazily load hidden parts of.
- */
-VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
-{
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
-}
-
-#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
-
-
-/**
- * Obsolete.
- *
- * We don't support nested hypervisor context interrupts or traps.  Life is much
- * simpler when we don't.  It's also slightly faster at times.
- *
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
-{
-    return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
-}
-
-
-/**
- * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
- *
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
-{
-    return &pVCpu->cpum.s.Hyper;
-}
-
-
-VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
-{
-    pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
-    pVCpu->cpum.s.Hyper.gdtr.pGdt  = addr;
-}
-
-
-VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
-{
-    pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
-    pVCpu->cpum.s.Hyper.idtr.pIdt  = addr;
-}
 
 
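In the V8086 and real-mode branches above, the removed loader derives the hidden base directly from the selector: base = selector << 4 (with a fixed 0xFFFF limit in the V8086 case). A minimal sketch of that address math (RealModeSegBase/RealModeLinear are hypothetical names, not VirtualBox code):

    #include <stdint.h>
    #include <stdio.h>

    /* Real-mode/V8086 segmentation: the hidden segment base is the 16-bit
     * selector value shifted left by four. */
    static uint32_t RealModeSegBase(uint16_t uSel)
    {
        return (uint32_t)uSel << 4;
    }

    static uint32_t RealModeLinear(uint16_t uSel, uint16_t uOff)
    {
        return RealModeSegBase(uSel) + uOff; /* base + offset, no limit check here */
    }

    int main(void)
    {
        /* 0xB800:0x0000 -> 0xB8000, the classic VGA color text buffer. */
        printf("base=%#x linear=%#x\n",
               (unsigned)RealModeSegBase(0xB800),
               (unsigned)RealModeLinear(0xB800, 0x0000));
        return 0;
    }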
     
@@ -233 +101 @@
 
 
-VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
-{
-    pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
-}
-
-
-VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
-{
-    pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
-}
-
-
-VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
-{
-    pVCpu->cpum.s.Hyper.es.Sel = SelES;
-}
-
-
-VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
-{
-    pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
-}
-
-
-VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
-{
-    pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
-}
-
-
-VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
-{
-    pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
-}
-
-
 VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
 {
     
@@ -275 +107 @@
 
 
-VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
-{
-    pVCpu->cpum.s.Hyper.edx = u32EDX;
-}
-
-
-VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
-{
-    pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
-    return VINF_SUCCESS;
-}
-
-
 VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
 {
     pVCpu->cpum.s.Hyper.eip = u32EIP;
-}
-
-
-/**
- * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
- * EFLAGS and EIP prior to resuming guest execution.
- *
- * All general registers not given as parameters will be set to 0.  The EFLAGS
- * register will be set to sane values for C/C++ code execution with interrupts
- * disabled and IOPL 0.
- *
- * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
- * @param   u32EIP              The EIP value.
- * @param   u32ESP              The ESP value.
- * @param   u32EAX              The EAX value.
- * @param   u32EDX              The EDX value.
- */
-VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
-{
-    pVCpu->cpum.s.Hyper.eip      = u32EIP;
-    pVCpu->cpum.s.Hyper.esp      = u32ESP;
-    pVCpu->cpum.s.Hyper.eax      = u32EAX;
-    pVCpu->cpum.s.Hyper.edx      = u32EDX;
-    pVCpu->cpum.s.Hyper.ecx      = 0;
-    pVCpu->cpum.s.Hyper.ebx      = 0;
-    pVCpu->cpum.s.Hyper.ebp      = 0;
-    pVCpu->cpum.s.Hyper.esi      = 0;
-    pVCpu->cpum.s.Hyper.edi      = 0;
-    pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
-}
-
-
-VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
-{
-    pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
-}
-
-
-VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
-{
-    pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
 }
 
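The removed CPUMSetHyperState() initializes EFLAGS to X86_EFL_1, i.e. only the architecturally reserved always-one bit 1, which is what leaves interrupts disabled (IF clear) and IOPL at 0 as the doc comment promises. A small sketch assuming the standard x86 EFLAGS layout (the MY_EFL_* constants are stand-ins, not the VBox definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_EFL_1    UINT32_C(0x00000002)  /* reserved bit 1, always reads as 1 */
    #define MY_EFL_IF   UINT32_C(0x00000200)  /* interrupt enable flag, bit 9 */

    int main(void)
    {
        uint32_t fEfl = MY_EFL_1;             /* the "sane" startup value */
        printf("eflags=%#06x IF=%d IOPL=%u\n",
               (unsigned)fEfl,
               (fEfl & MY_EFL_IF) != 0,       /* 0: interrupts disabled */
               (unsigned)((fEfl >> 12) & 3)); /* IOPL field, bits 12-13 */
        return 0;
    }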
     
@@ -408 +186 @@
 
 
-VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.cs.Sel;
-}
-
-
-VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.ds.Sel;
-}
-
-
-VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.es.Sel;
-}
-
-
-VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.fs.Sel;
-}
-
-
-VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.gs.Sel;
-}
-
-
-VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.ss.Sel;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.eax;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.ebx;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.ecx;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.edx;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.esi;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.edi;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.ebp;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.esp;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.eflags.u32;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.eip;
-}
-
-
-VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.rip;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
-{
-    if (pcbLimit)
-        *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
-    return pVCpu->cpum.s.Hyper.idtr.pIdt;
-}
-
-
-VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
-{
-    if (pcbLimit)
-        *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
-    return pVCpu->cpum.s.Hyper.gdtr.pGdt;
-}
-
-
-VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
-{
-    return pVCpu->cpum.s.Hyper.ldtr.Sel;
-}
-
-
 VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
 {
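The removed CPUMGetHyperGDTR()/CPUMGetHyperIDTR() use an optional out-parameter pattern: the base is returned and the limit is written through pcbLimit only when the caller passes a non-NULL pointer. A usage sketch against a hypothetical stand-in (GetGdtr and its values are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in mirroring the removed getter's shape: return the base,
     * optionally store the limit. */
    static uint32_t GetGdtr(uint16_t *pcbLimit)
    {
        const uint32_t uBase = 0x00010000; /* made-up base */
        const uint16_t cbGdt = 0x7F;       /* made-up limit */
        if (pcbLimit)
            *pcbLimit = cbGdt;
        return uBase;
    }

    int main(void)
    {
        uint16_t cbLimit;
        uint32_t uBase = GetGdtr(&cbLimit);                 /* caller wants both values */
        printf("base=%#x limit=%#x\n", (unsigned)uBase, (unsigned)cbLimit);
        printf("base only=%#x\n", (unsigned)GetGdtr(NULL)); /* limit ignored */
        return 0;
    }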
     
@@ -658 +312 @@
 VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
 {
-#ifdef IN_RC
-    /*
-     * Check if we need to change hypervisor CR0 because
-     * of math stuff.
-     */
-    if (    (cr0                     & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
-        !=  (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
-    {
-        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
-        {
-            /*
-             * We haven't loaded the guest FPU state yet, so TS and MP are both set
-             * and EM should be reflecting the guest EM (it always does this).
-             */
-            if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
-            {
-                uint32_t HyperCR0 = ASMGetCR0();
-                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
-                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
-                HyperCR0 &= ~X86_CR0_EM;
-                HyperCR0 |= cr0 & X86_CR0_EM;
-                Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
-                ASMSetCR0(HyperCR0);
-            }
-# ifdef VBOX_STRICT
-            else
-            {
-                uint32_t HyperCR0 = ASMGetCR0();
-                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
-                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
-            }
-# endif
-        }
-        else
-        {
-            /*
-             * Already loaded the guest FPU state, so we're just mirroring
-             * the guest flags.
-             */
-            uint32_t HyperCR0 = ASMGetCR0();
-            AssertMsg(     (HyperCR0                 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
-                      ==   (pVCpu->cpum.s.Guest.cr0  & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
-                      ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
-            HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
-            HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
-            Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
-            ASMSetCR0(HyperCR0);
-        }
-    }
-#endif /* IN_RC */
-
     /*
      * Check for changes causing TLB flushes (for REM).
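The removed IN_RC block mirrors the guest's TS, EM and MP bits into the CR0 the hypervisor is actually running on, using a clear-then-OR masked merge. A sketch of that merge assuming the architectural CR0 bit positions (the MY_CR0_* constants are stand-ins for the VBox X86_CR0_* definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_CR0_MP UINT32_C(0x00000002)  /* monitor coprocessor, bit 1 */
    #define MY_CR0_EM UINT32_C(0x00000004)  /* FPU emulation, bit 2 */
    #define MY_CR0_TS UINT32_C(0x00000008)  /* task switched, bit 3 */
    #define MIRROR_MASK (MY_CR0_TS | MY_CR0_EM | MY_CR0_MP)

    /* Clear the mirrored bits in the hypervisor CR0, then copy them over
     * from the guest value, the same shape as the removed code's
     * HyperCR0 &= ~...; HyperCR0 |= cr0 & ...; sequence. */
    static uint32_t MergeCr0(uint32_t uHyperCr0, uint32_t uGuestCr0)
    {
        uHyperCr0 &= ~MIRROR_MASK;
        uHyperCr0 |= uGuestCr0 & MIRROR_MASK;
        return uHyperCr0;
    }

    int main(void)
    {
        uint32_t uHyper = UINT32_C(0x80000001) | MY_CR0_TS | MY_CR0_MP; /* PG+PE plus TS/MP */
        uint32_t uGuest = MY_CR0_EM;
        printf("merged=%#010x\n", (unsigned)MergeCr0(uHyper, uGuest));  /* 0x80000005 */
        return 0;
    }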
     
@@ -1632 +1235 @@
     const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
 
+    /** @todo r=bird: I'm totally confused by fForceHyper! */
 #ifdef IN_RING0
     if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
         fForceHyper = true;
 #endif
-    if (  (!VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7))
-        & X86_DR7_ENABLED_MASK)
+    if ((!fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
     {
         Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-#ifdef IN_RC
-        bool const fRawModeEnabled = true;
-#elif defined(IN_RING3)
-        bool const fRawModeEnabled = VM_IS_RAW_MODE_ENABLED(pVM);
-#endif
 
         /*
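The simplified condition keeps the core test: OR the guest and DBGF DR7 images together and check the breakpoint enable bits. A minimal sketch assuming X86_DR7_ENABLED_MASK covers the L0..L3/G0..G3 enables in DR7 bits 0..7 (MY_DR7_ENABLED_MASK below is a stand-in):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Breakpoints 0..3 each have a local (L) and global (G) enable bit in
     * DR7 bits 0..7; OR-ing two DR7 images and masking those bits tells
     * whether any breakpoint is armed in either image. */
    #define MY_DR7_ENABLED_MASK UINT64_C(0x00000000000000FF)

    static bool AnyBreakpointArmed(uint64_t uGstDr7, uint64_t uDbgfDr7)
    {
        return ((uGstDr7 | uDbgfDr7) & MY_DR7_ENABLED_MASK) != 0;
    }

    int main(void)
    {
        printf("%d\n", AnyBreakpointArmed(0, 0));                /* 0: nothing armed */
        printf("%d\n", AnyBreakpointArmed(UINT64_C(1) << 2, 0)); /* 1: L1 set in the guest image */
        return 0;
    }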
     
@@ -1663 +1261 @@
         {
             uNewDr0 = CPUMGetGuestDR0(pVCpu);
-#ifndef IN_RING0
-            if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
-                uNewDr0 = 0;
-            else
-#endif
-                uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
+            uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
         }
         else
     
@@ -1683 +1276 @@
         {
             uNewDr1 = CPUMGetGuestDR1(pVCpu);
-#ifndef IN_RING0
-            if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
-                uNewDr1 = 0;
-            else
-#endif
-                uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
+            uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
         }
         else
     
@@ -1703 +1291 @@
         {
             uNewDr2 = CPUMGetGuestDR2(pVCpu);
-#ifndef IN_RING0
-            if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
-                uNewDr2 = 0;
-            else
-#endif
-                uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
+            uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
         }
         else
     
@@ -1723 +1306 @@
         {
             uNewDr3 = CPUMGetGuestDR3(pVCpu);
-#ifndef IN_RING0
-            if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
-                uNewDr3 = 0;
-            else
-#endif
-                uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
+            uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
         }
         else
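The four hunks above repeat one pattern per breakpoint: when guest breakpoint n is kept, its Ln/Gn enables and its RWn/LENn control fields are copied from the guest DR7 into the merged value. A sketch generalizing the per-slot mask, assuming the architectural DR7 layout (Dr7SlotMask is a hypothetical helper, not VBox code):

    #include <stdint.h>
    #include <stdio.h>

    /* For breakpoint n (0..3), DR7 carries the Ln/Gn enables at bits
     * 2n and 2n+1, RWn at bits 16+4n..17+4n and LENn at bits 18+4n..19+4n. */
    static uint64_t Dr7SlotMask(unsigned iBp)
    {
        uint64_t fMask = UINT64_C(3) << (iBp * 2);   /* Ln + Gn */
        fMask |= UINT64_C(0xF) << (16 + iBp * 4);    /* RWn + LENn */
        return fMask;
    }

    int main(void)
    {
        uint64_t uGstDr7 = (UINT64_C(1) << 2) | (UINT64_C(0xD) << 20); /* L1 plus RW1/LEN1 bits */
        uint64_t uNewDr7 = 0;
        uNewDr7 |= uGstDr7 & Dr7SlotMask(1); /* merge guest breakpoint 1 only */
        printf("dr7=%#018llx\n", (unsigned long long)uNewDr7);
        return 0;
    }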
     
@@ -1736 +1314 @@
          * Apply the updates.
          */
-#ifdef IN_RC
-        /* Make sure to save host registers first. */
-        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
-        {
-            if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
-            {
-                pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
-                pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
-            }
-            pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
-            pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
-            pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
-            pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
-            pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
-
-            /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
-            pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
-            ASMSetDR0(uNewDr0);
-            pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
-            ASMSetDR1(uNewDr1);
-            pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
-            ASMSetDR2(uNewDr2);
-            pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
-            ASMSetDR3(uNewDr3);
-            ASMSetDR6(X86_DR6_INIT_VAL);
-            pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
-            ASMSetDR7(uNewDr7);
-        }
-        else
-#endif
-        {
-            pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
-            if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
-                CPUMSetHyperDR3(pVCpu, uNewDr3);
-            if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
-                CPUMSetHyperDR2(pVCpu, uNewDr2);
-            if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
-                CPUMSetHyperDR1(pVCpu, uNewDr1);
-            if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
-                CPUMSetHyperDR0(pVCpu, uNewDr0);
-            if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
-                CPUMSetHyperDR7(pVCpu, uNewDr7);
-        }
+        pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
+        if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
+            CPUMSetHyperDR3(pVCpu, uNewDr3);
+        if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
+            CPUMSetHyperDR2(pVCpu, uNewDr2);
+        if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
+            CPUMSetHyperDR1(pVCpu, uNewDr1);
+        if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
+            CPUMSetHyperDR0(pVCpu, uNewDr0);
+        if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
+            CPUMSetHyperDR7(pVCpu, uNewDr7);
     }
 #ifdef IN_RING0
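The surviving path updates each hypervisor DRx only when the new value differs from the cached one, so unchanged (and comparatively expensive to load) debug registers are never rewritten. A minimal sketch of that change-detection pattern (the names are illustrative, not the CPUM API):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned g_cWrites; /* counts simulated hardware register loads */

    /* Only push the value into the register when it differs from the
     * cached copy, updating the cache alongside. */
    static void SetRegIfChanged(uint64_t *puCache, uint64_t uNew)
    {
        if (*puCache != uNew)
        {
            *puCache = uNew;
            g_cWrites++; /* stand-in for the real register load */
        }
    }

    int main(void)
    {
        uint64_t uDr7Cache = 0;
        SetRegIfChanged(&uDr7Cache, 0x400); /* changed: one write */
        SetRegIfChanged(&uDr7Cache, 0x400); /* unchanged: skipped */
        printf("writes=%u cache=%#llx\n", g_cWrites, (unsigned long long)uDr7Cache);
        return 0;
    }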