Changeset 60915 in vbox
Timestamp: May 10, 2016 6:51:24 AM
svn:sync-xref-src-repo-rev: 107130
File: 1 edited
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
r60914 → r60915

The only change is code motion: the static helper emUpdateCRx() is moved verbatim from lines 1434–1597 (where it sat just before the LOG_ENABLED emMSRtoString() helper) down to lines 3088–3251, immediately after the CLTS handling that ends with "return CPUMSetGuestCR0(pVCpu, cr0 & ~X86_CR0_TS);". Since the removed and added blocks are identical, the relocated function is shown once:

/**
 * Update CRx.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pRegFrame   The register frame.
 * @param   DestRegCrx  CRx register index (DISUSE_REG_CR*)
 * @param   val         New CRx value
 *
 */
static int emUpdateCRx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
{
    uint64_t oldval;
    uint64_t msrEFER;
    uint32_t fValid;
    int      rc, rc2;
    NOREF(pVM);

    /** @todo Clean up this mess. */
    LogFlow(("emInterpretCRxWrite at %RGv CR%d <- %RX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
    switch (DestRegCrx)
    {
        case DISCREG_CR0:
            oldval = CPUMGetGuestCR0(pVCpu);
#ifdef IN_RC
            /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
            if (    (val    & (X86_CR0_WP | X86_CR0_AM))
                !=  (oldval & (X86_CR0_WP | X86_CR0_AM)))
                return VERR_EM_INTERPRETER;
#endif
            rc = VINF_SUCCESS;
#if !defined(VBOX_COMPARE_IEM_AND_EM) || !defined(VBOX_COMPARE_IEM_LAST)
            CPUMSetGuestCR0(pVCpu, val);
#else
            CPUMQueryGuestCtxPtr(pVCpu)->cr0 = val | X86_CR0_ET;
#endif
            val = CPUMGetGuestCR0(pVCpu);
            if (    (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
                !=  (val    & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
            {
                /* global flush */
                rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
                AssertRCReturn(rc, rc);
            }

            /* Deal with long mode enabling/disabling. */
            msrEFER = CPUMGetGuestEFER(pVCpu);
            if (msrEFER & MSR_K6_EFER_LME)
            {
                if (    !(oldval & X86_CR0_PG)
                    &&   (val    & X86_CR0_PG))
                {
                    /* Illegal to have an active 64 bits CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
                    if (pRegFrame->cs.Attr.n.u1Long)
                    {
                        AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
                        return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
                    }

                    /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
                    if (!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE))
                    {
                        AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
                        return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
                    }
                    msrEFER |= MSR_K6_EFER_LMA;
                }
                else
                if (    (oldval & X86_CR0_PG)
                    && !(val    & X86_CR0_PG))
                {
                    msrEFER &= ~MSR_K6_EFER_LMA;
                    /** @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
                }
                CPUMSetGuestEFER(pVCpu, msrEFER);
            }
            rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
            return rc2 == VINF_SUCCESS ? rc : rc2;

        case DISCREG_CR2:
            rc = CPUMSetGuestCR2(pVCpu, val); AssertRC(rc);
            return VINF_SUCCESS;

        case DISCREG_CR3:
            /* Reloading the current CR3 means the guest just wants to flush the TLBs */
            rc = CPUMSetGuestCR3(pVCpu, val); AssertRC(rc);
            if (CPUMGetGuestCR0(pVCpu) & X86_CR0_PG)
            {
                /* flush */
                rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
                AssertRC(rc);
            }
            return rc;

        case DISCREG_CR4:
            oldval = CPUMGetGuestCR4(pVCpu);
            rc = CPUMSetGuestCR4(pVCpu, val); AssertRC(rc);
            val = CPUMGetGuestCR4(pVCpu);

            /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
            msrEFER = CPUMGetGuestEFER(pVCpu);
            if (    (msrEFER & MSR_K6_EFER_LMA)
                &&  (oldval & X86_CR4_PAE)
                && !(val    & X86_CR4_PAE))
            {
                return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
            }

            /* From IEM iemCImpl_load_CrX. */
            /** @todo Check guest CPUID bits for determining corresponding valid bits. */
            fValid = X86_CR4_VME | X86_CR4_PVI
                   | X86_CR4_TSD | X86_CR4_DE
                   | X86_CR4_PSE | X86_CR4_PAE
                   | X86_CR4_MCE | X86_CR4_PGE
                   | X86_CR4_PCE | X86_CR4_OSFXSR
                   | X86_CR4_OSXMMEEXCPT;
            //if (xxx)
            //    fValid |= X86_CR4_VMXE;
            //if (xxx)
            //    fValid |= X86_CR4_OSXSAVE;
            if (val & ~(uint64_t)fValid)
            {
                Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", val, val & ~(uint64_t)fValid));
                return VERR_EM_INTERPRETER; /** @todo generate \#GP(0) */
            }

            rc = VINF_SUCCESS;
            if (    (oldval & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
                !=  (val    & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
            {
                /* global flush */
                rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
                AssertRCReturn(rc, rc);
            }

            /* Feeling extremely lazy. */
# ifdef IN_RC
            if (    (oldval & (X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT | X86_CR4_PCE | X86_CR4_MCE | X86_CR4_PAE | X86_CR4_DE | X86_CR4_TSD | X86_CR4_PVI | X86_CR4_VME))
                !=  (val    & (X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT | X86_CR4_PCE | X86_CR4_MCE | X86_CR4_PAE | X86_CR4_DE | X86_CR4_TSD | X86_CR4_PVI | X86_CR4_VME)))
            {
                Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            }
# endif
# ifdef VBOX_WITH_RAW_MODE
            if (((val ^ oldval) & X86_CR4_VME) && !HMIsEnabled(pVM))
                VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
# endif

            rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
            return rc2 == VINF_SUCCESS ? rc : rc2;

        case DISCREG_CR8:
            return PDMApicSetTPR(pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */

        default:
            AssertFailed();
        case DISCREG_CR1: /* illegal op */
            break;
    }
    return VERR_EM_INTERPRETER;
}
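Two details of the moved code are easy to gloss over. First, the DISCREG_CR0 case maintains EFER.LMA by hand when the guest toggles CR0.PG. Below is a minimal standalone sketch of that architectural rule, using hypothetical names and plain constants rather than the VBox CPUM API: long mode is active exactly when EFER.LME and CR0.PG are both set, and turning it on requires CR4.PAE to already be enabled.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CR0_PG   UINT64_C(0x80000000) /* CR0 bit 31: paging enabled */
#define CR4_PAE  UINT64_C(0x00000020) /* CR4 bit 5:  physical address extension */
#define EFER_LME UINT64_C(0x00000100) /* EFER bit 8:  long mode enable */
#define EFER_LMA UINT64_C(0x00000400) /* EFER bit 10: long mode active */

/* Recompute EFER.LMA after a CR0 write; returns false where real hardware
   would raise #GP(0) (enabling paging with LME set but PAE clear). */
static bool updateLongModeActive(uint64_t cr0, uint64_t cr4, uint64_t *pEfer)
{
    if (*pEfer & EFER_LME)
    {
        if (cr0 & CR0_PG)
        {
            if (!(cr4 & CR4_PAE))
                return false;
            *pEfer |= EFER_LMA;
        }
        else
            *pEfer &= ~EFER_LMA;
    }
    return true;
}

int main(void)
{
    uint64_t efer = EFER_LME;
    if (updateLongModeActive(CR0_PG, CR4_PAE, &efer))
        printf("LMA set: %d\n", (efer & EFER_LMA) != 0); /* prints 1 */
    return 0;
}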
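Second, the DISCREG_CR8 case depends on the layout of the local APIC task-priority register: CR8 carries the 4-bit priority class in bits 3:0, while the memory-mapped TPR keeps it in bits 7:4, hence the val << 4. A self-contained illustration of the mapping (hypothetical helper names, not the PDM API):

#include <stdint.h>
#include <stdio.h>

/* CR8 holds the task-priority class in bits 3:0; the APIC TPR register
   holds the same class in bits 7:4, so conversion is a 4-bit shift. */
static uint8_t  cr8ToTpr(uint64_t uCr8) { return (uint8_t)((uCr8 & 0xf) << 4); }
static uint64_t tprToCr8(uint8_t uTpr)  { return (uTpr >> 4) & 0xf; }

int main(void)
{
    for (uint64_t uCr8 = 0; uCr8 <= 15; uCr8 += 5)
        printf("CR8=%2u -> TPR=0x%02x -> CR8=%u\n",
               (unsigned)uCr8, cr8ToTpr(uCr8), (unsigned)tprToCr8(cr8ToTpr(uCr8)));
    return 0;
}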