Changeset 21666 in vbox
- Timestamp: Jul 17, 2009 9:33:05 AM (16 years ago)
- Location: trunk/src/VBox/VMM
- Files: 2 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/HWACCM.cpp
r21656 r21666 1494 1494 1495 1495 /** 1496 * Callback to patch a TPR instruction 1496 * Callback to patch a TPR instruction (vmmcall or mov cr8) 1497 1497 * 1498 1498 * @returns VBox status code. … … 1502 1502 * 1503 1503 */ 1504 DECLCALLBACK(int) hwaccmR3 PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)1504 DECLCALLBACK(int) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser) 1505 1505 { 1506 1506 PCPUMCTX pCtx = (PCPUMCTX)pvUser; … … 1623 1623 1624 1624 /** 1625 * Callback to patch a TPR instruction (vmmcall or mov cr8) 1626 * 1627 * @returns VBox status code. 1628 * @param pVM The VM handle. 1629 * @param pVCpu The VMCPU for the EMT we're being called on. 1630 * @param pvUser User specified CPU context 1631 * 1632 */ 1633 DECLCALLBACK(int) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser) 1634 { 1635 PCPUMCTX pCtx = (PCPUMCTX)pvUser; 1636 RTGCPTR oldrip = pCtx->rip; 1637 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState; 1638 unsigned cbOp; 1639 1640 Log(("Patch TPR access at %RGv\n", pCtx->rip)); 1641 1642 int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp); 1643 AssertRC(rc); 1644 if ( rc == VINF_SUCCESS 1645 && pDis->pCurInstr->opcode == OP_MOV) 1646 { 1647 if (pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches)) 1648 { 1649 uint32_t idx = pVM->hwaccm.s.svm.cPatches; 1650 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[idx]; 1651 1652 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp); 1653 AssertRC(rc); 1654 1655 pPatch->cbOp = cbOp; 1656 1657 if (pDis->param1.flags == USE_DISPLACEMENT32) 1658 { 1659 /* 1660 * TPR write: 1661 * 1662 * push ecx 1663 * push edx 1664 * push eax 1665 * xor edx, edx 1666 * mov eax, src_reg 1667 * mov ecx, MSR_K8_LSTAR (0xc0000082) 1668 * rdmsr 1669 * pop eax 1670 * pop edx 1671 * pop ecx 1672 * 1673 */ 1674 if (pDis->param2.flags == USE_REG_GEN32) 1675 { 1676 pPatch->enmType = HWACCMTPRINSTR_WRITE_REG; 1677 pPatch->uSrcOperand = 
pDis->param2.base.reg_gen; 1678 } 1679 else 1680 { 1681 Assert(pDis->param2.flags == USE_IMMEDIATE32); 1682 pPatch->enmType = HWACCMTPRINSTR_WRITE_IMM; 1683 pPatch->uSrcOperand = pDis->param2.parval; 1684 } 1685 // rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall)); 1686 // AssertRC(rc); 1687 1688 // memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall)); 1689 // pPatch->cbNewOp = sizeof(aVMMCall); 1690 } 1691 else 1692 { 1693 /* 1694 * TPR read: 1695 * 1696 * push ecx 1697 * push edx 1698 * push eax 1699 * mov ecx, MSR_K8_LSTAR (0xc0000082) 1700 * rdmsr 1701 * mov target_reg, eax 1702 * pop eax 1703 * pop edx 1704 * pop ecx 1705 * 1706 */ 1707 Assert(pDis->param1.flags == USE_REG_GEN32); 1708 1709 pPatch->enmType = HWACCMTPRINSTR_READ; 1710 pPatch->uDstOperand = pDis->param1.base.reg_gen; 1711 1712 // rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall)); 1713 // AssertRC(rc); 1714 1715 // memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall)); 1716 // pPatch->cbNewOp = sizeof(aVMMCall); 1717 } 1718 1719 pPatch->Core.Key = pCtx->eip; 1720 rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core); 1721 AssertRC(rc); 1722 1723 pVM->hwaccm.s.svm.cPatches++; 1724 return VINF_SUCCESS; 1725 } 1726 return hwaccmR0EmulateTprMov(pVCpu, pDis, pCtx, cbOp); 1727 } 1728 return VERR_ACCESS_DENIED; 1729 } 1730 /** 1625 1731 * Attempt to patch TPR mmio instructions 1626 1732 * … … 1632 1738 VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 1633 1739 { 1634 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, hwaccmR3PatchTprInstr, pCtx);1740 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, (pVM->hwaccm.s.pGuestPatchMem) ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr, pCtx); 1635 1741 AssertRC(rc); 1636 1742 return rc; -
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r21653 r21666 55 55 static int svmR0InterpretInvpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID); 56 56 static int svmR0EmulateTprVMMCall(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 57 static void svmR0SetMSRPermission(PVM pVM, unsigned ulMSR, bool fRead, bool fWrite); 57 58 58 59 /******************************************************************************* … … 362 363 pVMCB->guest.u64GPAT = 0x0007040600070406ULL; 363 364 } 365 366 /* The following MSRs are saved automatically by vmload/vmsave, so we allow the guest 367 * to modify them directly. 368 */ 369 svmR0SetMSRPermission(pVM, MSR_K8_LSTAR, true, true); 370 svmR0SetMSRPermission(pVM, MSR_K8_CSTAR, true, true); 371 svmR0SetMSRPermission(pVM, MSR_K6_STAR, true, true); 372 svmR0SetMSRPermission(pVM, MSR_K8_SF_MASK, true, true); 373 svmR0SetMSRPermission(pVM, MSR_K8_FS_BASE, true, true); 374 svmR0SetMSRPermission(pVM, MSR_K8_GS_BASE, true, true); 375 svmR0SetMSRPermission(pVM, MSR_K8_KERNEL_GS_BASE, true, true); 376 svmR0SetMSRPermission(pVM, MSR_IA32_SYSENTER_CS, true, true); 377 svmR0SetMSRPermission(pVM, MSR_IA32_SYSENTER_ESP, true, true); 378 svmR0SetMSRPermission(pVM, MSR_IA32_SYSENTER_EIP, true, true); 364 379 return rc; 365 380 } 366 381 382 383 /** 384 * Sets the permission bits for the specified MSR 385 * 386 * @param pVM The VM to operate on. 
387 * @param ulMSR MSR value 388 * @param fRead Reading allowed/disallowed 389 * @param fWrite Writing allowed/disallowed 390 */ 391 static void svmR0SetMSRPermission(PVM pVM, unsigned ulMSR, bool fRead, bool fWrite) 392 { 393 unsigned ulBit; 394 uint8_t *pMSRBitmap = (uint8_t *)pVM->hwaccm.s.svm.pMSRBitmap; 395 396 if (ulMSR <= 0x00001FFF) 397 { 398 /* Pentium®-compatible MSRs */ 399 ulBit = ulMSR * 2; 400 } 401 else 402 if ( ulMSR >= 0xC0000000 403 && ulMSR <= 0xC0001FFF) 404 { 405 /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */ 406 ulBit = (ulMSR - 0xC0000000) * 2; 407 pMSRBitmap += 0x800; 408 } 409 else 410 if ( ulMSR >= 0xC0010000 411 && ulMSR <= 0xC0011FFF) 412 { 413 /* AMD Seventh and Eighth Generation Processor MSRs */ 414 ulBit = (ulMSR - 0xC0001000) * 2; 415 pMSRBitmap += 0x1000; 416 } 417 else 418 { 419 AssertFailed(); 420 return; 421 } 422 Assert(ulBit < 16 * 1024 - 1); 423 if (fRead) 424 ASMBitClear(pMSRBitmap, ulBit); 425 else 426 ASMBitSet(pMSRBitmap, ulBit); 427 428 if (fWrite) 429 ASMBitClear(pMSRBitmap, ulBit + 1); 430 else 431 ASMBitSet(pMSRBitmap, ulBit + 1); 432 } 367 433 368 434 /** … … 1314 1380 pCtx->rax = pVMCB->guest.u64RAX; 1315 1381 1382 /* Save all the MSRs that can be changed by the guest without causing a world switch. (fs & gs base are saved with SVM_READ_SELREG) */ 1383 pCtx->msrSTAR = pVMCB->guest.u64STAR; /* legacy syscall eip, cs & ss */ 1384 pCtx->msrLSTAR = pVMCB->guest.u64LSTAR; /* 64 bits mode syscall rip */ 1385 pCtx->msrCSTAR = pVMCB->guest.u64CSTAR; /* compatibility mode syscall rip */ 1386 pCtx->msrSFMASK = pVMCB->guest.u64SFMASK; /* syscall flag mask */ 1316 1387 pCtx->msrKERNELGSBASE = pVMCB->guest.u64KernelGSBase; /* swapgs exchange value */ 1388 pCtx->SysEnter.cs = pVMCB->guest.u64SysEnterCS; 1389 pCtx->SysEnter.eip = pVMCB->guest.u64SysEnterEIP; 1390 pCtx->SysEnter.esp = pVMCB->guest.u64SysEnterESP; 1317 1391 1318 1392 /* Can be updated behind our back in the nested paging case. 
*/ … … 1326 1400 SVM_READ_SELREG(FS, fs); 1327 1401 SVM_READ_SELREG(GS, gs); 1328 1329 /*1330 * System MSRs1331 */1332 pCtx->SysEnter.cs = pVMCB->guest.u64SysEnterCS;1333 pCtx->SysEnter.eip = pVMCB->guest.u64SysEnterEIP;1334 pCtx->SysEnter.esp = pVMCB->guest.u64SysEnterESP;1335 1402 1336 1403 /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR; must sync everything otherwise we can get out of sync when jumping to ring 3. */
Note: See TracChangeset for help on using the changeset viewer.