Changeset 47240 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jul 18, 2013 11:53:08 PM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r47205 → r47240

 /** @} */


 /**
  * Loads the specified stack far pointer from the TSS.
 …
     }
     return rcStrict;
 }
+
+
+/**
+ * Loads the specified stack pointer from the 64-bit TSS.
+ *
+ * @returns VBox strict status code.
+ * @param   pIemCpu             The IEM per CPU instance data.
+ * @param   pCtx                The CPU context.
+ * @param   uCpl                The CPL to load the stack for.
+ * @param   uIst                The interrupt stack table index, 0 if to use uCpl.
+ * @param   puRsp               Where to return the new stack pointer.
+ */
+static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
+                                               uint64_t *puRsp)
+{
+    Assert(uCpl < 4);
+    Assert(uIst < 8);
+    *puRsp = 0; /* make gcc happy */
+
+    AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
+
+    uint32_t off;
+    if (uIst)
+        off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
+    else
+        off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
+    if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
+    {
+        Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
+        return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
+    }
+
+    return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
+}

 …
     }

-    /* Don't allow lowering the privilege level. */
-    /** @todo Does the lowering of privileges apply to software interrupts
-     *        only?  This has bearings on the more-privileged or
-     *        same-privilege stack behavior further down.  A testcase would
-     *        be nice. */
+    /* Don't allow lowering the privilege level.  For non-conforming CS
+       selectors, the CS.DPL sets the privilege level the trap/interrupt
+       handler runs at.  For conforming CS selectors, the CPL remains
+       unchanged, but the CS.DPL must be <= CPL. */
+    /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
+     *        when CPU in Ring-0. Result \#GP? */
     if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
     {
 …
         return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
+

     /* Make sure the selector is present. */
 …
     }

-#if 0
     /*
-     * If the privilege level changes, we need to get a new stack from the TSS.
-     * This in turns means validating the new SS and ESP...
+     * If the privilege level changes or if the IST isn't zero, we need to get
+     * a new stack from the TSS.
      */
+    uint64_t        uNewRsp;
     uint32_t        fEfl    = IEMMISC_GET_EFL(pIemCpu, pCtx);
     uint8_t const   uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
                             ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
-    if (uNewCpl != pIemCpu->uCpl)
-    {
-        RTSEL    NewSS;
-        uint32_t uNewEsp;
-        rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
+    if (   uNewCpl != pIemCpu->uCpl
+        || Idte.Gate.u3IST != 0)
+    {
+        rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
-
-        IEMSELDESC DescSS;
-        rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-
-        /* Check that there is sufficient space for the stack frame. */
-        uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
-        if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
-        {
-            IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
-        }
-
-        uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
-        if (   uNewEsp - 1 > cbLimitSS
-            || uNewEsp < cbStackFrame)
-        {
-            Log(("iemRaiseXcptOrIntInLongMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
-                 u8Vector, NewSS, uNewEsp, cbStackFrame));
-            return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
-        }
-
-        /*
-         * Start making changes.
-         */
-
-        /* Create the stack frame. */
-        RTPTRUNION uStackFrame;
-        rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
-                             uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-        void * const pvStackFrame = uStackFrame.pv;
-
-        if (fFlags & IEM_XCPT_FLAGS_ERR)
-            *uStackFrame.pu32++ = uErr;
-        uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
-                            ? pCtx->eip + cbInstr : pCtx->eip;
-        uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
-        uStackFrame.pu32[2] = fEfl;
-        uStackFrame.pu32[3] = pCtx->esp;
-        uStackFrame.pu32[4] = pCtx->ss.Sel;
-        rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-
-        /* Mark the selectors 'accessed' (hope this is the correct time). */
-        /** @todo testcase: excatly _when_ are the accessed bits set - before or
-         *        after pushing the stack frame? (Write protect the gdt + stack to
-         *        find out.) */
-        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-        {
-            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-        }
-
-        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-        {
-            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-            DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-        }
-
-        /*
-         * Start comitting the register changes (joins with the DPL=CPL branch).
-         */
-        pCtx->ss.Sel         = NewSS;
-        pCtx->ss.ValidSel    = NewSS;
-        pCtx->ss.fFlags      = CPUMSELREG_FLAGS_VALID;
-        pCtx->ss.u32Limit    = cbLimitSS;
-        pCtx->ss.u64Base     = X86DESC_BASE(&DescSS.Legacy);
-        pCtx->ss.Attr.u      = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
-        pCtx->rsp            = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
-        pIemCpu->uCpl        = uNewCpl;
-    }
-    /*
-     * Same privilege, no stack change and smaller stack frame.
-     */
-    else
-    {
-        uint64_t   uNewRsp;
-        RTPTRUNION uStackFrame;
-        uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
-        rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-        void * const pvStackFrame = uStackFrame.pv;
-
-        if (fFlags & IEM_XCPT_FLAGS_ERR)
-            *uStackFrame.pu32++ = uErr;
-        uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
-                            ? pCtx->eip + cbInstr : pCtx->eip;
-        uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
-        uStackFrame.pu32[2] = fEfl;
-        rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-
-        /* Mark the CS selector as 'accessed'. */
-        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-        {
-            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-        }
-
-        /*
-         * Start committing the register changes (joins with the other branch).
-         */
-        pCtx->rsp = uNewRsp;
-    }
-
-    /* ... register committing continues. */
-    pCtx->cs.Sel         = (NewCS & ~X86_SEL_RPL) | uNewCpl;
-    pCtx->cs.ValidSel    = (NewCS & ~X86_SEL_RPL) | uNewCpl;
-    pCtx->cs.fFlags      = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.u32Limit    = cbLimitCS;
-    pCtx->cs.u64Base     = X86DESC_BASE(&DescCS.Legacy);
-    pCtx->cs.Attr.u      = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
-
-    pCtx->rip            = uNewEip;
+        /** @todo testcase: is this aligned? */
+    }
+    else
+        uNewRsp = pCtx->rsp & 0xf;
+
+    /*
+     * Start making changes.
+     */
+
+    /* Create the stack frame. */
+    uint32_t   cbStackFrame = sizeof(uint64_t) * (6 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
+    RTPTRUNION uStackFrame;
+    rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
+                         uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+    void * const pvStackFrame = uStackFrame.pv;
+
+    if (fFlags & IEM_XCPT_FLAGS_ERR)
+        *uStackFrame.pu64++ = uErr;
+    uStackFrame.pu64[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
+                        ? pCtx->rip + cbInstr : pCtx->rip;
+    uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
+    uStackFrame.pu64[2] = fEfl;
+    uStackFrame.pu64[3] = pCtx->rsp;
+    uStackFrame.pu64[4] = pCtx->ss.Sel;
+    rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    /* Mark the CS selectors 'accessed' (hope this is the correct time). */
+    /** @todo testcase: excatly _when_ are the accessed bits set - before or
+     *        after pushing the stack frame? (Write protect the gdt + stack to
+     *        find out.) */
+    if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+    {
+        rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+    }
+
+    /*
+     * Start comitting the register changes.
+     */
+    /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
+     *        hidden registers when interrupting 32-bit or 16-bit code! */
+    pCtx->ss.Sel            = 0 | uNewCpl;
+    pCtx->ss.ValidSel       = 0 | uNewCpl;
+    pCtx->ss.fFlags         = CPUMSELREG_FLAGS_VALID;
+    pCtx->ss.u32Limit       = UINT32_MAX;
+    pCtx->ss.u64Base        = 0;
+    pCtx->ss.Attr.u         = 0;
+    pCtx->ss.Attr.n.u4Type  = X86_SEL_TYPE_RW_ACC;
+    pCtx->ss.Attr.n.u1DescType = 1;
+    pCtx->ss.Attr.n.u2Dpl   = uNewCpl;
+    pCtx->ss.Attr.n.u1Present = uNewCpl;
+    pCtx->ss.Attr.n.u4LimitHigh = 0;
+    pCtx->ss.Attr.n.u1Available = 0;
+    pCtx->ss.Attr.n.u1Long  = 0;
+    pCtx->ss.Attr.n.u1DefBig = 0;
+    pCtx->ss.Attr.n.u1Granularity = 0;
+    pCtx->ss.Attr.n.u1Unusable = 1;
+    pCtx->rsp               = uNewRsp - cbStackFrame;
+    pCtx->cs.Sel            = (NewCS & ~X86_SEL_RPL) | uNewCpl;
+    pCtx->cs.ValidSel       = (NewCS & ~X86_SEL_RPL) | uNewCpl;
+    pCtx->cs.fFlags         = CPUMSELREG_FLAGS_VALID;
+    pCtx->cs.u32Limit       = X86DESC_LIMIT_G(&DescCS.Legacy);
+    pCtx->cs.u64Base        = X86DESC_BASE(&DescCS.Legacy);
+    pCtx->cs.Attr.u         = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+    pCtx->rip               = uNewRip;
+    pIemCpu->uCpl           = uNewCpl;
+
     fEfl &= ~fEflToClear;
     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 …

     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
-#else
-    NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
-    /** @todo implement me. */
-    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n"));
-#endif
 }
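In short, the IEMAll.cpp change replaces the disabled (#if 0) 32-bit style dispatch code with a real long-mode implementation: the new RSP is fetched from the 64-bit TSS (ist1..ist7 when the IDT gate carries a non-zero IST index, otherwise rsp0..rsp2 selected by the target CPL), and a qword frame of SS, RSP, RFLAGS, CS, RIP plus an optional error code is written below it. The standalone C sketch below only illustrates that offset and frame arithmetic; it is not VirtualBox code, and the trimmed Tss64Demo struct and the demo values are assumptions made purely for the example.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Trimmed-down 64-bit TSS layout: only the fields the offset math cares
   about (rsp0..rsp2 and ist1..ist7), packed so the offsets match the
   architectural layout (rsp0 at 0x04, ist1 at 0x24). */
#pragma pack(push, 1)
typedef struct Tss64Demo
{
    uint32_t u32Reserved0;                              /* 0x00 */
    uint64_t rsp0, rsp1, rsp2;                          /* 0x04, 0x0c, 0x14 */
    uint64_t u64Reserved1;                              /* 0x1c */
    uint64_t ist1, ist2, ist3, ist4, ist5, ist6, ist7;  /* 0x24.. */
} Tss64Demo;
#pragma pack(pop)

/* Same selection rule as the new iemRaiseLoadStackFromTss64 helper: a
   non-zero IST index from the IDT gate wins, otherwise the stack for the
   target CPL is used.  Returns the byte offset of the RSP value in the TSS. */
static uint32_t TssRspOffset(uint8_t uCpl, uint8_t uIst)
{
    if (uIst)
        return (uint32_t)((uIst - 1) * sizeof(uint64_t) + offsetof(Tss64Demo, ist1));
    return (uint32_t)(uCpl * sizeof(uint64_t) + offsetof(Tss64Demo, rsp0));
}

/* Bytes actually stored for the handler by the hunk above: RIP, CS, RFLAGS,
   RSP and SS as qwords, plus one extra qword when the vector has an error
   code (the error code ends up at the lowest address, i.e. at the new RSP). */
static uint32_t XcptFrameBytesStored(int fHasErrorCode)
{
    return (uint32_t)sizeof(uint64_t) * (5 + !!fHasErrorCode);
}

int main(void)
{
    printf("CPL0, no IST : TSS offset %#x\n", (unsigned)TssRspOffset(0, 0)); /* 0x04 = rsp0 */
    printf("CPL0, IST=1  : TSS offset %#x\n", (unsigned)TssRspOffset(0, 1)); /* 0x24 = ist1 */
    printf("#PF frame    : %u bytes stored\n", (unsigned)XcptFrameBytesStored(1));
    printf("INT n frame  : %u bytes stored\n", (unsigned)XcptFrameBytesStored(0));
    return 0;
}

Running it prints 0x04 for a CPL-0 stack with no IST and 0x24 for IST 1, which is the same result as the RT_OFFSETOF(X86TSS64, rsp0/ist1) arithmetic in the new helper.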
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
r47138 → r47240

         case IEMMODE_64BIT:
+            /** @todo AMD does not believe in the case (see bs-cpu-xcpt-1) and will
+             *        apparently ignore REX.W, at least for the jmp far qword [rsp] and
+             *        call far qword [rsp] encodings. */
             IEM_MC_BEGIN(3, 1);
             IEM_MC_ARG(uint16_t, u16Sel, 0);
 …
 FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
 {
-    IEMOP_MNEMONIC("jmp Ep");
-    IEMOP_HLP_NO_64BIT();
+    IEMOP_MNEMONIC("jmpf Ep");
+    IEMOP_HLP_NO_64BIT(); /** @todo this isn't quite right I'm afraid... */
     return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_FarJmp);
 }
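For context on the REX.W @todo: the far-pointer memory operand of jmp/call far is m16:32 (a 32-bit offset followed by a 16-bit selector) without REX.W and m16:64 (a 64-bit offset followed by the selector) with it, and the note observes that AMD CPUs apparently keep using the shorter form. The sketch below uses hypothetical helper names, not IEM code; it just decodes the same 10 bytes both ways to show how the selector ends up being read from different bytes depending on which form the CPU honours.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef struct FarPtrDemo
{
    uint64_t offDest;   /* target offset (EIP/RIP) */
    uint16_t uSel;      /* target code selector    */
} FarPtrDemo;

/* m16:32 form - the layout the @todo says AMD apparently uses even with REX.W. */
static FarPtrDemo DecodeFarPtr1632(const uint8_t *pb)
{
    FarPtrDemo Ptr;
    uint32_t   off32;
    memcpy(&off32, pb, sizeof(off32));              /* bytes 0..3: 32-bit offset */
    memcpy(&Ptr.uSel, pb + 4, sizeof(Ptr.uSel));    /* bytes 4..5: selector */
    Ptr.offDest = off32;
    return Ptr;
}

/* m16:64 form - the Intel-documented operand for REX.W + jmp/call far through memory. */
static FarPtrDemo DecodeFarPtr1664(const uint8_t *pb)
{
    FarPtrDemo Ptr;
    memcpy(&Ptr.offDest, pb, sizeof(Ptr.offDest));  /* bytes 0..7: 64-bit offset */
    memcpy(&Ptr.uSel, pb + 8, sizeof(Ptr.uSel));    /* bytes 8..9: selector */
    return Ptr;
}

int main(void)
{
    /* 10 bytes at [rsp]: offset 0x0000000000401000, selector 0x0008 (little endian). */
    static const uint8_t abMem[10] = { 0x00, 0x10, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00 };

    FarPtrDemo A = DecodeFarPtr1664(abMem);
    FarPtrDemo B = DecodeFarPtr1632(abMem);
    printf("m16:64 -> %04x:%016llx\n", (unsigned)A.uSel, (unsigned long long)A.offDest);
    printf("m16:32 -> %04x:%08llx  (selector taken from different bytes)\n",
           (unsigned)B.uSel, (unsigned long long)B.offDest);
    return 0;
}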