Changeset 47283 in vbox for trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
- Timestamp: Jul 19, 2013, 11:20:49 PM (11 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r47280 r47283 2426 2426 IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize) 2427 2427 { 2428 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 2429 //VBOXSTRICTRC rcStrict; 2430 //uint64_t uNewRsp; 2431 2432 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize); 2433 IEM_RETURN_ASPECT_NOT_IMPLEMENTED(); 2428 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 2429 NOREF(cbInstr); 2430 2431 /* 2432 * Nested task return is not supported in long mode. 2433 */ 2434 if (pCtx->eflags.Bits.u1NT) 2435 { 2436 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u)); 2437 return iemRaiseGeneralProtectionFault0(pIemCpu); 2438 } 2439 2440 /* 2441 * Normal return. 2442 * 2443 * Do the stack bits, but don't commit RSP before everything checks 2444 * out right. 2445 */ 2446 VBOXSTRICTRC rcStrict; 2447 RTCPTRUNION uFrame; 2448 uint64_t uNewRip; 2449 uint16_t uNewCs; 2450 uint16_t uNewSs; 2451 uint32_t uNewFlags; 2452 uint64_t uNewRsp; 2453 if (enmEffOpSize == IEMMODE_64BIT) 2454 { 2455 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp); 2456 if (rcStrict != VINF_SUCCESS) 2457 return rcStrict; 2458 uNewRip = uFrame.pu64[0]; 2459 uNewCs = (uint16_t)uFrame.pu64[1]; 2460 uNewFlags = (uint32_t)uFrame.pu64[2]; 2461 uNewRsp = uFrame.pu64[3]; 2462 uNewSs = (uint16_t)uFrame.pu64[4]; 2463 } 2464 else if (enmEffOpSize == IEMMODE_32BIT) 2465 { 2466 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp); 2467 if (rcStrict != VINF_SUCCESS) 2468 return rcStrict; 2469 uNewRip = uFrame.pu32[0]; 2470 uNewCs = (uint16_t)uFrame.pu32[1]; 2471 uNewFlags = uFrame.pu32[2]; 2472 uNewRsp = uFrame.pu32[3]; 2473 uNewSs = (uint16_t)uFrame.pu32[4]; 2474 } 2475 else 2476 { 2477 Assert(enmEffOpSize == IEMMODE_16BIT); 2478 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp); 2479 if (rcStrict != VINF_SUCCESS) 2480 return rcStrict; 2481 uNewRip = uFrame.pu16[0]; 2482 uNewCs = uFrame.pu16[1]; 2483 uNewFlags = uFrame.pu16[2]; 2484 uNewRsp = uFrame.pu16[3]; 
2485 uNewSs = uFrame.pu16[4]; 2486 } 2487 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */ 2488 if (rcStrict != VINF_SUCCESS) 2489 return rcStrict; 2490 2491 /* 2492 * Check stuff. 2493 */ 2494 /* Read the CS descriptor. */ 2495 if (!(uNewCs & X86_SEL_MASK_OFF_RPL)) 2496 { 2497 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 2498 return iemRaiseGeneralProtectionFault0(pIemCpu); 2499 } 2500 2501 IEMSELDESC DescCS; 2502 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs); 2503 if (rcStrict != VINF_SUCCESS) 2504 { 2505 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n", 2506 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict))); 2507 return rcStrict; 2508 } 2509 2510 /* Must be a code descriptor. */ 2511 if ( !DescCS.Legacy.Gen.u1DescType 2512 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)) 2513 { 2514 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n", 2515 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type)); 2516 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); 2517 } 2518 2519 /* Privilege checks. */ 2520 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL; 2521 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl) 2522 { 2523 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp, pIemCpu->uCpl)); 2524 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); 2525 } 2526 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) 2527 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl) 2528 { 2529 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < DPL (%d) -> #GP\n", 2530 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u2Dpl)); 2531 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); 2532 } 2533 2534 /* Present? 
*/ 2535 if (!DescCS.Legacy.Gen.u1Present) 2536 { 2537 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 2538 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs); 2539 } 2540 2541 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy); 2542 2543 /* Read the SS descriptor. */ 2544 IEMSELDESC DescSS; 2545 if (!(uNewSs & X86_SEL_MASK_OFF_RPL)) 2546 { 2547 if ( !DescCS.Legacy.Gen.u1Long 2548 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */ 2549 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */ 2550 { 2551 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 2552 return iemRaiseGeneralProtectionFault0(pIemCpu); 2553 } 2554 DescSS.Legacy.u = 0; 2555 } 2556 else 2557 { 2558 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs); 2559 if (rcStrict != VINF_SUCCESS) 2560 { 2561 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n", 2562 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict))); 2563 return rcStrict; 2564 } 2565 } 2566 2567 /* Privilege checks. */ 2568 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL)) 2569 { 2570 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 2571 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs); 2572 } 2573 2574 uint32_t cbLimitSs; 2575 if (!(uNewSs & X86_SEL_MASK_OFF_RPL)) 2576 cbLimitSs = UINT32_MAX; 2577 else 2578 { 2579 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL)) 2580 { 2581 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n", 2582 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl)); 2583 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs); 2584 } 2585 2586 /* Must be a writeable data segment descriptor. 
*/ 2587 if (!DescSS.Legacy.Gen.u1DescType) 2588 { 2589 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n", 2590 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type)); 2591 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs); 2592 } 2593 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE) 2594 { 2595 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n", 2596 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type)); 2597 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs); 2598 } 2599 2600 /* Present? */ 2601 if (!DescSS.Legacy.Gen.u1Present) 2602 { 2603 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 2604 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs); 2605 } 2606 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy); 2607 } 2608 2609 /* Check EIP. */ 2610 if (DescCS.Legacy.Gen.u1Long) 2611 { 2612 if (!IEM_IS_CANONICAL(uNewRip)) 2613 { 2614 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n", 2615 uNewCs, uNewRip, uNewSs, uNewRsp)); 2616 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs); 2617 } 2618 } 2619 else 2620 { 2621 if (uNewRip > cbLimitCS) 2622 { 2623 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n", 2624 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS)); 2625 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs); 2626 } 2627 } 2628 2629 /* 2630 * Commit the changes, marking CS and SS accessed first since 2631 * that may fail. 2632 */ 2633 /** @todo where exactly are these actually marked accessed by a real CPU? 
*/ 2634 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2635 { 2636 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs); 2637 if (rcStrict != VINF_SUCCESS) 2638 return rcStrict; 2639 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 2640 } 2641 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2642 { 2643 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs); 2644 if (rcStrict != VINF_SUCCESS) 2645 return rcStrict; 2646 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 2647 } 2648 2649 pCtx->rip = uNewRip; 2650 pCtx->cs.Sel = uNewCs; 2651 pCtx->cs.ValidSel = uNewCs; 2652 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 2653 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 2654 pCtx->cs.u32Limit = cbLimitCS; 2655 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 2656 pCtx->rsp = uNewRsp; 2657 pCtx->ss.Sel = uNewSs; 2658 pCtx->ss.ValidSel = uNewSs; 2659 if (!(uNewSs & X86_SEL_MASK_OFF_RPL)) 2660 { 2661 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; 2662 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT); 2663 pCtx->ss.u32Limit = UINT32_MAX; 2664 pCtx->ss.u64Base = 0; 2665 } 2666 else 2667 { 2668 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; 2669 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy); 2670 pCtx->ss.u32Limit = cbLimitSs; 2671 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy); 2672 } 2673 2674 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF 2675 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT; 2676 if (enmEffOpSize != IEMMODE_16BIT) 2677 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID; 2678 if (pIemCpu->uCpl == 0) 2679 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */ 2680 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL) 2681 fEFlagsMask |= X86_EFL_IF; 2682 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx); 2683 fEFlagsNew &= ~fEFlagsMask; 2684 fEFlagsNew |= uNewFlags & fEFlagsMask; 2685 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew); 
2686 2687 if (pIemCpu->uCpl != uNewCpl) 2688 { 2689 pIemCpu->uCpl = uNewCpl; 2690 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds); 2691 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es); 2692 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs); 2693 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs); 2694 } 2695 2696 return VINF_SUCCESS; 2434 2697 } 2435 2698
Note: See TracChangeset for help on using the changeset viewer.