Changeset 51182 in vbox for trunk/src/VBox/VMM
- Timestamp: May 5, 2014 12:08:40 PM
- svn:sync-xref-src-repo-rev: 93555
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
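The error codes and descriptor-table offsets throughout this file's new task-switch code are built from the architectural x86 selector format: bits 1:0 are the RPL, bit 2 is the table indicator (TI), and bits 15:3 are the descriptor index. Below is a minimal standalone sketch of the two masks the diff leans on (X86_SEL_MASK for table byte offsets, X86_SEL_MASK_OFF_RPL for error codes); the SEL_* constants are local stand-ins for the VBox x86.h macros, not the real definitions.

#include <stdint.h>
#include <stdio.h>

#define SEL_RPL          0x0003u  /* requestor privilege level, bits 1:0 */
#define SEL_LDT          0x0004u  /* table indicator, bit 2: 1 = LDT, 0 = GDT */
#define SEL_MASK         0xfff8u  /* index bits only: byte offset into the table */
#define SEL_MASK_OFF_RPL 0xfffcu  /* selector with RPL cleared, used in error codes */

int main(void)
{
    uint16_t uSel = 0x002f;  /* index 5, TI = 1 (LDT), RPL = 3 */
    printf("table byte offset: %#x\n", uSel & SEL_MASK);          /* 0x28 */
    printf("error-code form:   %#x\n", uSel & SEL_MASK_OFF_RPL);  /* 0x2c */
    printf("rpl=%u ti=%u\n", (unsigned)(uSel & SEL_RPL), (unsigned)((uSel >> 2) & 1u));
    return 0;
}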
r50923 r51182 76 76 //#define IEM_VERIFICATION_MODE_MINIMAL 77 77 //#define IEM_LOG_MEMORY_WRITES 78 //#define IEM_IMPLEMENTS_TASKSWITCH 78 79 79 80 /******************************************************************************* … … 271 272 272 273 /** 274 * Check if we're currently executing in virtual 8086 mode. 275 * 276 * @returns @c true if it is, @c false if not. 277 * @param a_pIemCpu The IEM state of the current CPU. 278 */ 279 #define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx))) 280 281 /** 273 282 * Check if we're currently executing in long mode. 274 283 * … … 697 706 * Internal Functions * 698 707 *******************************************************************************/ 708 static VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr); 699 709 static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu); 700 710 static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu); … … 704 714 static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr); 705 715 static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel); 716 static VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr); 706 717 static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr); 707 718 static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu); … … 720 731 static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); 721 732 static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); 733 static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode); 722 734 static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt); 723 735 static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp); 724 736 static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp); 737 static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value); 738 static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value); 725 739 static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel); 726 740 static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg); … … 2253 2267 * 2254 2268 * @param pIemCpu The IEM per CPU instance data. 2255 * @param pSReg 2269 * @param pSReg Pointer to the segment register. 2256 2270 */ 2257 2271 static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg) … … 2272 2286 pSReg->u32Limit = 0; 2273 2287 } 2288 } 2289 2290 2291 /** 2292 * Loads a segment selector during a task switch in V8086 mode. 2293 * 2294 * @param pIemCpu The IEM per CPU instance data. 2295 * @param pSReg Pointer to the segment register. 2296 * @param uSel The selector value to load. 2297 */ 2298 static void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel) 2299 { 2300 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". 
*/ 2301 pSReg->Sel = uSel; 2302 pSReg->ValidSel = uSel; 2303 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 2304 pSReg->u64Base = uSel << 4; 2305 pSReg->u32Limit = 0xffff; 2306 pSReg->Attr.u = 0xf3; 2307 } 2308 2309 2310 /** 2311 * Loads a NULL data selector into a selector register, both the hidden and 2312 * visible parts, in protected mode. 2313 * 2314 * @param pIemCpu The IEM state of the calling EMT. 2315 * @param pSReg Pointer to the segment register. 2316 * @param uRpl The RPL. 2317 */ 2318 static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl) 2319 { 2320 /** @todo Testcase: write a testcase checking what happends when loading a NULL 2321 * data selector in protected mode. */ 2322 pSReg->Sel = uRpl; 2323 pSReg->ValidSel = uRpl; 2324 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 2325 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) 2326 { 2327 /* VT-x (Intel 3960x) observed doing something like this. */ 2328 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT); 2329 pSReg->u32Limit = UINT32_MAX; 2330 pSReg->u64Base = 0; 2331 } 2332 else 2333 { 2334 pSReg->Attr.u = X86DESCATTR_UNUSABLE; 2335 pSReg->u32Limit = 0; 2336 pSReg->u64Base = 0; 2337 } 2338 } 2339 2340 2341 /** 2342 * Loads a segment selector during a task switch in protected mode. In this task 2343 * switch scenario, we would throw #TS exceptions rather than #GPs. 2344 * 2345 * @returns VBox strict status code. 2346 * @param pIemCpu The IEM per CPU instance data. 2347 * @param pSReg Pointer to the segment register. 2348 * @param uSel The new selector value. 2349 * 2350 * @remarks This does -NOT- handle CS or SS. 2351 * @remarks This expects pIemCpu->uCpl to be up to date. 2352 */ 2353 static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel) 2354 { 2355 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT); 2356 2357 /* Null data selector. */ 2358 if (!(uSel & X86_SEL_MASK_OFF_RPL)) 2359 { 2360 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel); 2361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg)); 2362 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS); 2363 return VINF_SUCCESS; 2364 } 2365 2366 /* Fetch the descriptor. */ 2367 IEMSELDESC Desc; 2368 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS); 2369 if (rcStrict != VINF_SUCCESS) 2370 { 2371 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel, 2372 VBOXSTRICTRC_VAL(rcStrict))); 2373 return rcStrict; 2374 } 2375 2376 /* Must be a data segment or readable code segment. */ 2377 if ( !Desc.Legacy.Gen.u1DescType 2378 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE) 2379 { 2380 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel, 2381 Desc.Legacy.Gen.u4Type)); 2382 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 2383 } 2384 2385 /* Check privileges for data segments and non-conforming code segments. */ 2386 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 2387 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 2388 { 2389 /* The RPL and the new CPL must be less than or equal to the DPL. 
*/ 2390 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl 2391 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)) 2392 { 2393 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n", 2394 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl)); 2395 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 2396 } 2397 } 2398 2399 /* Is it there? */ 2400 if (!Desc.Legacy.Gen.u1Present) 2401 { 2402 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel)); 2403 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 2404 } 2405 2406 /* The base and limit. */ 2407 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy); 2408 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy); 2409 2410 /* 2411 * Ok, everything checked out fine. Now set the accessed bit before 2412 * committing the result into the registers. 2413 */ 2414 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2415 { 2416 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel); 2417 if (rcStrict != VINF_SUCCESS) 2418 return rcStrict; 2419 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 2420 } 2421 2422 /* Commit */ 2423 pSReg->Sel = uSel; 2424 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); 2425 pSReg->u32Limit = cbLimit; 2426 pSReg->u64Base = u64Base; 2427 pSReg->ValidSel = uSel; 2428 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 2429 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) 2430 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE; 2431 2432 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg)); 2433 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS); 2434 return VINF_SUCCESS; 2435 } 2436 2437 2438 /** 2439 * Performs a task switch. 2440 * 2441 * If the task switch is the result of a JMP, CALL or IRET instruction, the 2442 * caller is responsible for performing the necessary checks (like DPL, TSS 2443 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction 2444 * reference for JMP, CALL, IRET. 2445 * 2446 * If the task switch is the due to a software interrupt or hardware exception, 2447 * the caller is responsible for validating the TSS selector and descriptor. See 2448 * Intel Instruction reference for INT n. 2449 * 2450 * @returns VBox strict status code. 2451 * @param pIemCpu The IEM per CPU instance data. 2452 * @param pCtx The CPU context. 2453 * @param enmTaskSwitch What caused this task switch. 2454 * @param uNextEip The EIP effective after the task switch. 2455 * @param fFlags The flags. 2456 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set. 2457 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set. 2458 * @param SelTSS The TSS selector of the new task. 2459 * @param pNewDescTSS Pointer to the new TSS descriptor. 
2460 */ 2461 static VBOXSTRICTRC iemTaskSwitch(PIEMCPU pIemCpu, 2462 PCPUMCTX pCtx, 2463 IEMTASKSWITCH enmTaskSwitch, 2464 uint32_t uNextEip, 2465 uint32_t fFlags, 2466 uint16_t uErr, 2467 uint64_t uCr2, 2468 RTSEL SelTSS, 2469 PIEMSELDESC pNewDescTSS) 2470 { 2471 Assert(!IEM_IS_REAL_MODE(pIemCpu)); 2472 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT); 2473 2474 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type; 2475 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL 2476 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY 2477 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL 2478 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY); 2479 2480 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL 2481 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY); 2482 2483 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS, 2484 fIsNewTSS386, pCtx->eip, uNextEip)); 2485 2486 /* Update CR2 in case it's a page-fault. */ 2487 /** @todo This should probably be done much earlier in IEM/PGM. See 2488 * @bugref{5653} comment#49. */ 2489 if (fFlags & IEM_XCPT_FLAGS_CR2) 2490 pCtx->cr2 = uCr2; 2491 2492 /* 2493 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference" 2494 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)". 2495 */ 2496 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16); 2497 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN; 2498 if (uNewTSSLimit < uNewTSSLimitMin) 2499 { 2500 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n", 2501 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin)); 2502 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL); 2503 } 2504 2505 /* 2506 * Check the current TSS limit. The last written byte to the current TSS during the 2507 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit). 2508 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields. 2509 * 2510 * The AMD docs doesn't mention anything about limit checks with LTR which suggests you can 2511 * end up with smaller than "legal" TSS limits. 2512 */ 2513 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit; 2514 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29; 2515 if (uCurTSSLimit < uCurTSSLimitMin) 2516 { 2517 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n", 2518 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin)); 2519 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL); 2520 } 2521 2522 /* 2523 * Verify that the new TSS can be accessed and map it. Map only the required contents 2524 * and not the entire TSS. 2525 */ 2526 void *pvNewTSS; 2527 uint32_t cbNewTSS = uNewTSSLimitMin + 1; 2528 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy); 2529 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1); 2530 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may 2531 * not perform correct translation if this happens. See Intel spec. 
7.2.1 2532 * "Task-State Segment" */ 2533 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW); 2534 if (rcStrict != VINF_SUCCESS) 2535 { 2536 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch, 2537 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict))); 2538 return rcStrict; 2539 } 2540 2541 /* 2542 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET. 2543 */ 2544 uint32_t u32EFlags = pCtx->eflags.u32; 2545 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP 2546 || enmTaskSwitch == IEMTASKSWITCH_IRET) 2547 { 2548 PX86DESC pDescCurTSS; 2549 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX, 2550 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW); 2551 if (rcStrict != VINF_SUCCESS) 2552 { 2553 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n", 2554 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict))); 2555 return rcStrict; 2556 } 2557 2558 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK; 2559 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW); 2560 if (rcStrict != VINF_SUCCESS) 2561 { 2562 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n", 2563 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict))); 2564 return rcStrict; 2565 } 2566 2567 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */ 2568 if (enmTaskSwitch == IEMTASKSWITCH_IRET) 2569 { 2570 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY 2571 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY); 2572 u32EFlags &= ~X86_EFL_NT; 2573 } 2574 } 2575 2576 /* 2577 * Save the CPU state into the current TSS. 2578 */ 2579 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base; 2580 if (GCPtrNewTSS == GCPtrCurTSS) 2581 { 2582 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS)); 2583 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n", 2584 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel)); 2585 } 2586 if (fIsNewTSS386) 2587 { 2588 /* 2589 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size. 2590 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields. 2591 */ 2592 void *pvCurTSS32; 2593 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip); 2594 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip); 2595 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64); 2596 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW); 2597 if (rcStrict != VINF_SUCCESS) 2598 { 2599 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n", 2600 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict))); 2601 return rcStrict; 2602 } 2603 2604 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). 
*/ 2605 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS); 2606 pCurTSS32->eip = uNextEip; 2607 pCurTSS32->eflags = u32EFlags; 2608 pCurTSS32->eax = pCtx->eax; 2609 pCurTSS32->ecx = pCtx->ecx; 2610 pCurTSS32->edx = pCtx->edx; 2611 pCurTSS32->ebx = pCtx->ebx; 2612 pCurTSS32->esp = pCtx->esp; 2613 pCurTSS32->ebp = pCtx->ebp; 2614 pCurTSS32->esi = pCtx->esi; 2615 pCurTSS32->edi = pCtx->edi; 2616 pCurTSS32->es = pCtx->es.Sel; 2617 pCurTSS32->cs = pCtx->cs.Sel; 2618 pCurTSS32->ss = pCtx->ss.Sel; 2619 pCurTSS32->ds = pCtx->ds.Sel; 2620 pCurTSS32->fs = pCtx->fs.Sel; 2621 pCurTSS32->gs = pCtx->gs.Sel; 2622 2623 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW); 2624 if (rcStrict != VINF_SUCCESS) 2625 { 2626 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, 2627 VBOXSTRICTRC_VAL(rcStrict))); 2628 return rcStrict; 2629 } 2630 } 2631 else 2632 { 2633 /* 2634 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size. 2635 */ 2636 void *pvCurTSS16; 2637 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip); 2638 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip); 2639 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28); 2640 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW); 2641 if (rcStrict != VINF_SUCCESS) 2642 { 2643 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n", 2644 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict))); 2645 return rcStrict; 2646 } 2647 2648 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */ 2649 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS); 2650 pCurTSS16->ip = uNextEip; 2651 pCurTSS16->flags = u32EFlags; 2652 pCurTSS16->ax = pCtx->ax; 2653 pCurTSS16->cx = pCtx->cx; 2654 pCurTSS16->dx = pCtx->dx; 2655 pCurTSS16->bx = pCtx->bx; 2656 pCurTSS16->sp = pCtx->sp; 2657 pCurTSS16->bp = pCtx->bp; 2658 pCurTSS16->si = pCtx->si; 2659 pCurTSS16->di = pCtx->di; 2660 pCurTSS16->es = pCtx->es.Sel; 2661 pCurTSS16->cs = pCtx->cs.Sel; 2662 pCurTSS16->ss = pCtx->ss.Sel; 2663 pCurTSS16->ds = pCtx->ds.Sel; 2664 2665 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW); 2666 if (rcStrict != VINF_SUCCESS) 2667 { 2668 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, 2669 VBOXSTRICTRC_VAL(rcStrict))); 2670 return rcStrict; 2671 } 2672 } 2673 2674 /* 2675 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT. 2676 */ 2677 if ( enmTaskSwitch == IEMTASKSWITCH_CALL 2678 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT) 2679 { 2680 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */ 2681 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS; 2682 pNewTSS->selPrev = pCtx->tr.Sel; 2683 } 2684 2685 /* 2686 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky, 2687 * it's done further below with error handling (e.g. CR3 changes will go through PGM). 
2688 */ 2689 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi; 2690 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt; 2691 bool fNewDebugTrap; 2692 if (fIsNewTSS386) 2693 { 2694 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS; 2695 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0; 2696 uNewEip = pNewTSS32->eip; 2697 uNewEflags = pNewTSS32->eflags; 2698 uNewEax = pNewTSS32->eax; 2699 uNewEcx = pNewTSS32->ecx; 2700 uNewEdx = pNewTSS32->edx; 2701 uNewEbx = pNewTSS32->ebx; 2702 uNewEsp = pNewTSS32->esp; 2703 uNewEbp = pNewTSS32->ebp; 2704 uNewEsi = pNewTSS32->esi; 2705 uNewEdi = pNewTSS32->edi; 2706 uNewES = pNewTSS32->es; 2707 uNewCS = pNewTSS32->cs; 2708 uNewSS = pNewTSS32->ss; 2709 uNewDS = pNewTSS32->ds; 2710 uNewFS = pNewTSS32->fs; 2711 uNewGS = pNewTSS32->gs; 2712 uNewLdt = pNewTSS32->selLdt; 2713 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap); 2714 } 2715 else 2716 { 2717 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS; 2718 uNewCr3 = 0; 2719 uNewEip = pNewTSS16->ip; 2720 uNewEflags = pNewTSS16->flags; 2721 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax; 2722 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx; 2723 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx; 2724 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx; 2725 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp; 2726 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp; 2727 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si; 2728 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di; 2729 uNewES = pNewTSS16->es; 2730 uNewCS = pNewTSS16->cs; 2731 uNewSS = pNewTSS16->ss; 2732 uNewDS = pNewTSS16->ds; 2733 uNewFS = 0; 2734 uNewGS = 0; 2735 uNewLdt = pNewTSS16->selLdt; 2736 fNewDebugTrap = false; 2737 } 2738 2739 if (GCPtrNewTSS == GCPtrCurTSS) 2740 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n", 2741 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt)); 2742 2743 /* 2744 * We're done accessing the new TSS. 2745 */ 2746 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW); 2747 if (rcStrict != VINF_SUCCESS) 2748 { 2749 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict))); 2750 return rcStrict; 2751 } 2752 2753 /* 2754 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT. 2755 */ 2756 if (enmTaskSwitch != IEMTASKSWITCH_IRET) 2757 { 2758 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX, 2759 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW); 2760 if (rcStrict != VINF_SUCCESS) 2761 { 2762 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n", 2763 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict))); 2764 return rcStrict; 2765 } 2766 2767 /* Check that the descriptor indicates the new TSS is available (not busy). */ 2768 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL 2769 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL, 2770 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type)); 2771 2772 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK; 2773 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW); 2774 if (rcStrict != VINF_SUCCESS) 2775 { 2776 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). 
enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n", 2777 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict))); 2778 return rcStrict; 2779 } 2780 } 2781 2782 /* 2783 * From this point on, we're technically in the new task. We will defer exceptions 2784 * until the completion of the task switch but before executing any instructions in the new task. 2785 */ 2786 pCtx->tr.Sel = SelTSS; 2787 pCtx->tr.ValidSel = SelTSS; 2788 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID; 2789 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy); 2790 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy); 2791 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy); 2792 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR); 2793 2794 /* Set the busy bit in TR. */ 2795 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK; 2796 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */ 2797 if ( enmTaskSwitch == IEMTASKSWITCH_CALL 2798 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT) 2799 { 2800 uNewEflags |= X86_EFL_NT; 2801 } 2802 2803 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */ 2804 pCtx->cr0 |= X86_CR0_TS; 2805 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0); 2806 2807 pCtx->eip = uNewEip; 2808 pCtx->eax = uNewEax; 2809 pCtx->ecx = uNewEcx; 2810 pCtx->edx = uNewEdx; 2811 pCtx->ebx = uNewEbx; 2812 pCtx->esp = uNewEsp; 2813 pCtx->ebp = uNewEbp; 2814 pCtx->esi = uNewEsi; 2815 pCtx->edi = uNewEdi; 2816 2817 uNewEflags &= X86_EFL_LIVE_MASK; 2818 uNewEflags |= X86_EFL_RA1_MASK; 2819 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags); 2820 2821 /* 2822 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors 2823 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3 2824 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging. 2825 */ 2826 pCtx->es.Sel = uNewES; 2827 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE; 2828 pCtx->es.Attr.u &= ~X86DESCATTR_P; 2829 2830 pCtx->cs.Sel = uNewCS; 2831 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE; 2832 pCtx->cs.Attr.u &= ~X86DESCATTR_P; 2833 2834 pCtx->ss.Sel = uNewSS; 2835 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE; 2836 pCtx->ss.Attr.u &= ~X86DESCATTR_P; 2837 2838 pCtx->ds.Sel = uNewDS; 2839 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE; 2840 pCtx->ds.Attr.u &= ~X86DESCATTR_P; 2841 2842 pCtx->fs.Sel = uNewFS; 2843 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE; 2844 pCtx->fs.Attr.u &= ~X86DESCATTR_P; 2845 2846 pCtx->gs.Sel = uNewGS; 2847 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE; 2848 pCtx->gs.Attr.u &= ~X86DESCATTR_P; 2849 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS); 2850 2851 pCtx->ldtr.Sel = uNewLdt; 2852 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE; 2853 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P; 2854 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR); 2855 2856 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) 2857 { 2858 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE; 2859 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE; 2860 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE; 2861 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE; 2862 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE; 2863 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE; 2864 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE; 2865 } 2866 2867 /* 2868 * Switch CR3 for the new task. 
2869 */ 2870 if ( fIsNewTSS386 2871 && (pCtx->cr0 & X86_CR0_PG)) 2872 { 2873 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */ 2874 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu)) 2875 { 2876 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3); 2877 AssertRCSuccessReturn(rc, rc); 2878 } 2879 else 2880 pCtx->cr3 = uNewCr3; 2881 2882 /* Inform PGM. */ 2883 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu)) 2884 { 2885 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE)); 2886 AssertRCReturn(rc, rc); 2887 /* ignore informational status codes */ 2888 } 2889 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3); 2890 } 2891 2892 /* 2893 * Switch LDTR for the new task. 2894 */ 2895 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL)) 2896 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt); 2897 else 2898 { 2899 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */ 2900 2901 IEMSELDESC DescNewLdt; 2902 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS); 2903 if (rcStrict != VINF_SUCCESS) 2904 { 2905 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch, 2906 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict))); 2907 return rcStrict; 2908 } 2909 if ( !DescNewLdt.Legacy.Gen.u1Present 2910 || DescNewLdt.Legacy.Gen.u1DescType 2911 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT) 2912 { 2913 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch, 2914 uNewLdt, DescNewLdt.Legacy.u)); 2915 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 2916 } 2917 2918 pCtx->ldtr.ValidSel = uNewLdt; 2919 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 2920 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy); 2921 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy); 2922 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy); 2923 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu)) 2924 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE; 2925 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr)); 2926 } 2927 2928 IEMSELDESC DescSS; 2929 if (IEM_IS_V86_MODE(pIemCpu)) 2930 { 2931 pIemCpu->uCpl = 3; 2932 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES); 2933 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS); 2934 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS); 2935 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS); 2936 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS); 2937 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS); 2938 } 2939 else 2940 { 2941 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL); 2942 2943 /* 2944 * Load the stack segment for the new task. 2945 */ 2946 if (!(uNewSS & X86_SEL_MASK_OFF_RPL)) 2947 { 2948 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS)); 2949 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL); 2950 } 2951 2952 /* Fetch the descriptor. */ 2953 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS); 2954 if (rcStrict != VINF_SUCCESS) 2955 { 2956 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS, 2957 VBOXSTRICTRC_VAL(rcStrict))); 2958 return rcStrict; 2959 } 2960 2961 /* SS must be a data segment and writable. 
*/ 2962 if ( !DescSS.Legacy.Gen.u1DescType 2963 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) 2964 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE)) 2965 { 2966 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n", 2967 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type)); 2968 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL); 2969 } 2970 2971 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */ 2972 if ( (uNewSS & X86_SEL_RPL) != uNewCpl 2973 || DescSS.Legacy.Gen.u2Dpl != uNewCpl) 2974 { 2975 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl, 2976 uNewCpl)); 2977 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL); 2978 } 2979 2980 /* Is it there? */ 2981 if (!DescSS.Legacy.Gen.u1Present) 2982 { 2983 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS)); 2984 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL); 2985 } 2986 2987 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy); 2988 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy); 2989 2990 /* Set the accessed bit before committing the result into SS. */ 2991 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2992 { 2993 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS); 2994 if (rcStrict != VINF_SUCCESS) 2995 return rcStrict; 2996 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 2997 } 2998 2999 /* Commit SS. */ 3000 pCtx->ss.Sel = uNewSS; 3001 pCtx->ss.ValidSel = uNewSS; 3002 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy); 3003 pCtx->ss.u32Limit = cbLimit; 3004 pCtx->ss.u64Base = u64Base; 3005 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; 3006 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss)); 3007 3008 /* CPL has changed, update IEM before loading rest of segments. */ 3009 pIemCpu->uCpl = uNewCpl; 3010 3011 /* 3012 * Load the data segments for the new task. 3013 */ 3014 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES); 3015 if (rcStrict != VINF_SUCCESS) 3016 return rcStrict; 3017 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS); 3018 if (rcStrict != VINF_SUCCESS) 3019 return rcStrict; 3020 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS); 3021 if (rcStrict != VINF_SUCCESS) 3022 return rcStrict; 3023 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS); 3024 if (rcStrict != VINF_SUCCESS) 3025 return rcStrict; 3026 3027 /* 3028 * Load the code segment for the new task. 3029 */ 3030 if (!(uNewCS & X86_SEL_MASK_OFF_RPL)) 3031 { 3032 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS)); 3033 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3034 } 3035 3036 /* Fetch the descriptor. */ 3037 IEMSELDESC DescCS; 3038 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS); 3039 if (rcStrict != VINF_SUCCESS) 3040 { 3041 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict))); 3042 return rcStrict; 3043 } 3044 3045 /* CS must be a code segment. */ 3046 if ( !DescCS.Legacy.Gen.u1DescType 3047 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)) 3048 { 3049 Log(("iemTaskSwitch: CS invalid descriptor type. 
uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS, 3050 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type)); 3051 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3052 } 3053 3054 /* For conforming CS, DPL must be less than or equal to the RPL. */ 3055 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) 3056 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL)) 3057 { 3058 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type, 3059 DescCS.Legacy.Gen.u2Dpl)); 3060 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3061 } 3062 3063 /* For non-conforming CS, DPL must match RPL. */ 3064 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) 3065 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL)) 3066 { 3067 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, 3068 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl)); 3069 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3070 } 3071 3072 /* Is it there? */ 3073 if (!DescCS.Legacy.Gen.u1Present) 3074 { 3075 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS)); 3076 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3077 } 3078 3079 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy); 3080 u64Base = X86DESC_BASE(&DescCS.Legacy); 3081 3082 /* Set the accessed bit before committing the result into CS. */ 3083 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3084 { 3085 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS); 3086 if (rcStrict != VINF_SUCCESS) 3087 return rcStrict; 3088 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 3089 } 3090 3091 /* Commit CS. */ 3092 pCtx->cs.Sel = uNewCS; 3093 pCtx->cs.ValidSel = uNewCS; 3094 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 3095 pCtx->cs.u32Limit = cbLimit; 3096 pCtx->cs.u64Base = u64Base; 3097 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 3098 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs)); 3099 } 3100 3101 /** @todo Debug trap. */ 3102 if (fIsNewTSS386 && fNewDebugTrap) 3103 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n")); 3104 3105 /* 3106 * Construct the error code masks based on what caused this task switch. 3107 * See Intel Instruction reference for INT. 3108 */ 3109 uint16_t uExt; 3110 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT 3111 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)) 3112 { 3113 uExt = 1; 3114 } 3115 else 3116 uExt = 0; 3117 3118 /* 3119 * Push any error code on to the new stack. 3120 */ 3121 if (fFlags & IEM_XCPT_FLAGS_ERR) 3122 { 3123 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT); 3124 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy); 3125 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN) 3126 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */ 3127 3128 /* Check that there is sufficient space on the stack. */ 3129 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2; 3130 if ( pCtx->esp - 1 > cbLimitSS 3131 || pCtx->esp < cbStackFrame) 3132 { 3133 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. 
*/ 3134 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp, 3135 cbStackFrame)); 3136 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt); 3137 } 3138 3139 if (fIsNewTSS386) 3140 rcStrict = iemMemStackPushU32(pIemCpu, uErr); 3141 else 3142 rcStrict = iemMemStackPushU16(pIemCpu, uErr); 3143 if (rcStrict != VINF_SUCCESS) 3144 { 3145 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16", 3146 VBOXSTRICTRC_VAL(rcStrict))); 3147 return rcStrict; 3148 } 3149 } 3150 3151 /* Check the new EIP against the new CS limit. */ 3152 if (pCtx->eip > pCtx->cs.u32Limit) 3153 { 3154 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n", 3155 pCtx->eip, pCtx->cs.u32Limit)); 3156 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */ 3157 return iemRaiseGeneralProtectionFault(pIemCpu, uExt); 3158 } 3159 3160 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel)); 3161 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS; 2274 3162 } 2275 3163 … … 2325 3213 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 2326 3214 } 3215 bool fTaskGate = false; 2327 3216 uint8_t f32BitGate = true; 2328 3217 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM; … … 2354 3243 2355 3244 case X86_SEL_TYPE_SYS_TASK_GATE: 2356 /** @todo task gates. */ 2357 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n")); /** @todo Implement task gate support. */ 3245 fTaskGate = true; 3246 #ifndef IEM_IMPLEMENTS_TASKSWITCH 3247 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n")); 3248 #endif 3249 break; 2358 3250 2359 3251 case X86_SEL_TYPE_SYS_286_TRAP_GATE: … … 2380 3272 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector)); 2381 3273 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 3274 } 3275 3276 /* Is it a task-gate? */ 3277 if (fTaskGate) 3278 { 3279 /* 3280 * Construct the error code masks based on what caused this task switch. 3281 * See Intel Instruction reference for INT. 3282 */ 3283 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1; 3284 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL; 3285 RTSEL SelTSS = Idte.Gate.u16Sel; 3286 3287 /* 3288 * Fetch the TSS descriptor in the GDT. 3289 */ 3290 IEMSELDESC DescTSS; 3291 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt); 3292 if (rcStrict != VINF_SUCCESS) 3293 { 3294 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS, 3295 VBOXSTRICTRC_VAL(rcStrict))); 3296 return rcStrict; 3297 } 3298 3299 /* The TSS descriptor must be a system segment and be available (not busy). */ 3300 if ( DescTSS.Legacy.Gen.u1DescType 3301 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL 3302 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL)) 3303 { 3304 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n", 3305 u8Vector, SelTSS, DescTSS.Legacy.au64)); 3306 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt); 3307 } 3308 3309 /* The TSS must be present. 
*/ 3310 if (!DescTSS.Legacy.Gen.u1Present) 3311 { 3312 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64)); 3313 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt); 3314 } 3315 3316 /* Do the actual task switch. */ 3317 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS); 2382 3318 } 2383 3319 … … 3074 4010 3075 4011 3076 #ifdef SOME_UNUSED_FUNCTION3077 4012 /** \#TS(err) - 0a. */ 3078 4013 DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr) … … 3080 4015 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0); 3081 4016 } 3082 #endif3083 4017 3084 4018 … … 3135 4069 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 3136 4070 uSel & ~X86_SEL_RPL, 0); 4071 } 4072 4073 4074 /** \#SS(err) - 0c. */ 4075 DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr) 4076 { 4077 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0); 3137 4078 } 3138 4079 … … 5887 6828 * Check the input and figure out which mapping entry to use. 5888 6829 */ 5889 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 94); /* 512 is the max! */6830 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */ 5890 6831 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK))); 5891 6832 … … 7139 8080 7140 8081 /** 7141 * Fetches a descriptor table entry .8082 * Fetches a descriptor table entry with caller specified error code. 7142 8083 * 7143 8084 * @returns Strict VBox status code. … … 7146 8087 * @param uSel The selector which table entry to fetch. 7147 8088 * @param uXcpt The exception to raise on table lookup error. 7148 */ 7149 static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) 7150 { 8089 * @param uErrorCode The error code associated with the exception. 8090 */ 8091 static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, 8092 uint16_t uErrorCode) 8093 { 8094 AssertPtr(pDesc); 7151 8095 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 7152 8096 … … 7164 8108 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel)); 7165 8109 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 7166 u Sel & ~X86_SEL_RPL, 0);8110 uErrorCode, 0); 7167 8111 } 7168 8112 … … 7176 8120 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt)); 7177 8121 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 7178 u Sel & ~X86_SEL_RPL, 0);8122 uErrorCode, 0); 7179 8123 } 7180 8124 GCPtrBase = pCtx->gdtr.pGdt; … … 7197 8141 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel)); 7198 8142 /** @todo is this the right exception? */ 7199 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 7200 uSel & ~X86_SEL_RPL, 0); 8143 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0); 7201 8144 } 7202 8145 } 7203 8146 return rcStrict; 8147 } 8148 8149 8150 /** 8151 * Fetches a descriptor table entry. 8152 * 8153 * @returns Strict VBox status code. 8154 * @param pIemCpu The IEM per CPU. 
8155 * @param pDesc Where to return the descriptor table entry. 8156 * @param uSel The selector which table entry to fetch. 8157 * @param uXcpt The exception to raise on table lookup error. 8158 */ 8159 static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) 8160 { 8161 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL); 7204 8162 } 7205 8163 … … 8737 9695 RTGCPTR uCr2; 8738 9696 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); 8739 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2 );9697 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */); 8740 9698 if (!IEM_VERIFICATION_ENABLED(pIemCpu)) 8741 9699 TRPMResetTrap(pVCpu); … … 9534 10492 Log2(("****\n" 9535 10493 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n" 9536 " eip=%08x esp=%08x ebp=%08x iopl=%d \n"10494 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n" 9537 10495 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n" 9538 10496 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n" … … 9540 10498 , 9541 10499 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi, 9542 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, 10500 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel, 9543 10501 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, 9544 10502 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u, … … 9861 10819 RTGCPTR uCr2; 9862 10820 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); 9863 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2 );10821 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */); 9864 10822 if (!IEM_VERIFICATION_ENABLED(pIemCpu)) 9865 10823 TRPMResetTrap(pVCpu); … … 9898 10856 #endif 9899 10857 if (rcStrict != VINF_SUCCESS) 9900 LogFlow(("IEMExec One: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",10858 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n", 9901 10859 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict))); 9902 10860 return rcStrict; … … 9917 10875 * @param uErrCode The error code if applicable. 9918 10876 * @param uCr2 The CR2 value if applicable. 9919 */ 9920 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2) 10877 * @param cbInstr The instruction length (only relevant for 10878 * software interrupts). 10879 */ 10880 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2, 10881 uint8_t cbInstr) 9921 10882 { 9922 10883 iemInitDecoder(&pVCpu->iem.s, false); … … 9962 10923 } 9963 10924 9964 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2); 10925 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2); 10926 } 10927 10928 10929 /** 10930 * Injects the active TRPM event. 10931 * 10932 * @returns Strict VBox status code. 10933 * @param pVCpu Pointer to the VMCPU. 
10934 */ 10935 VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu) 10936 { 10937 uint8_t u8TrapNo; 10938 TRPMEVENT enmType; 10939 RTGCUINT uErrCode; 10940 RTGCUINTPTR uCr2; 10941 uint8_t cbInstr; 10942 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr); 10943 if (RT_FAILURE(rc)) 10944 return rc; 10945 10946 TRPMResetTrap(pVCpu); 10947 return IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr); 9965 10948 } 9966 10949 -
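The heart of the IEMAll.cpp changes is iemTaskSwitch(), which validates the new TSS limit (at least 0x67 for a 386 TSS, 0x2B for a 286 TSS), writes the outgoing task's dynamic state (EIP through GS) into the current TSS, and then loads the incoming one. Its AssertCompile that selLdt sits exactly 64 bytes past eip only makes sense against the 32-bit TSS layout. The sketch below is not VBox code — the struct is a local stand-in for X86TSS32 — but it verifies the offsets the change relies on.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#pragma pack(1)
typedef struct TSS32
{
    uint16_t selPrev, padPrev;                    /* 0x00: previous task link */
    uint32_t esp0; uint16_t ss0, pad0;            /* ring-0 stack */
    uint32_t esp1; uint16_t ss1, pad1;            /* ring-1 stack */
    uint32_t esp2; uint16_t ss2, pad2;            /* ring-2 stack */
    uint32_t cr3;                                 /* 0x1c */
    uint32_t eip;                                 /* 0x20: first field saved on a switch */
    uint32_t eflags;
    uint32_t eax, ecx, edx, ebx, esp, ebp, esi, edi;
    uint16_t es, padEs, cs, padCs, ss, padSs;
    uint16_t ds, padDs, fs, padFs, gs, padGs;     /* gs at 0x5c: last field saved */
    uint16_t selLdt, padLdt;                      /* 0x60: back to static fields */
    uint16_t fDebugTrap;                          /* T bit (bit 0) */
    uint16_t offIoBitmap;                         /* 0x66, so the limit must be >= 0x67 */
} TSS32;
#pragma pack()

int main(void)
{
    /* Same relationship the changeset's AssertCompile checks: EIP..GS is 64 bytes. */
    assert(offsetof(TSS32, selLdt) - offsetof(TSS32, eip) == 64);
    assert(offsetof(TSS32, gs) == 0x5c);
    /* Minimum legal 386 TSS limit: the structure spans offsets 0..0x67. */
    assert(sizeof(TSS32) - 1 == 0x67);
    return 0;
}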
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r50863 r51182 186 186 pIemCpu->fUndefinedEFlags |= fUndefined; 187 187 #endif 188 }189 190 191 /**192 * Loads a NULL data selector into a selector register, both the hidden and193 * visible parts, in protected mode.194 *195 * @param pIemCpu The IEM state of the calling EMT.196 * @param pSReg Pointer to the segment register.197 * @param uRpl The RPL.198 */199 static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)200 {201 /** @todo Testcase: write a testcase checking what happends when loading a NULL202 * data selector in protected mode. */203 pSReg->Sel = uRpl;204 pSReg->ValidSel = uRpl;205 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;206 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))207 {208 /* VT-x (Intel 3960x) observed doing something like this. */209 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);210 pSReg->u32Limit = UINT32_MAX;211 pSReg->u64Base = 0;212 }213 else214 {215 pSReg->Attr.u = X86DESCATTR_UNUSABLE;216 pSReg->u32Limit = 0;217 pSReg->u64Base = 0;218 }219 188 } 220 189 … … 914 883 * @param enmEffOpSize The effective operand size. 915 884 * @param pDesc The descriptor corrsponding to @a uSel. The type is 916 * callgate.885 * task gate. 917 886 */ 918 887 IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc) 919 888 { 920 /* Call various functions to do the work. Clear RF? */ 889 #ifndef IEM_IMPLEMENTS_TASKSWITCH 921 890 IEM_RETURN_ASPECT_NOT_IMPLEMENTED(); 891 #else 892 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL); 893 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL 894 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL); 895 896 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl 897 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL)) 898 { 899 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl, 900 pIemCpu->uCpl, (uSel & X86_SEL_RPL))); 901 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 902 } 903 904 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not 905 * far calls (see iemCImpl_callf). Most likely in both cases it should be 906 * checked here, need testcases. */ 907 if (!pDesc->Legacy.Gen.u1Present) 908 { 909 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel)); 910 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 911 } 912 913 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 914 uint32_t uNextEip = pCtx->eip + cbInstr; 915 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL, 916 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc); 917 #endif 922 918 } 923 919 … … 930 926 * @param enmEffOpSize The effective operand size. 931 927 * @param pDesc The descriptor corrsponding to @a uSel. The type is 932 * callgate.928 * task gate. 933 929 */ 934 930 IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc) 935 931 { 936 /* Call various functions to do the work. 
Don't clear RF */ 932 #ifndef IEM_IMPLEMENTS_TASKSWITCH 937 933 IEM_RETURN_ASPECT_NOT_IMPLEMENTED(); 934 #else 935 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL); 936 937 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl 938 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL)) 939 { 940 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl, 941 pIemCpu->uCpl, (uSel & X86_SEL_RPL))); 942 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 943 } 944 945 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not 946 * far calls (see iemCImpl_callf). Most likely in both cases it should be 947 * checked here, need testcases. */ 948 if (!pDesc->Legacy.Gen.u1Present) 949 { 950 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel)); 951 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 952 } 953 954 /* 955 * Fetch the new TSS descriptor from the GDT. 956 */ 957 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel; 958 if (uSelTss & X86_SEL_LDT) 959 { 960 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss)); 961 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 962 } 963 964 IEMSELDESC TssDesc; 965 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelTss, X86_XCPT_GP); 966 if (rcStrict != VINF_SUCCESS) 967 return rcStrict; 968 969 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK) 970 { 971 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss, 972 TssDesc.Legacy.Gate.u4Type)); 973 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL); 974 } 975 976 if (!TssDesc.Legacy.Gate.u1Present) 977 { 978 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss)); 979 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelTss & X86_SEL_MASK_OFF_RPL); 980 } 981 982 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 983 uint32_t uNextEip = pCtx->eip + cbInstr; 984 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL, 985 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc); 986 #endif 938 987 } 939 988 … … 982 1031 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type)); 983 1032 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel); 984 985 1033 } 986 1034 … … 2242 2290 IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize) 2243 2291 { 2292 #ifndef IEM_IMPLEMENTS_TASKSWITCH 2244 2293 IEM_RETURN_ASPECT_NOT_IMPLEMENTED(); 2294 #else 2295 /* 2296 * Read the segment selector in the link-field of the current TSS. 2297 */ 2298 RTSEL uSelRet; 2299 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 2300 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base); 2301 if (rcStrict != VINF_SUCCESS) 2302 return rcStrict; 2303 2304 /* 2305 * Fetch the returning task's TSS descriptor from the GDT. 2306 */ 2307 if (uSelRet & X86_SEL_LDT) 2308 { 2309 Log(("iret_prot_NestedTask TSS not in LDT. 
uSelRet=%04x -> #TS\n", uSelRet)); 2310 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet); 2311 } 2312 2313 IEMSELDESC TssDesc; 2314 rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelRet, X86_XCPT_GP); 2315 if (rcStrict != VINF_SUCCESS) 2316 return rcStrict; 2317 2318 if (TssDesc.Legacy.Gate.u1DescType) 2319 { 2320 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet)); 2321 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL); 2322 } 2323 2324 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY 2325 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) 2326 { 2327 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type)); 2328 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL); 2329 } 2330 2331 if (!TssDesc.Legacy.Gate.u1Present) 2332 { 2333 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet)); 2334 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL); 2335 } 2336 2337 uint32_t uNextEip = pCtx->eip + cbInstr; 2338 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */, 2339 0 /* uCr2 */, uSelRet, &TssDesc); 2340 #endif 2245 2341 } 2246 2342 -
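Both new far-branch bodies above (iemCImpl_BranchTaskSegment and iemCImpl_BranchTaskGate) apply the same privilege gate before handing off to iemTaskSwitch(): the TSS or task-gate descriptor's DPL must be at least the CPL and at least the selector's RPL, otherwise #GP. A minimal illustration, using a local RPL mask rather than VBox's X86_SEL_RPL:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEL_RPL 0x3u  /* low two selector bits */

/* #GP unless the gate/TSS descriptor's DPL >= CPL and >= the selector's RPL. */
static bool taskBranchDplOk(uint8_t uDpl, uint8_t uCpl, uint16_t uSel)
{
    return uDpl >= uCpl && uDpl >= (uSel & SEL_RPL);
}

int main(void)
{
    printf("%d\n", taskBranchDplOk(3, 3, 0x004b)); /* DPL=3 gate from user mode: ok */
    printf("%d\n", taskBranchDplOk(0, 3, 0x0048)); /* DPL=0 gate from CPL 3: #GP  */
    return 0;
}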
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r50856 r51182 4845 4845 /* 4846 4846 * AMD-V does not provide us with the original exception but we have it in u64IntInfo since we 4847 * injected the event during VM-entry. Software interrupts and exceptions will be regenerated 4848 * when the recompiler restarts the instruction. 4847 * injected the event during VM-entry. 4849 4848 */ 4850 4849 SVMEVENT Event; 4851 4850 Event.u = pVCpu->hm.s.Event.u64IntInfo; 4852 if ( Event.n.u3Type == SVM_EVENT_EXCEPTION 4853 || Event.n.u3Type == SVM_EVENT_SOFTWARE_INT) 4854 { 4855 pVCpu->hm.s.Event.fPending = false; 4856 } 4857 else 4858 Log4(("hmR0SvmExitTaskSwitch: TS occurred during event delivery. Kept pending u8Vector=%#x\n", Event.n.u8Vector)); 4851 Log4(("hmR0SvmExitTaskSwitch: TS occurred during event delivery. u8Vector=%#x\n", Event.n.u8Vector)); 4852 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch); 4853 return VINF_EM_RAW_INJECT_TRPM_EVENT; 4859 4854 } 4860 4855 -
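On the AMD-V side, hmR0SvmExitTaskSwitch() previously dropped pending software interrupts and exceptions and relied on the recompiler to restart the instruction; it now keeps the event captured in u64IntInfo pending and returns VINF_EM_RAW_INJECT_TRPM_EVENT so the event is replayed through IEM's new task-switch path. A rough standalone sketch of unpacking such an event word follows (field layout per AMD's EXITINTINFO/EVENTINJ format; the struct and decoder are local to this example):

#include <stdint.h>
#include <stdio.h>

typedef struct SvmEvent
{
    uint8_t  uVector;    /* bits 7:0  */
    uint8_t  uType;      /* bits 10:8, 3 = exception */
    int      fErrValid;  /* bit 11 */
    uint32_t uErrCode;   /* bits 63:32 */
} SvmEvent;

static SvmEvent svmDecodeEvent(uint64_t u64IntInfo)
{
    SvmEvent Ev;
    Ev.uVector   = (uint8_t)(u64IntInfo & 0xff);
    Ev.uType     = (uint8_t)((u64IntInfo >> 8) & 0x7);
    Ev.fErrValid = (int)((u64IntInfo >> 11) & 0x1);
    Ev.uErrCode  = (uint32_t)(u64IntInfo >> 32);
    return Ev;
}

int main(void)
{
    /* A #GP(0x10) that was being delivered when the task-switch exit hit:
     * vector 13, type 3 (exception), error code valid, valid bit set. */
    uint64_t u64 = 13ull | (3ull << 8) | (1ull << 11) | (1ull << 31) | (0x10ull << 32);
    SvmEvent Ev = svmDecodeEvent(u64);
    printf("vector=%u type=%u err=%#x\n", (unsigned)Ev.uVector, (unsigned)Ev.uType, Ev.uErrCode);
    return 0;
}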
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r51145 r51182 10650 10650 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo); 10651 10651 10652 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */ 10653 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT 10654 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT 10655 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT) 10652 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo); 10653 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo); 10654 10655 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */ 10656 Assert(!pVCpu->hm.s.Event.fPending); 10657 pVCpu->hm.s.Event.fPending = true; 10658 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo; 10659 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient); 10660 AssertRCReturn(rc, rc); 10661 if (fErrorCodeValid) 10662 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode; 10663 else 10664 pVCpu->hm.s.Event.u32ErrCode = 0; 10665 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT 10666 && uVector == X86_XCPT_PF) 10656 10667 { 10657 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo); 10658 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo); 10659 10660 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */ 10661 Assert(!pVCpu->hm.s.Event.fPending); 10662 pVCpu->hm.s.Event.fPending = true; 10663 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo; 10664 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient); 10665 AssertRCReturn(rc, rc); 10666 if (fErrorCodeValid) 10667 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode; 10668 else 10669 pVCpu->hm.s.Event.u32ErrCode = 0; 10670 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT 10671 && uVector == X86_XCPT_PF) 10672 { 10673 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2; 10674 } 10675 10676 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector)); 10668 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2; 10677 10669 } 10670 10671 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector)); 10672 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch); 10673 return VINF_EM_RAW_INJECT_TRPM_EVENT; 10678 10674 } 10679 10675 } -
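The VT-x handler mirrors this: instead of regenerating only some event types, it now unconditionally stashes the IDT-vectoring info as a pending HM event, copies the error code when valid, records CR2 for page faults, and returns VINF_EM_RAW_INJECT_TRPM_EVENT. Here is a simplified model of that bookkeeping, assuming nothing beyond what the hunk shows — the PendingEvent type and constants are illustrative, not VBox definitions:

#include <stdint.h>

#define XCPT_PF                 14u  /* page-fault vector */
#define VECTORING_TYPE_HW_XCPT   3u  /* hardware exception, per the VT-x encoding */

typedef struct PendingEvent
{
    int      fPending;
    uint64_t u64IntInfo;
    uint32_t u32ErrCode;
    uint64_t GCPtrFaultAddress;
} PendingEvent;

static void savePendingEvent(PendingEvent *pEvt, uint64_t uIdtVectoringInfo,
                             int fErrValid, uint32_t uErrCode, uint64_t uCr2)
{
    uint8_t const uVector = (uint8_t)(uIdtVectoringInfo & 0xff);
    uint8_t const uType   = (uint8_t)((uIdtVectoringInfo >> 8) & 0x7);

    pEvt->fPending   = 1;
    pEvt->u64IntInfo = uIdtVectoringInfo;
    pEvt->u32ErrCode = fErrValid ? uErrCode : 0;
    /* Page faults also need CR2 so the fault address survives the trip to TRPM/IEM. */
    if (uType == VECTORING_TYPE_HW_XCPT && uVector == XCPT_PF)
        pEvt->GCPtrFaultAddress = uCr2;
}

int main(void)
{
    PendingEvent Evt = {0};
    uint64_t u64Info = 14ull | (3ull << 8) | (1ull << 11) | (1ull << 31);
    savePendingEvent(&Evt, u64Info, 1 /* fErrValid */, 0x02, 0x1000);
    return Evt.GCPtrFaultAddress == 0x1000 ? 0 : 1;
}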
trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
r48998 r51182 275 275 break; 276 276 277 case VINF_EM_RAW_INJECT_TRPM_EVENT: 278 #ifdef VBOX_WITH_FIRST_IEM_STEP 279 rc = VBOXSTRICTRC_VAL(IEMInjectTrpmEvent(pVCpu)); 280 #else 281 /* Do the same thing as VINF_EM_RAW_EMULATE_INSTR. */ 282 rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: "); 283 #endif 284 break; 285 286 277 287 #ifdef EMHANDLERC_WITH_PATM 278 288 /* -
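EMHandleRCTmpl.h closes the loop in ring-3: the new VINF_EM_RAW_INJECT_TRPM_EVENT status is routed to IEMInjectTrpmEvent() when VBOX_WITH_FIRST_IEM_STEP is defined, and otherwise falls back to plain instruction emulation. A toy model of that dispatch; the enum values and handler body below are stand-ins, not the VBox definitions:

#include <stdio.h>

typedef enum
{
    RC_SUCCESS = 0,
    RC_RAW_INJECT_TRPM_EVENT  /* stand-in for VINF_EM_RAW_INJECT_TRPM_EVENT */
} Rc;

static Rc injectTrpmEvent(void)
{
    /* The real path queries TRPM for the pending trap (vector, type, error code,
     * CR2, instruction length), resets it, and hands it to IEM, which can now
     * carry out a full task switch if the vector goes through a task gate. */
    printf("replaying the trap through the interpreter\n");
    return RC_SUCCESS;
}

static Rc handleRc(Rc rc)
{
    switch (rc)
    {
        case RC_RAW_INJECT_TRPM_EVENT:
            return injectTrpmEvent();
        default:
            return rc;
    }
}

int main(void)
{
    return (int)handleRc(RC_RAW_INJECT_TRPM_EVENT);
}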
trunk/src/VBox/VMM/include/IEMInternal.h
r47754 r51182 523 523 524 524 /** 525 * Possible hardware task switch sources. 526 */ 527 typedef enum IEMTASKSWITCH 528 { 529 /** Task switch caused by an interrupt/exception. */ 530 IEMTASKSWITCH_INT_XCPT = 1, 531 /** Task switch caused by a far CALL. */ 532 IEMTASKSWITCH_CALL, 533 /** Task switch caused by a far JMP. */ 534 IEMTASKSWITCH_JUMP, 535 /** Task switch caused by an IRET. */ 536 IEMTASKSWITCH_IRET 537 } IEMTASKSWITCH; 538 AssertCompileSize(IEMTASKSWITCH, 4); 539 540 541 /** 525 542 * Tests if verification mode is enabled. 526 543 *
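Finally, IEMInternal.h introduces the IEMTASKSWITCH enum that iemTaskSwitch() branches on, since the four sources differ in busy-bit handling and EFLAGS.NT semantics (JMP/IRET clear the old TSS descriptor's busy bit; CALL/INT set NT and the back link). The enum below is copied from the change; the name-mapping helper is illustration only:

#include <stdio.h>

typedef enum IEMTASKSWITCH
{
    IEMTASKSWITCH_INT_XCPT = 1, /* interrupt or exception through a task gate */
    IEMTASKSWITCH_CALL,         /* far CALL to a TSS or task gate */
    IEMTASKSWITCH_JUMP,         /* far JMP to a TSS or task gate */
    IEMTASKSWITCH_IRET          /* IRET with EFLAGS.NT set */
} IEMTASKSWITCH;

static const char *iemTaskSwitchName(IEMTASKSWITCH enmTaskSwitch)
{
    switch (enmTaskSwitch)
    {
        case IEMTASKSWITCH_INT_XCPT: return "INT/XCPT";
        case IEMTASKSWITCH_CALL:     return "CALL";
        case IEMTASKSWITCH_JUMP:     return "JMP";
        case IEMTASKSWITCH_IRET:     return "IRET";
        default:                     return "?";
    }
}

int main(void)
{
    printf("%s\n", iemTaskSwitchName(IEMTASKSWITCH_IRET));
    return 0;
}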