VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllXcpt-x86.cpp@ 108226

Last change on this file since 108226 was 108220, checked in by vboxsync, 3 months ago

VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 126.4 KB
1/* $Id: IEMAllXcpt-x86.cpp 108220 2025-02-14 11:40:20Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - x86 target, exceptions & interrupts.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/pdmapic.h>
40#include <VBox/vmm/pdm.h>
41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/iom.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/gcm.h>
47#include <VBox/vmm/gim.h>
48#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
49# include <VBox/vmm/em.h>
50# include <VBox/vmm/hm_svm.h>
51#endif
52#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
53# include <VBox/vmm/hmvmxinline.h>
54#endif
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/dbgf.h>
57#include <VBox/vmm/dbgftrace.h>
58#include "IEMInternal.h"
59#include <VBox/vmm/vmcc.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/dis.h>
64#include <iprt/asm-math.h>
65#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
66# include <iprt/asm-amd64-x86.h>
67#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
68# include <iprt/asm-arm.h>
69#endif
70#include <iprt/assert.h>
71#include <iprt/string.h>
72#include <iprt/x86.h>
73
74#include "IEMInline.h"
75
76
77/*********************************************************************************************************************************
78* Structures and Typedefs *
79*********************************************************************************************************************************/
80/**
81 * CPU exception classes.
82 */
83typedef enum IEMXCPTCLASS
84{
85 IEMXCPTCLASS_BENIGN,
86 IEMXCPTCLASS_CONTRIBUTORY,
87 IEMXCPTCLASS_PAGE_FAULT,
88 IEMXCPTCLASS_DOUBLE_FAULT
89} IEMXCPTCLASS;
90
91
92/*********************************************************************************************************************************
93* Global Variables *
94*********************************************************************************************************************************/
95#if defined(IEM_LOG_MEMORY_WRITES)
96/** What IEM just wrote. */
97uint8_t g_abIemWrote[256];
98/** How much IEM just wrote. */
99size_t g_cbIemWrote;
100#endif
101
102
103/** @name Misc Worker Functions.
104 * @{
105 */
106
107/**
108 * Gets the exception class for the specified exception vector.
109 *
110 * @returns The class of the specified exception.
111 * @param uVector The exception vector.
112 */
113static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
114{
115 Assert(uVector <= X86_XCPT_LAST);
116 switch (uVector)
117 {
118 case X86_XCPT_DE:
119 case X86_XCPT_TS:
120 case X86_XCPT_NP:
121 case X86_XCPT_SS:
122 case X86_XCPT_GP:
123 case X86_XCPT_SX: /* AMD only */
124 return IEMXCPTCLASS_CONTRIBUTORY;
125
126 case X86_XCPT_PF:
127 case X86_XCPT_VE: /* Intel only */
128 return IEMXCPTCLASS_PAGE_FAULT;
129
130 case X86_XCPT_DF:
131 return IEMXCPTCLASS_DOUBLE_FAULT;
132 }
133 return IEMXCPTCLASS_BENIGN;
134}
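
/*
 * Informal recap of the classification above (it mirrors the double-fault
 * condition table in the Intel SDM, "Interrupt 8 - Double Fault Exception"):
 * #DE, #TS, #NP, #SS, #GP and AMD's #SX are contributory; #PF and Intel's #VE
 * form the page-fault class; everything else (#DB, #NMI, #BP, #UD, #AC, ...)
 * is treated as benign here.
 */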
135
136
137/**
138 * Evaluates how to handle an exception caused during delivery of another event
139 * (exception / interrupt).
140 *
141 * @returns How to handle the recursive exception.
142 * @param pVCpu The cross context virtual CPU structure of the
143 * calling thread.
144 * @param fPrevFlags The flags of the previous event.
145 * @param uPrevVector The vector of the previous event.
146 * @param fCurFlags The flags of the current exception.
147 * @param uCurVector The vector of the current exception.
148 * @param pfXcptRaiseInfo Where to store additional information about the
149 * exception condition. Optional.
150 */
151VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
152 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
153{
154 /*
155 * Only CPU exceptions can be raised while delivering other events; software interrupt
156 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
157 */
158 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
159 Assert(pVCpu); RT_NOREF(pVCpu);
160 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
161
162 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
163 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
164 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
165 {
166 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
167 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
168 {
169 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
170 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
171 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
172 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
173 {
174 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
175 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
176 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
177 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
178 uCurVector, pVCpu->cpum.GstCtx.cr2));
179 }
180 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
181 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
182 {
183 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
184 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
185 }
186 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
187 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
188 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
189 {
190 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
191 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
192 }
193 }
194 else
195 {
196 if (uPrevVector == X86_XCPT_NMI)
197 {
198 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
199 if (uCurVector == X86_XCPT_PF)
200 {
201 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
202 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
203 }
204 }
205 else if ( uPrevVector == X86_XCPT_AC
206 && uCurVector == X86_XCPT_AC)
207 {
208 enmRaise = IEMXCPTRAISE_CPU_HANG;
209 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
210 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
211 }
212 }
213 }
214 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
215 {
216 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
217 if (uCurVector == X86_XCPT_PF)
218 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
219 }
220 else
221 {
222 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
223 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
224 }
225
226 if (pfXcptRaiseInfo)
227 *pfXcptRaiseInfo = fRaiseInfo;
228 return enmRaise;
229}
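
/*
 * Worked examples for the rules above (informal):
 *  - #PF while delivering a #PF: double fault, fRaiseInfo = IEMXCPTRAISEINFO_PF_PF.
 *  - #GP while delivering a #TS: contributory on contributory -> double fault.
 *  - #GP or #PF while delivering a #DF: IEMXCPTRAISE_TRIPLE_FAULT.
 *  - #PF while delivering an NMI: the #PF is raised as the current exception,
 *    with IEMXCPTRAISEINFO_NMI_PF recorded for the caller.
 *  - #AC while delivering an #AC: IEMXCPTRAISE_CPU_HANG (the "bad guest" case).
 */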
230
231
232/**
233 * Enters the CPU shutdown state initiated by a triple fault or other
234 * unrecoverable conditions.
235 *
236 * @returns Strict VBox status code.
237 * @param pVCpu The cross context virtual CPU structure of the
238 * calling thread.
239 */
240static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
241{
242 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
243 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
244
245 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
246 {
247 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
248 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
249 }
250
251 RT_NOREF(pVCpu);
252 return VINF_EM_TRIPLE_FAULT;
253}
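
/*
 * Note the ordering above: a VMX non-root guest gets a triple-fault VM-exit,
 * an SVM guest with the SHUTDOWN intercept set gets a #VMEXIT, and only
 * otherwise is VINF_EM_TRIPLE_FAULT handed back to the VMM.
 */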
254
255
256/**
257 * Validates a new SS segment.
258 *
259 * @returns VBox strict status code.
260 * @param pVCpu The cross context virtual CPU structure of the
261 * calling thread.
262 * @param NewSS The new SS selector.
263 * @param uCpl The CPL to load the stack for.
264 * @param pDesc Where to return the descriptor.
265 */
266static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
267{
268 /* Null selectors are not allowed (we're not called for dispatching
269 interrupts with SS=0 in long mode). */
270 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
271 {
272 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
273 return iemRaiseTaskSwitchFault0(pVCpu);
274 }
275
276 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
277 if ((NewSS & X86_SEL_RPL) != uCpl)
278 {
279 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
280 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
281 }
282
283 /*
284 * Read the descriptor.
285 */
286 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
287 if (rcStrict != VINF_SUCCESS)
288 return rcStrict;
289
290 /*
291 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
292 */
293 if (!pDesc->Legacy.Gen.u1DescType)
294 {
295 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
296 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
297 }
298
299 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
300 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
301 {
302 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
303 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
304 }
305 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
306 {
307 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
308 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
309 }
310
311 /* Is it there? */
312 /** @todo testcase: Is this checked before the canonical / limit check below? */
313 if (!pDesc->Legacy.Gen.u1Present)
314 {
315 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
316 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
317 }
318
319 return VINF_SUCCESS;
320}
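
/*
 * Summary of the checks above: a null SS -> #TS(0); SS.RPL != CPL -> #TS(SS);
 * system or non-writable/code segment -> #TS(SS); SS.DPL != CPL -> #TS(SS);
 * segment not present -> #NP(SS). These follow the LSS / POP SS / MOV SS rules.
 */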
321
322/** @} */
323
324
325/** @name Raising Exceptions.
326 *
327 * @{
328 */
329
330
331/**
332 * Loads the specified stack far pointer from the TSS.
333 *
334 * @returns VBox strict status code.
335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
336 * @param uCpl The CPL to load the stack for.
337 * @param pSelSS Where to return the new stack segment.
338 * @param puEsp Where to return the new stack pointer.
339 */
340static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
341{
342 VBOXSTRICTRC rcStrict;
343 Assert(uCpl < 4);
344
345 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
346 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
347 {
348 /*
349 * 16-bit TSS (X86TSS16).
350 */
351 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
352 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
353 {
354 uint32_t off = uCpl * 4 + 2;
355 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
356 {
357 /** @todo check actual access pattern here. */
358 uint32_t u32Tmp = 0; /* gcc maybe... */
359 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
360 if (rcStrict == VINF_SUCCESS)
361 {
362 *puEsp = RT_LOWORD(u32Tmp);
363 *pSelSS = RT_HIWORD(u32Tmp);
364 return VINF_SUCCESS;
365 }
366 }
367 else
368 {
369 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
370 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
371 }
372 break;
373 }
374
375 /*
376 * 32-bit TSS (X86TSS32).
377 */
378 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
379 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
380 {
381 uint32_t off = uCpl * 8 + 4;
382 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
383 {
384/** @todo check actual access pattern here. */
385 uint64_t u64Tmp;
386 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
387 if (rcStrict == VINF_SUCCESS)
388 {
389 *puEsp = u64Tmp & UINT32_MAX;
390 *pSelSS = (RTSEL)(u64Tmp >> 32);
391 return VINF_SUCCESS;
392 }
393 }
394 else
395 {
396 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
397 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
398 }
399 break;
400 }
401
402 default:
403 AssertFailed();
404 rcStrict = VERR_IEM_IPE_4;
405 break;
406 }
407
408 *puEsp = 0; /* make gcc happy */
409 *pSelSS = 0; /* make gcc happy */
410 return rcStrict;
411}
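
/*
 * Layout used above (informal): a 16-bit TSS keeps the CPL-n stack as an
 * SP:SS word pair at offset 2 + n * 4, a 32-bit TSS keeps an ESP dword plus
 * SS word at offset 4 + n * 8. E.g. uCpl=1 reads offset 6 (TSS16) or
 * offset 12 (TSS32).
 */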
412
413
414/**
415 * Loads the specified stack pointer from the 64-bit TSS.
416 *
417 * @returns VBox strict status code.
418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
419 * @param uCpl The CPL to load the stack for.
420 * @param uIst The interrupt stack table index, 0 if to use uCpl.
421 * @param puRsp Where to return the new stack pointer.
422 */
423static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
424{
425 Assert(uCpl < 4);
426 Assert(uIst < 8);
427 *puRsp = 0; /* make gcc happy */
428
429 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
430 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
431
432 uint32_t off;
433 if (uIst)
434 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
435 else
436 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
437 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
438 {
439 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
440 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
441 }
442
443 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
444}
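
/*
 * Illustration: uIst=0 picks the rspN field for the target CPL, i.e.
 * off = uCpl * 8 + offsetof(X86TSS64, rsp0); a non-zero uIst selects
 * ist1..ist7 instead, e.g. uIst=3 reads (3 - 1) * 8 + offsetof(X86TSS64, ist1).
 */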
445
446
447/**
448 * Adjust the CPU state according to the exception being raised.
449 *
450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
451 * @param u8Vector The exception that has been raised.
452 */
453DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
454{
455 switch (u8Vector)
456 {
457 case X86_XCPT_DB:
458 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
459 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
460 break;
461 /** @todo Read the AMD and Intel exception reference... */
462 }
463}
464
465
466/**
467 * Implements exceptions and interrupts for real mode.
468 *
469 * @returns VBox strict status code.
470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
471 * @param cbInstr The number of bytes to offset rIP by in the return
472 * address.
473 * @param u8Vector The interrupt / exception vector number.
474 * @param fFlags The flags.
475 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
476 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
477 */
478static VBOXSTRICTRC
479iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
480 uint8_t cbInstr,
481 uint8_t u8Vector,
482 uint32_t fFlags,
483 uint16_t uErr,
484 uint64_t uCr2) RT_NOEXCEPT
485{
486 NOREF(uErr); NOREF(uCr2);
487 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
488
489 /*
490 * Read the IDT entry.
491 */
492 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
493 {
494 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
495 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
496 }
497 RTFAR16 Idte;
498 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
499 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
500 {
501 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
502 return rcStrict;
503 }
504
505#ifdef LOG_ENABLED
506 /* If software interrupt, try to decode it if logging is enabled and such. */
507 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
508 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
509 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
510#endif
511
512 /*
513 * Push the stack frame.
514 */
515 uint8_t bUnmapInfo;
516 uint16_t *pu16Frame;
517 uint64_t uNewRsp;
518 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
519 if (rcStrict != VINF_SUCCESS)
520 return rcStrict;
521
522 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
523#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
524 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
525 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
526 fEfl |= UINT16_C(0xf000);
527#endif
528 pu16Frame[2] = (uint16_t)fEfl;
529 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
530 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
531 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
532 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
533 return rcStrict;
534
535 /*
536 * Load the vector address into cs:ip and make exception specific state
537 * adjustments.
538 */
539 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
540 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
541 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
542 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
543 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
544 pVCpu->cpum.GstCtx.rip = Idte.off;
545 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
546 IEMMISC_SET_EFL(pVCpu, fEfl);
547
548 /** @todo do we actually do this in real mode? */
549 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
550 iemRaiseXcptAdjustState(pVCpu, u8Vector);
551
552 /*
553 * Deal with debug events that follow the exception and clear inhibit flags.
554 */
555 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
556 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
557 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
558 else
559 {
560 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
561 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
562 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
563 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
564 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
565 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
566 return iemRaiseDebugException(pVCpu);
567 }
568
569 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
570 so best leave them alone in case we're in a weird kind of real mode... */
571
572 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
573}
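
/*
 * Real-mode recap of the above: the IVT holds 4-byte offset:segment entries
 * at IDTR.base + vector * 4 (e.g. INT 21h with IDTR.base=0 reads linear
 * address 0x84), the frame pushed is FLAGS, CS, IP (IP pointing past the
 * instruction only for software interrupts), and IF/TF/AC are cleared before
 * control is transferred to the handler.
 */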
574
575
576/**
577 * Loads a NULL data selector into a segment register when coming from V8086 mode.
578 *
579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
580 * @param pSReg Pointer to the segment register.
581 */
582DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
583{
584 pSReg->Sel = 0;
585 pSReg->ValidSel = 0;
586 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
587 {
588 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
589 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
590 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
591 }
592 else
593 {
594 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
595 /** @todo check this on AMD-V */
596 pSReg->u64Base = 0;
597 pSReg->u32Limit = 0;
598 }
599}
600
601
602/**
603 * Loads a segment selector during a task switch in V8086 mode.
604 *
605 * @param pSReg Pointer to the segment register.
606 * @param uSel The selector value to load.
607 */
608DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
609{
610 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
611 pSReg->Sel = uSel;
612 pSReg->ValidSel = uSel;
613 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
614 pSReg->u64Base = uSel << 4;
615 pSReg->u32Limit = 0xffff;
616 pSReg->Attr.u = 0xf3;
617}
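
/*
 * Example: uSel=0x1234 yields base 0x12340, limit 0xffff and attributes 0xf3
 * (present, DPL=3, accessed read/write data) - the usual V8086 segment shape.
 */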
618
619
620/**
621 * Loads a segment selector during a task switch in protected mode.
622 *
623 * In this task switch scenario, we would throw \#TS exceptions rather than
624 * \#GPs.
625 *
626 * @returns VBox strict status code.
627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
628 * @param pSReg Pointer to the segment register.
629 * @param uSel The new selector value.
630 *
631 * @remarks This does _not_ handle CS or SS.
632 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
633 */
634static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
635{
636 Assert(!IEM_IS_64BIT_CODE(pVCpu));
637
638 /* Null data selector. */
639 if (!(uSel & X86_SEL_MASK_OFF_RPL))
640 {
641 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
642 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
643 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
644 return VINF_SUCCESS;
645 }
646
647 /* Fetch the descriptor. */
648 IEMSELDESC Desc;
649 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
650 if (rcStrict != VINF_SUCCESS)
651 {
652 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
653 VBOXSTRICTRC_VAL(rcStrict)));
654 return rcStrict;
655 }
656
657 /* Must be a data segment or readable code segment. */
658 if ( !Desc.Legacy.Gen.u1DescType
659 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
660 {
661 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
662 Desc.Legacy.Gen.u4Type));
663 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
664 }
665
666 /* Check privileges for data segments and non-conforming code segments. */
667 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
668 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
669 {
670 /* The RPL and the new CPL must be less than or equal to the DPL. */
671 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
672 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
673 {
674 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
675 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
676 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
677 }
678 }
679
680 /* Is it there? */
681 if (!Desc.Legacy.Gen.u1Present)
682 {
683 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
684 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
685 }
686
687 /* The base and limit. */
688 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
689 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
690
691 /*
692 * Ok, everything checked out fine. Now set the accessed bit before
693 * committing the result into the registers.
694 */
695 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
696 {
697 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
698 if (rcStrict != VINF_SUCCESS)
699 return rcStrict;
700 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
701 }
702
703 /* Commit */
704 pSReg->Sel = uSel;
705 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
706 pSReg->u32Limit = cbLimit;
707 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
708 pSReg->ValidSel = uSel;
709 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
710 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
711 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
712
713 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
714 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
715 return VINF_SUCCESS;
716}
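
/*
 * Note for the helper above: unlike an ordinary data segment load, failures
 * here raise #TS (or #NP for a not-present segment) rather than #GP, because
 * the loads happen as part of a task switch.
 */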
717
718
719/**
720 * Performs a task switch.
721 *
722 * If the task switch is the result of a JMP, CALL or IRET instruction, the
723 * caller is responsible for performing the necessary checks (like DPL, TSS
724 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
725 * reference for JMP, CALL, IRET.
726 *
727 * If the task switch is due to a software interrupt or hardware exception,
728 * the caller is responsible for validating the TSS selector and descriptor. See
729 * Intel Instruction reference for INT n.
730 *
731 * @returns VBox strict status code.
732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
733 * @param enmTaskSwitch The cause of the task switch.
734 * @param uNextEip The EIP effective after the task switch.
735 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
736 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
737 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
738 * @param SelTss The TSS selector of the new task.
739 * @param pNewDescTss Pointer to the new TSS descriptor.
740 */
741VBOXSTRICTRC
742iemTaskSwitch(PVMCPUCC pVCpu,
743 IEMTASKSWITCH enmTaskSwitch,
744 uint32_t uNextEip,
745 uint32_t fFlags,
746 uint16_t uErr,
747 uint64_t uCr2,
748 RTSEL SelTss,
749 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
750{
751 Assert(!IEM_IS_REAL_MODE(pVCpu));
752 Assert(!IEM_IS_64BIT_CODE(pVCpu));
753 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
754
755 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
756 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
757 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
758 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
759 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
760
761 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
762 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
763
764 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
765 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
766
767 /* Update CR2 in case it's a page-fault. */
768 /** @todo This should probably be done much earlier in IEM/PGM. See
769 * @bugref{5653#c49}. */
770 if (fFlags & IEM_XCPT_FLAGS_CR2)
771 pVCpu->cpum.GstCtx.cr2 = uCr2;
772
773 /*
774 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
775 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
776 */
777 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
778 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
779 if (uNewTssLimit < uNewTssLimitMin)
780 {
781 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
782 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
783 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
784 }
785
786 /*
787 * Task switches in VMX non-root mode always cause task-switch VM-exits.
788 * The new TSS must have been read and validated (DPL, limits etc.) before a
789 * task-switch VM-exit commences.
790 *
791 * See Intel spec. 25.4.2 "Treatment of Task Switches".
792 */
793 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
794 {
795 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
796 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
797 }
798
799 /*
800 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
801 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
802 */
803 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
804 {
805 uint64_t const uExitInfo1 = SelTss;
806 uint64_t uExitInfo2 = uErr;
807 switch (enmTaskSwitch)
808 {
809 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
810 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
811 default: break;
812 }
813 if (fFlags & IEM_XCPT_FLAGS_ERR)
814 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
815 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
816 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
817
818 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
819 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
820 RT_NOREF2(uExitInfo1, uExitInfo2);
821 }
822
823 /*
824 * Check the current TSS limit. The last written byte to the current TSS during the
825 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
826 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
827 *
828 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
829 * end up with smaller than "legal" TSS limits.
830 */
831 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
832 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
833 if (uCurTssLimit < uCurTssLimitMin)
834 {
835 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
836 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
837 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
838 }
839
840 /*
841 * Verify that the new TSS can be accessed and map it. Map only the required contents
842 * and not the entire TSS.
843 */
844 uint8_t bUnmapInfoNewTss;
845 void *pvNewTss;
846 uint32_t const cbNewTss = uNewTssLimitMin + 1;
847 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
848 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
849 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
850 * not perform correct translation if this happens. See Intel spec. 7.2.1
851 * "Task-State Segment". */
852 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
853/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
854 * Consider wrapping the remainder into a function for simpler cleanup. */
855 if (rcStrict != VINF_SUCCESS)
856 {
857 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
858 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
859 return rcStrict;
860 }
861
862 /*
863 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
864 */
865 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
866 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
867 || enmTaskSwitch == IEMTASKSWITCH_IRET)
868 {
869 uint8_t bUnmapInfoDescCurTss;
870 PX86DESC pDescCurTss;
871 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
872 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
873 if (rcStrict != VINF_SUCCESS)
874 {
875 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
876 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
877 return rcStrict;
878 }
879
880 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
881 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
882 if (rcStrict != VINF_SUCCESS)
883 {
884 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
885 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
886 return rcStrict;
887 }
888
889 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
890 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
891 {
892 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
893 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
894 fEFlags &= ~X86_EFL_NT;
895 }
896 }
897
898 /*
899 * Save the CPU state into the current TSS.
900 */
901 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
902 if (GCPtrNewTss == GCPtrCurTss)
903 {
904 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
905 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
906 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
907 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
908 pVCpu->cpum.GstCtx.ldtr.Sel));
909 }
910 if (fIsNewTss386)
911 {
912 /*
913 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
914 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
915 */
916 uint8_t bUnmapInfoCurTss32;
917 void *pvCurTss32;
918 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
919 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
920 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
921 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
922 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
923 if (rcStrict != VINF_SUCCESS)
924 {
925 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
926 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
927 return rcStrict;
928 }
929
930 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
931 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
932 pCurTss32->eip = uNextEip;
933 pCurTss32->eflags = fEFlags;
934 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
935 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
936 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
937 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
938 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
939 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
940 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
941 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
942 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
943 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
944 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
945 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
946 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
947 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
948
949 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
950 if (rcStrict != VINF_SUCCESS)
951 {
952 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
953 VBOXSTRICTRC_VAL(rcStrict)));
954 return rcStrict;
955 }
956 }
957 else
958 {
959 /*
960 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
961 */
962 uint8_t bUnmapInfoCurTss16;
963 void *pvCurTss16;
964 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
965 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
966 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
967 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
968 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
969 if (rcStrict != VINF_SUCCESS)
970 {
971 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
972 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
973 return rcStrict;
974 }
975
976 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
977 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
978 pCurTss16->ip = uNextEip;
979 pCurTss16->flags = (uint16_t)fEFlags;
980 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
981 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
982 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
983 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
984 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
985 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
986 pCurTss16->si = pVCpu->cpum.GstCtx.si;
987 pCurTss16->di = pVCpu->cpum.GstCtx.di;
988 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
989 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
990 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
991 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
992
993 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
994 if (rcStrict != VINF_SUCCESS)
995 {
996 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
997 VBOXSTRICTRC_VAL(rcStrict)));
998 return rcStrict;
999 }
1000 }
1001
1002 /*
1003 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
1004 */
1005 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
1006 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
1007 {
1008 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
1009 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
1010 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
1011 }
1012
1013 /*
1014 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
1015 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
1016 */
1017 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
1018 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
1019 bool fNewDebugTrap;
1020 if (fIsNewTss386)
1021 {
1022 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
1023 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
1024 uNewEip = pNewTss32->eip;
1025 uNewEflags = pNewTss32->eflags;
1026 uNewEax = pNewTss32->eax;
1027 uNewEcx = pNewTss32->ecx;
1028 uNewEdx = pNewTss32->edx;
1029 uNewEbx = pNewTss32->ebx;
1030 uNewEsp = pNewTss32->esp;
1031 uNewEbp = pNewTss32->ebp;
1032 uNewEsi = pNewTss32->esi;
1033 uNewEdi = pNewTss32->edi;
1034 uNewES = pNewTss32->es;
1035 uNewCS = pNewTss32->cs;
1036 uNewSS = pNewTss32->ss;
1037 uNewDS = pNewTss32->ds;
1038 uNewFS = pNewTss32->fs;
1039 uNewGS = pNewTss32->gs;
1040 uNewLdt = pNewTss32->selLdt;
1041 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
1042 }
1043 else
1044 {
1045 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
1046 uNewCr3 = 0;
1047 uNewEip = pNewTss16->ip;
1048 uNewEflags = pNewTss16->flags;
1049 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
1050 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
1051 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
1052 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
1053 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
1054 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
1055 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
1056 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
1057 uNewES = pNewTss16->es;
1058 uNewCS = pNewTss16->cs;
1059 uNewSS = pNewTss16->ss;
1060 uNewDS = pNewTss16->ds;
1061 uNewFS = 0;
1062 uNewGS = 0;
1063 uNewLdt = pNewTss16->selLdt;
1064 fNewDebugTrap = false;
1065 }
1066
1067 if (GCPtrNewTss == GCPtrCurTss)
1068 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
1069 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
1070
1071 /*
1072 * We're done accessing the new TSS.
1073 */
1074 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
1075 if (rcStrict != VINF_SUCCESS)
1076 {
1077 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
1078 return rcStrict;
1079 }
1080
1081 /*
1082 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
1083 */
1084 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
1085 {
1086 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
1087 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
1088 if (rcStrict != VINF_SUCCESS)
1089 {
1090 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
1091 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
1092 return rcStrict;
1093 }
1094
1095 /* Check that the descriptor indicates the new TSS is available (not busy). */
1096 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
1097 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
1098 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
1099
1100 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
1101 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
1102 if (rcStrict != VINF_SUCCESS)
1103 {
1104 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
1105 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
1106 return rcStrict;
1107 }
1108 }
1109
1110 /*
1111 * From this point on, we're technically in the new task. We will defer exceptions
1112 * until the completion of the task switch but before executing any instructions in the new task.
1113 */
1114 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
1115 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
1116 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1117 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
1118 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
1119 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
1120 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
1121
1122 /* Set the busy bit in TR. */
1123 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
1124
1125 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
1126 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
1127 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
1128 {
1129 uNewEflags |= X86_EFL_NT;
1130 }
1131
1132 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
1133 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
1134 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
1135
1136 pVCpu->cpum.GstCtx.eip = uNewEip;
1137 pVCpu->cpum.GstCtx.eax = uNewEax;
1138 pVCpu->cpum.GstCtx.ecx = uNewEcx;
1139 pVCpu->cpum.GstCtx.edx = uNewEdx;
1140 pVCpu->cpum.GstCtx.ebx = uNewEbx;
1141 pVCpu->cpum.GstCtx.esp = uNewEsp;
1142 pVCpu->cpum.GstCtx.ebp = uNewEbp;
1143 pVCpu->cpum.GstCtx.esi = uNewEsi;
1144 pVCpu->cpum.GstCtx.edi = uNewEdi;
1145
1146 uNewEflags &= X86_EFL_LIVE_MASK;
1147 uNewEflags |= X86_EFL_RA1_MASK;
1148 IEMMISC_SET_EFL(pVCpu, uNewEflags);
1149
1150 /*
1151 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
1152 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
1153 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
1154 */
1155 pVCpu->cpum.GstCtx.es.Sel = uNewES;
1156 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
1157
1158 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
1159 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
1160
1161 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1162 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
1163
1164 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
1165 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
1166
1167 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
1168 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
1169
1170 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
1171 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
1172 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1173
1174 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
1175 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
1176 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
1177 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
1178
1179 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1180 {
1181 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
1182 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
1183 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
1184 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
1185 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
1186 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
1187 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
1188 }
1189
1190 /*
1191 * Switch CR3 for the new task.
1192 */
1193 if ( fIsNewTss386
1194 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
1195 {
1196 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
1197 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
1198 AssertRCSuccessReturn(rc, rc);
1199
1200 /* Inform PGM. */
1201 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
1202 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
1203 AssertRCReturn(rc, rc);
1204 /* ignore informational status codes */
1205
1206 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
1207 }
1208
1209 /*
1210 * Switch LDTR for the new task.
1211 */
1212 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
1213 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
1214 else
1215 {
1216 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
1217
1218 IEMSELDESC DescNewLdt;
1219 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
1220 if (rcStrict != VINF_SUCCESS)
1221 {
1222 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
1223 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
1224 return rcStrict;
1225 }
1226 if ( !DescNewLdt.Legacy.Gen.u1Present
1227 || DescNewLdt.Legacy.Gen.u1DescType
1228 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1229 {
1230 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
1231 uNewLdt, DescNewLdt.Legacy.u));
1232 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
1233 }
1234
1235 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
1236 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1237 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
1238 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
1239 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
1240 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1241 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
1242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1243 }
1244
1245 IEMSELDESC DescSS;
1246 if (IEM_IS_V86_MODE(pVCpu))
1247 {
1248 IEM_SET_CPL(pVCpu, 3);
1249 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
1250 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
1251 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
1252 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
1253 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
1254 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
1255
1256 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
1257 DescSS.Legacy.u = 0;
1258 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
1259 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
1260 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
1261 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
1262 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
1263 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
1264 DescSS.Legacy.Gen.u2Dpl = 3;
1265 }
1266 else
1267 {
1268 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
1269
1270 /*
1271 * Load the stack segment for the new task.
1272 */
1273 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
1274 {
1275 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
1276 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1277 }
1278
1279 /* Fetch the descriptor. */
1280 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
1281 if (rcStrict != VINF_SUCCESS)
1282 {
1283 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
1284 VBOXSTRICTRC_VAL(rcStrict)));
1285 return rcStrict;
1286 }
1287
1288 /* SS must be a data segment and writable. */
1289 if ( !DescSS.Legacy.Gen.u1DescType
1290 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1291 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
1292 {
1293 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
1294 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
1295 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1296 }
1297
1298 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
1299 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
1300 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
1301 {
1302 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
1303 uNewCpl));
1304 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1305 }
1306
1307 /* Is it there? */
1308 if (!DescSS.Legacy.Gen.u1Present)
1309 {
1310 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
1311 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1312 }
1313
1314 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
1315 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
1316
1317 /* Set the accessed bit before committing the result into SS. */
1318 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1319 {
1320 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1321 if (rcStrict != VINF_SUCCESS)
1322 return rcStrict;
1323 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1324 }
1325
1326 /* Commit SS. */
1327 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1328 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1329 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1330 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
1331 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
1332 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1333 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1334
1335 /* CPL has changed, update IEM before loading rest of segments. */
1336 IEM_SET_CPL(pVCpu, uNewCpl);
1337
1338 /*
1339 * Load the data segments for the new task.
1340 */
1341 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
1342 if (rcStrict != VINF_SUCCESS)
1343 return rcStrict;
1344 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
1345 if (rcStrict != VINF_SUCCESS)
1346 return rcStrict;
1347 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
1348 if (rcStrict != VINF_SUCCESS)
1349 return rcStrict;
1350 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
1351 if (rcStrict != VINF_SUCCESS)
1352 return rcStrict;
1353
1354 /*
1355 * Load the code segment for the new task.
1356 */
1357 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
1358 {
1359 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
1360 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1361 }
1362
1363 /* Fetch the descriptor. */
1364 IEMSELDESC DescCS;
1365 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
1366 if (rcStrict != VINF_SUCCESS)
1367 {
1368 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
1369 return rcStrict;
1370 }
1371
1372 /* CS must be a code segment. */
1373 if ( !DescCS.Legacy.Gen.u1DescType
1374 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1375 {
1376 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
1377 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1378 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1379 }
1380
1381 /* For conforming CS, DPL must be less than or equal to the RPL. */
1382 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1383 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
1384 {
1385 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
1386 DescCS.Legacy.Gen.u2Dpl));
1387 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1388 }
1389
1390 /* For non-conforming CS, DPL must match RPL. */
1391 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1392 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
1393 {
1394 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
1395 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
1396 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1397 }
1398
1399 /* Is it there? */
1400 if (!DescCS.Legacy.Gen.u1Present)
1401 {
1402 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
1403 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1404 }
1405
1406 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1407 u64Base = X86DESC_BASE(&DescCS.Legacy);
1408
1409 /* Set the accessed bit before committing the result into CS. */
1410 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1411 {
1412 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1413 if (rcStrict != VINF_SUCCESS)
1414 return rcStrict;
1415 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1416 }
1417
1418 /* Commit CS. */
1419 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
1420 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
1421 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1422 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1423 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1424 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1425 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1426 }
1427
1428 /* Make sure the CPU mode is correct. */
1429 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
1430 if (fExecNew != pVCpu->iem.s.fExec)
1431 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
1432 pVCpu->iem.s.fExec = fExecNew;
1433
1434 /** @todo Debug trap. */
1435 if (fIsNewTss386 && fNewDebugTrap)
1436 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
1437
1438 /*
1439 * Construct the error code masks based on what caused this task switch.
1440 * See Intel Instruction reference for INT.
1441 */
1442 uint16_t uExt;
1443 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
1444 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1445 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
1446 uExt = 1;
1447 else
1448 uExt = 0;
1449
1450 /*
1451 * Push any error code on to the new stack.
1452 */
1453 if (fFlags & IEM_XCPT_FLAGS_ERR)
1454 {
1455 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
1456 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
1457 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
1458
1459 /* Check that there is sufficient space on the stack. */
1460 /** @todo Factor out segment limit checking for normal/expand down segments
1461 * into a separate function. */
1462 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
1463 {
1464 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
1465 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
1466 {
1467 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
1468 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
1469 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
1470 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
1471 }
1472 }
1473 else
1474 {
1475 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
1476 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
1477 {
1478 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
1479 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
1480 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
1481 }
1482 }
1483
1484
1485 if (fIsNewTss386)
1486 rcStrict = iemMemStackPushU32(pVCpu, uErr);
1487 else
1488 rcStrict = iemMemStackPushU16(pVCpu, uErr);
1489 if (rcStrict != VINF_SUCCESS)
1490 {
1491 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
1492 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
1493 return rcStrict;
1494 }
1495 }
1496
1497 /* Check the new EIP against the new CS limit. */
1498 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
1499 {
1500 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
1501 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
1502 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
1503 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
1504 }
1505
1506 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
1507 pVCpu->cpum.GstCtx.ss.Sel));
1508 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1509}
1510
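/*
 * Illustrative sketch (not part of the original sources): the expand-up vs.
 * expand-down stack limit checks used when pushing the error code above can
 * be condensed into a standalone helper along these lines.  The helper name
 * and parameters are invented for the example.
 */
#include <stdint.h>
#include <stdbool.h>

static bool sketchStackHasRoom(uint32_t uEsp, uint8_t cbFrame, uint32_t cbLimit,
                               bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        /* Expand-up segment: valid offsets are [0, cbLimit] and we need cbFrame bytes below ESP. */
        return uEsp - 1 <= cbLimit && uEsp >= cbFrame;
    /* Expand-down segment: valid offsets are (cbLimit, 0xffff or 0xffffffff]. */
    uint32_t const uUpperBound = fDefBig ? UINT32_MAX : UINT16_MAX;
    return uEsp - 1 <= uUpperBound && uEsp - cbFrame >= cbLimit + 1;
}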
1511
1512/**
1513 * Implements exceptions and interrupts for protected mode.
1514 *
1515 * @returns VBox strict status code.
1516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1517 * @param cbInstr The number of bytes to offset rIP by in the return
1518 * address.
1519 * @param u8Vector The interrupt / exception vector number.
1520 * @param fFlags The flags.
1521 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1522 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1523 */
1524static VBOXSTRICTRC
1525iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
1526 uint8_t cbInstr,
1527 uint8_t u8Vector,
1528 uint32_t fFlags,
1529 uint16_t uErr,
1530 uint64_t uCr2) RT_NOEXCEPT
1531{
1532 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1533
1534 /*
1535 * Read the IDT entry.
1536 */
1537 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1538 {
1539 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1540 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1541 }
1542 X86DESC Idte;
1543 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
1544 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
1545 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1546 {
1547 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1548 return rcStrict;
1549 }
1550 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
1551 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
1552 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
1553 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
1554
1555 /*
1556 * Check the descriptor type, DPL and such.
1557 * ASSUMES this is done in the same order as described for call-gate calls.
1558 */
1559 if (Idte.Gate.u1DescType)
1560 {
1561 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1562 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1563 }
1564 bool fTaskGate = false;
1565 uint8_t f32BitGate = true;
1566 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1567 switch (Idte.Gate.u4Type)
1568 {
1569 case X86_SEL_TYPE_SYS_UNDEFINED:
1570 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1571 case X86_SEL_TYPE_SYS_LDT:
1572 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1573 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1574 case X86_SEL_TYPE_SYS_UNDEFINED2:
1575 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1576 case X86_SEL_TYPE_SYS_UNDEFINED3:
1577 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1578 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1579 case X86_SEL_TYPE_SYS_UNDEFINED4:
1580 {
1581 /** @todo check what actually happens when the type is wrong...
1582 * esp. call gates. */
1583 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1584 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1585 }
1586
1587 case X86_SEL_TYPE_SYS_286_INT_GATE:
1588 f32BitGate = false;
1589 RT_FALL_THRU();
1590 case X86_SEL_TYPE_SYS_386_INT_GATE:
1591 fEflToClear |= X86_EFL_IF;
1592 break;
1593
1594 case X86_SEL_TYPE_SYS_TASK_GATE:
1595 fTaskGate = true;
1596#ifndef IEM_IMPLEMENTS_TASKSWITCH
1597 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
1598#endif
1599 break;
1600
1601 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1602 f32BitGate = false;
1603 break;
1604 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1605 break;
1606
1607 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1608 }
1609
1610 /* Check DPL against CPL if applicable. */
1611 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
1612 {
1613 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
1614 {
1615 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
1616 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1617 }
1618 }
1619
1620 /* Is it there? */
1621 if (!Idte.Gate.u1Present)
1622 {
1623 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1624 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1625 }
1626
1627 /* Is it a task-gate? */
1628 if (fTaskGate)
1629 {
1630 /*
1631 * Construct the error code masks based on what caused this task switch.
1632 * See Intel Instruction reference for INT.
1633 */
1634 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1635 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
1636 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
1637 RTSEL SelTss = Idte.Gate.u16Sel;
1638
1639 /*
1640 * Fetch the TSS descriptor in the GDT.
1641 */
1642 IEMSELDESC DescTSS;
1643 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
1644 if (rcStrict != VINF_SUCCESS)
1645 {
1646 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
1647 VBOXSTRICTRC_VAL(rcStrict)));
1648 return rcStrict;
1649 }
1650
1651 /* The TSS descriptor must be a system segment and be available (not busy). */
1652 if ( DescTSS.Legacy.Gen.u1DescType
1653 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
1654 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
1655 {
1656 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
1657 u8Vector, SelTss, DescTSS.Legacy.au64));
1658 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
1659 }
1660
1661 /* The TSS must be present. */
1662 if (!DescTSS.Legacy.Gen.u1Present)
1663 {
1664 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
1665 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
1666 }
1667
1668 /* Do the actual task switch. */
1669 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
1670 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
1671 fFlags, uErr, uCr2, SelTss, &DescTSS);
1672 }
1673
1674 /* A null CS is bad. */
1675 RTSEL NewCS = Idte.Gate.u16Sel;
1676 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
1677 {
1678 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1679 return iemRaiseGeneralProtectionFault0(pVCpu);
1680 }
1681
1682 /* Fetch the descriptor for the new CS. */
1683 IEMSELDESC DescCS;
1684 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
1685 if (rcStrict != VINF_SUCCESS)
1686 {
1687 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
1688 return rcStrict;
1689 }
1690
1691 /* Must be a code segment. */
1692 if (!DescCS.Legacy.Gen.u1DescType)
1693 {
1694 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1695 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1696 }
1697 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1698 {
1699 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1700 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1701 }
1702
1703 /* Don't allow lowering the privilege level. */
1704 /** @todo Does the lowering of privileges apply to software interrupts
1705 * only? This has bearings on the more-privileged or
1706 * same-privilege stack behavior further down. A testcase would
1707 * be nice. */
1708 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1709 {
1710 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1711 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1712 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1713 }
1714
1715 /* Make sure the selector is present. */
1716 if (!DescCS.Legacy.Gen.u1Present)
1717 {
1718 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1719 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
1720 }
1721
1722#ifdef LOG_ENABLED
1723 /* If software interrupt, try decode it if logging is enabled and such. */
1724 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1725 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
1726 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
1727#endif
1728
1729 /* Check the new EIP against the new CS limit. */
1730 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1731 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1732 ? Idte.Gate.u16OffsetLow
1733 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1734 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
1735 if (uNewEip > cbLimitCS)
1736 {
1737 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
1738 u8Vector, uNewEip, cbLimitCS, NewCS));
1739 return iemRaiseGeneralProtectionFault(pVCpu, 0);
1740 }
1741 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
1742
1743 /* Calc the flag image to push. */
1744 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1745 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
1746 fEfl &= ~X86_EFL_RF;
1747 else
1748 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
1749
1750 /* From V8086 mode only go to CPL 0. */
1751 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1752 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
1753 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
1754 {
1755 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
1756 return iemRaiseGeneralProtectionFault(pVCpu, 0);
1757 }
1758
1759 /*
1760 * If the privilege level changes, we need to get a new stack from the TSS.
1761 * This in turns means validating the new SS and ESP...
1762 */
1763 if (uNewCpl != IEM_GET_CPL(pVCpu))
1764 {
1765 RTSEL NewSS;
1766 uint32_t uNewEsp;
1767 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
1768 if (rcStrict != VINF_SUCCESS)
1769 return rcStrict;
1770
1771 IEMSELDESC DescSS;
1772 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
1773 if (rcStrict != VINF_SUCCESS)
1774 return rcStrict;
1775 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
1776 if (!DescSS.Legacy.Gen.u1DefBig)
1777 {
1778 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
1779 uNewEsp = (uint16_t)uNewEsp;
1780 }
1781
1782 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
1783
1784 /* Check that there is sufficient space for the stack frame. */
1785 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
1786 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
1787 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
1788 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
1789
1790 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
1791 {
1792 if ( uNewEsp - 1 > cbLimitSS
1793 || uNewEsp < cbStackFrame)
1794 {
1795 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1796 u8Vector, NewSS, uNewEsp, cbStackFrame));
1797 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
1798 }
1799 }
1800 else
1801 {
1802 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
1803 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
1804 {
1805 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
1806 u8Vector, NewSS, uNewEsp, cbStackFrame));
1807 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
1808 }
1809 }
1810
1811 /*
1812 * Start making changes.
1813 */
1814
1815 /* Set the new CPL so that stack accesses use it. */
1816 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
1817 IEM_SET_CPL(pVCpu, uNewCpl);
1818
1819 /* Create the stack frame. */
1820 uint8_t bUnmapInfoStackFrame;
1821 RTPTRUNION uStackFrame;
1822 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
1823 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
1824 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
1825 if (rcStrict != VINF_SUCCESS)
1826 return rcStrict;
1827 if (f32BitGate)
1828 {
1829 if (fFlags & IEM_XCPT_FLAGS_ERR)
1830 *uStackFrame.pu32++ = uErr;
1831 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1832 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
1833 uStackFrame.pu32[2] = fEfl;
1834 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
1835 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
1836 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
1837 if (fEfl & X86_EFL_VM)
1838 {
1839 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
1840 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
1841 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
1842 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
1843 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
1844 }
1845 }
1846 else
1847 {
1848 if (fFlags & IEM_XCPT_FLAGS_ERR)
1849 *uStackFrame.pu16++ = uErr;
1850 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1851 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
1852 uStackFrame.pu16[2] = fEfl;
1853 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
1854 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
1855 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
1856 if (fEfl & X86_EFL_VM)
1857 {
1858 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1859 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
1860 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
1861 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
1862 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
1863 }
1864 }
1865 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
1866 if (rcStrict != VINF_SUCCESS)
1867 return rcStrict;
1868
1869 /* Mark the selectors 'accessed' (hope this is the correct time). */
1870 /** @todo testcase: exactly _when_ are the accessed bits set - before or
1871 * after pushing the stack frame? (Write protect the gdt + stack to
1872 * find out.) */
1873 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1874 {
1875 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
1876 if (rcStrict != VINF_SUCCESS)
1877 return rcStrict;
1878 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1879 }
1880
1881 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1882 {
1883 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
1884 if (rcStrict != VINF_SUCCESS)
1885 return rcStrict;
1886 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1887 }
1888
1889 /*
1890 * Start committing the register changes (joins with the DPL=CPL branch).
1891 */
1892 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
1893 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
1894 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1895 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
1896 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1897 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1898 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
1899 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
1900 * SP is loaded).
1901 * Need to check the other combinations too:
1902 * - 16-bit TSS, 32-bit handler
1903 * - 32-bit TSS, 16-bit handler */
1904 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1905 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
1906 else
1907 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
1908
1909 if (fEfl & X86_EFL_VM)
1910 {
1911 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
1912 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
1913 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
1914 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
1915 }
1916 }
1917 /*
1918 * Same privilege, no stack change and smaller stack frame.
1919 */
1920 else
1921 {
1922 uint64_t uNewRsp;
1923 uint8_t bUnmapInfoStackFrame;
1924 RTPTRUNION uStackFrame;
1925 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
1926 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
1927 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
1928 if (rcStrict != VINF_SUCCESS)
1929 return rcStrict;
1930
1931 if (f32BitGate)
1932 {
1933 if (fFlags & IEM_XCPT_FLAGS_ERR)
1934 *uStackFrame.pu32++ = uErr;
1935 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1936 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
1937 uStackFrame.pu32[2] = fEfl;
1938 }
1939 else
1940 {
1941 if (fFlags & IEM_XCPT_FLAGS_ERR)
1942 *uStackFrame.pu16++ = uErr;
1943 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1944 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
1945 uStackFrame.pu16[2] = fEfl;
1946 }
1947 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
1948 if (rcStrict != VINF_SUCCESS)
1949 return rcStrict;
1950
1951 /* Mark the CS selector as 'accessed'. */
1952 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1953 {
1954 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
1955 if (rcStrict != VINF_SUCCESS)
1956 return rcStrict;
1957 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1958 }
1959
1960 /*
1961 * Start committing the register changes (joins with the other branch).
1962 */
1963 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1964 }
1965
1966 /* ... register committing continues. */
1967 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
1968 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
1969 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1970 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
1971 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
1972 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1973
1974 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
1975 fEfl &= ~fEflToClear;
1976 IEMMISC_SET_EFL(pVCpu, fEfl);
1977
1978 if (fFlags & IEM_XCPT_FLAGS_CR2)
1979 pVCpu->cpum.GstCtx.cr2 = uCr2;
1980
1981 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1982 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1983
1984 /* Make sure the execution flags are correct. */
1985 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
1986 if (fExecNew != pVCpu->iem.s.fExec)
1987 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
1988 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
1989 pVCpu->iem.s.fExec = fExecNew;
1990 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
1991
1992 /*
1993 * Deal with debug events that follow the exception and clear inhibit flags.
1994 */
1995 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1996 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
1997 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
1998 else
1999 {
2000 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
2001 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2002 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2003 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2004 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2005 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2006 return iemRaiseDebugException(pVCpu);
2007 }
2008
2009 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2010}
2011
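/*
 * Illustrative sketch (not from the sources): the IDT-relative error codes
 * passed to the #GP/#NP raisers above follow the architectural layout with
 * EXT in bit 0, the IDT flag in bit 1, TI in bit 2 and the selector/vector
 * index from bit 3 up.  The helper name and literal constants below are
 * spelled out only for the example.
 */
#include <stdint.h>
#include <stdbool.h>

static uint16_t sketchMakeIdtErrCode(uint8_t bVector, bool fExternal)
{
    return (uint16_t)((uint16_t)bVector << 3)  /* index of the offending IDT entry */
         | UINT16_C(0x2)                       /* IDT flag: the index refers to the IDT */
         | (fExternal ? UINT16_C(0x1) : 0);    /* EXT: fault happened while delivering an external event */
}
/* Example: an out-of-bounds vector 0x41 yields an error code of 0x20a when EXT is clear. */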
2012
2013/**
2014 * Implements exceptions and interrupts for long mode.
2015 *
2016 * @returns VBox strict status code.
2017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2018 * @param cbInstr The number of bytes to offset rIP by in the return
2019 * address.
2020 * @param u8Vector The interrupt / exception vector number.
2021 * @param fFlags The flags.
2022 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2023 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2024 */
2025static VBOXSTRICTRC
2026iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
2027 uint8_t cbInstr,
2028 uint8_t u8Vector,
2029 uint32_t fFlags,
2030 uint16_t uErr,
2031 uint64_t uCr2) RT_NOEXCEPT
2032{
2033 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2034
2035 /*
2036 * Read the IDT entry.
2037 */
2038 uint16_t offIdt = (uint16_t)u8Vector << 4;
2039 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
2040 {
2041 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2042 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2043 }
2044 X86DESC64 Idte;
2045#ifdef _MSC_VER /* Shut up silly compiler warning. */
2046 Idte.au64[0] = 0;
2047 Idte.au64[1] = 0;
2048#endif
2049 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
2050 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2051 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
2052 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2053 {
2054 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2055 return rcStrict;
2056 }
2057 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
2058 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2059 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2060
2061 /*
2062 * Check the descriptor type, DPL and such.
2063 * ASSUMES this is done in the same order as described for call-gate calls.
2064 */
2065 if (Idte.Gate.u1DescType)
2066 {
2067 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2068 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2069 }
2070 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2071 switch (Idte.Gate.u4Type)
2072 {
2073 case AMD64_SEL_TYPE_SYS_INT_GATE:
2074 fEflToClear |= X86_EFL_IF;
2075 break;
2076 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
2077 break;
2078
2079 default:
2080 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2081 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2082 }
2083
2084 /* Check DPL against CPL if applicable. */
2085 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2086 {
2087 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
2088 {
2089 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
2090 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2091 }
2092 }
2093
2094 /* Is it there? */
2095 if (!Idte.Gate.u1Present)
2096 {
2097 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
2098 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2099 }
2100
2101 /* A null CS is bad. */
2102 RTSEL NewCS = Idte.Gate.u16Sel;
2103 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2104 {
2105 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2106 return iemRaiseGeneralProtectionFault0(pVCpu);
2107 }
2108
2109 /* Fetch the descriptor for the new CS. */
2110 IEMSELDESC DescCS;
2111 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
2112 if (rcStrict != VINF_SUCCESS)
2113 {
2114 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117
2118 /* Must be a 64-bit code segment. */
2119 if (!DescCS.Long.Gen.u1DescType)
2120 {
2121 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2122 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2123 }
2124 if ( !DescCS.Long.Gen.u1Long
2125 || DescCS.Long.Gen.u1DefBig
2126 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
2127 {
2128 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2129 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2130 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2131 }
2132
2133 /* Don't allow lowering the privilege level. For non-conforming CS
2134 selectors, the CS.DPL sets the privilege level the trap/interrupt
2135 handler runs at. For conforming CS selectors, the CPL remains
2136 unchanged, but the CS.DPL must be <= CPL. */
2137 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2138 * when the CPU is in Ring-0. Result \#GP? */
2139 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
2140 {
2141 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2142 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2143 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2144 }
2145
2146
2147 /* Make sure the selector is present. */
2148 if (!DescCS.Legacy.Gen.u1Present)
2149 {
2150 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2151 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
2152 }
2153
2154 /* Check that the new RIP is canonical. */
2155 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
2156 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
2157 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
2158 if (!IEM_IS_CANONICAL(uNewRip))
2159 {
2160 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
2161 return iemRaiseGeneralProtectionFault0(pVCpu);
2162 }
2163
2164 /*
2165 * If the privilege level changes or if the IST isn't zero, we need to get
2166 * a new stack from the TSS.
2167 */
2168 uint64_t uNewRsp;
2169 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2170 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
2171 if ( uNewCpl != IEM_GET_CPL(pVCpu)
2172 || Idte.Gate.u3IST != 0)
2173 {
2174 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
2175 if (rcStrict != VINF_SUCCESS)
2176 return rcStrict;
2177 }
2178 else
2179 uNewRsp = pVCpu->cpum.GstCtx.rsp;
2180 uNewRsp &= ~(uint64_t)0xf;
2181
2182 /*
2183 * Calc the flag image to push.
2184 */
2185 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2186 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
2187 fEfl &= ~X86_EFL_RF;
2188 else
2189 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
2190
2191 /*
2192 * Start making changes.
2193 */
2194 /* Set the new CPL so that stack accesses use it. */
2195 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
2196 IEM_SET_CPL(pVCpu, uNewCpl);
2197/** @todo Setting CPL this early seems wrong as it would affect any errors we
2198 * raise while accessing the stack and (?) GDT/LDT... */
2199
2200 /* Create the stack frame. */
2201 uint8_t bUnmapInfoStackFrame;
2202 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
2203 RTPTRUNION uStackFrame;
2204 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
2205 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
2206 if (rcStrict != VINF_SUCCESS)
2207 return rcStrict;
2208
2209 if (fFlags & IEM_XCPT_FLAGS_ERR)
2210 *uStackFrame.pu64++ = uErr;
2211 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
2212 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
2213 uStackFrame.pu64[2] = fEfl;
2214 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
2215 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
2216 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
2217 if (rcStrict != VINF_SUCCESS)
2218 return rcStrict;
2219
2220 /* Mark the CS selector 'accessed' (hope this is the correct time). */
2221 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2222 * after pushing the stack frame? (Write protect the gdt + stack to
2223 * find out.) */
2224 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2225 {
2226 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
2227 if (rcStrict != VINF_SUCCESS)
2228 return rcStrict;
2229 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2230 }
2231
2232 /*
2233 * Start committing the register changes.
2234 */
2235 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
2236 * hidden registers when interrupting 32-bit or 16-bit code! */
2237 if (uNewCpl != uOldCpl)
2238 {
2239 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
2240 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
2241 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2242 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
2243 pVCpu->cpum.GstCtx.ss.u64Base = 0;
2244 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
2245 }
2246 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
2247 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2248 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2249 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2250 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
2251 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2252 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2253 pVCpu->cpum.GstCtx.rip = uNewRip;
2254
2255 fEfl &= ~fEflToClear;
2256 IEMMISC_SET_EFL(pVCpu, fEfl);
2257
2258 if (fFlags & IEM_XCPT_FLAGS_CR2)
2259 pVCpu->cpum.GstCtx.cr2 = uCr2;
2260
2261 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2262 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2263
2264 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
2265
2266 /*
2267 * Deal with debug events that follow the exception and clear inhibit flags.
2268 */
2269 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2270 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2271 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2272 else
2273 {
2274 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
2275 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2276 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2277 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2278 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2279 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2280 return iemRaiseDebugException(pVCpu);
2281 }
2282
2283 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2284}
2285
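/*
 * Illustrative sketch (not from the sources): the canonical-address test and
 * the 16-byte stack alignment applied before building the 64-bit exception
 * frame above boil down to roughly the following.  A 48-bit virtual address
 * width is assumed (no LA57); the names are invented for the example.
 */
#include <stdint.h>
#include <stdbool.h>

static bool sketchIsCanonical48(uint64_t uAddr)
{
    /* Canonical iff bits 63:47 are copies of bit 47, i.e. the address lands in
       [0, 0x00007fffffffffff] or [0xffff800000000000, UINT64_MAX]. */
    return uAddr + UINT64_C(0x0000800000000000) < UINT64_C(0x0001000000000000);
}

static uint64_t sketchAlignRspForXcptFrame(uint64_t uRsp)
{
    /* The new RSP is aligned down to a 16 byte boundary before the frame is pushed. */
    return uRsp & ~UINT64_C(0xf);
}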
2286
2287/**
2288 * Implements exceptions and interrupts.
2289 *
2290 * All exceptions and interrupts go thru this function!
2291 *
2292 * @returns VBox strict status code.
2293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2294 * @param cbInstr The number of bytes to offset rIP by in the return
2295 * address.
2296 * @param u8Vector The interrupt / exception vector number.
2297 * @param fFlags The flags.
2298 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2299 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2300 */
2301VBOXSTRICTRC
2302iemRaiseXcptOrInt(PVMCPUCC pVCpu,
2303 uint8_t cbInstr,
2304 uint8_t u8Vector,
2305 uint32_t fFlags,
2306 uint16_t uErr,
2307 uint64_t uCr2) RT_NOEXCEPT
2308{
2309 /*
2310 * Get all the state that we might need here.
2311 */
2312 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2313 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2314
2315#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
2316 /*
2317 * Flush prefetch buffer
2318 */
2319 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2320#endif
2321
2322 /*
2323 * Perform the V8086 IOPL check and upgrade the fault without nesting.
2324 */
2325 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
2326 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
2327 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
2328 | IEM_XCPT_FLAGS_BP_INSTR
2329 | IEM_XCPT_FLAGS_ICEBP_INSTR
2330 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2331 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
2332 {
2333 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
2334 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
2335 u8Vector = X86_XCPT_GP;
2336 uErr = 0;
2337 }
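    /* Worked note (not from the sources): with EFLAGS.VM=1 and IOPL=2, an INT 21h
       issued by V8086 code is converted right here into #GP(0) rather than being
       dispatched through the IDT, which is what allows a V86 monitor to intercept
       and virtualise the software interrupt. */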
2338
2339 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
2340#ifdef DBGFTRACE_ENABLED
2341 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
2342 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
2343 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
2344#endif
2345
2346 /*
2347 * Check if DBGF wants to intercept the exception.
2348 */
2349 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
2350 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
2351 { /* likely */ }
2352 else
2353 {
2354 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
2355 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
2356 if (rcStrict != VINF_SUCCESS)
2357 return rcStrict;
2358 }
2359
2360 /*
2361 * Evaluate whether NMI blocking should be in effect.
2362 * Normally, NMI blocking is in effect whenever we inject an NMI.
2363 */
2364 bool fBlockNmi = u8Vector == X86_XCPT_NMI
2365 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
2366
2367#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2368 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2369 {
2370 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
2371 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2372 return rcStrict0;
2373
2374 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
2375 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
2376 {
2377 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
2378 fBlockNmi = false;
2379 }
2380 }
2381#endif
2382
2383#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2384 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
2385 {
2386 /*
2387 * If the event is being injected as part of VMRUN, it isn't subject to event
2388 * intercepts in the nested-guest. However, secondary exceptions that occur
2389 * during injection of any event -are- subject to exception intercepts.
2390 *
2391 * See AMD spec. 15.20 "Event Injection".
2392 */
2393 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
2394 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
2395 else
2396 {
2397 /*
2398 * Check and handle if the event being raised is intercepted.
2399 */
2400 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2401 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
2402 return rcStrict0;
2403 }
2404 }
2405#endif
2406
2407 /*
2408 * Set NMI blocking if necessary.
2409 */
2410 if (fBlockNmi)
2411 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
2412
2413 /*
2414 * Do recursion accounting.
2415 */
2416 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
2417 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
2418 if (pVCpu->iem.s.cXcptRecursions == 0)
2419 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2420 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
2421 else
2422 {
2423 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2424 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
2425 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
2426
2427 if (pVCpu->iem.s.cXcptRecursions >= 4)
2428 {
2429#ifdef DEBUG_bird
2430 AssertFailed();
2431#endif
2432 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2433 }
2434
2435 /*
2436 * Evaluate the sequence of recurring events.
2437 */
2438 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
2439 NULL /* pXcptRaiseInfo */);
2440 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
2441 { /* likely */ }
2442 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
2443 {
2444 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
2445 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
2446 u8Vector = X86_XCPT_DF;
2447 uErr = 0;
2448#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2449 /* VMX nested-guest #DF intercept needs to be checked here. */
2450 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2451 {
2452 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
2453 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2454 return rcStrict0;
2455 }
2456#endif
2457 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
2458 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
2459 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2460 }
2461 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
2462 {
2463 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
2464 return iemInitiateCpuShutdown(pVCpu);
2465 }
2466 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
2467 {
2468 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
2469 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
2470 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
2471 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
2472 return VERR_EM_GUEST_CPU_HANG;
2473 }
2474 else
2475 {
2476 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
2477 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
2478 return VERR_IEM_IPE_9;
2479 }
2480
2481 /*
2482 * The 'EXT' bit is set when an exception occurs during delivery of an external
2483 * event (such as an interrupt or earlier exception)[1]. The privileged software
2484 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
2485 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
2486 *
2487 * [1] - Intel spec. 6.13 "Error Code"
2488 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
2489 * [3] - Intel Instruction reference for INT n.
2490 */
2491 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
2492 && (fFlags & IEM_XCPT_FLAGS_ERR)
2493 && u8Vector != X86_XCPT_PF
2494 && u8Vector != X86_XCPT_DF)
2495 {
2496 uErr |= X86_TRAP_ERR_EXTERNAL;
2497 }
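            /* Worked example (not from the sources): a #GP raised while an external
               interrupt is being delivered gets bit 0 (EXT) set in its error code,
               e.g. 0x0000 becomes 0x0001, whereas the same #GP caused by an explicit
               INT n or INT3 leaves EXT clear. */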
2498 }
2499
2500 pVCpu->iem.s.cXcptRecursions++;
2501 pVCpu->iem.s.uCurXcpt = u8Vector;
2502 pVCpu->iem.s.fCurXcpt = fFlags;
2503 pVCpu->iem.s.uCurXcptErr = uErr;
2504 pVCpu->iem.s.uCurXcptCr2 = uCr2;
2505
2506 /*
2507 * Extensive logging.
2508 */
2509#if defined(LOG_ENABLED) && defined(IN_RING3)
2510 if (LogIs3Enabled())
2511 {
2512 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
2513 char szRegs[4096];
2514 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2515 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2516 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2517 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2518 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2519 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2520 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2521 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2522 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2523 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2524 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2525 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2526 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2527 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2528 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2529 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2530 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2531 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2532 " efer=%016VR{efer}\n"
2533 " pat=%016VR{pat}\n"
2534 " sf_mask=%016VR{sf_mask}\n"
2535 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2536 " lstar=%016VR{lstar}\n"
2537 " star=%016VR{star} cstar=%016VR{cstar}\n"
2538 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2539 );
2540
2541 char szInstr[256];
2542 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2543 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2544 szInstr, sizeof(szInstr), NULL);
2545 Log3(("%s%s\n", szRegs, szInstr));
2546 }
2547#endif /* LOG_ENABLED */
2548
2549 /*
2550 * Stats.
2551 */
2552 uint64_t const uTimestamp = ASMReadTSC();
2553 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
2554 {
2555 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
2556 EMHistoryAddExit(pVCpu,
2557 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
2558 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
2559 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
2560 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
2561 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
2562 }
2563 else
2564 {
2565 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
2566 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
2567 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
2568 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
2569 if (fFlags & IEM_XCPT_FLAGS_ERR)
2570 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
2571 if (fFlags & IEM_XCPT_FLAGS_CR2)
2572 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
2573 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
2574 }
2575
2576 /*
2577 * Hack alert! Convert incoming debug events to silent ones on Intel.
2578 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
2579 */
2580 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2581 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2582 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
2583 { /* ignore */ }
2584 else
2585 {
2586 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
2587 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
2588 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
2589 | CPUMCTX_DBG_HIT_DRX_SILENT;
2590 }
2591
2592 /*
2593 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
2594 * to ensure that a stale TLB or paging cache entry will only cause one
2595 * spurious #PF.
2596 */
2597 if ( u8Vector == X86_XCPT_PF
2598 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
2599 IEMTlbInvalidatePage(pVCpu, uCr2);
2600
2601 /*
2602 * Call the mode specific worker function.
2603 */
2604 VBOXSTRICTRC rcStrict;
2605 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
2606 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2607 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
2608 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2609 else
2610 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2611
2612 /* Flush the prefetch buffer. */
2613 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2614
2615 /*
2616 * Unwind.
2617 */
2618 pVCpu->iem.s.cXcptRecursions--;
2619 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
2620 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
2621 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
2622 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
2623 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
2624 return rcStrict;
2625}
2626
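/*
 * Illustrative sketch (not the actual IEMEvaluateRecursiveXcpt logic): the
 * architectural escalation rules that the recursion handling above relies on
 * can be summarised roughly as below (Intel SDM vol 3, "Interrupt 8 - Double
 * Fault Exception").  The type and function names are invented for the
 * example; the real helper handles considerably more cases (VM-exits,
 * shutdown, etc.).
 */
#include <stdbool.h>

typedef enum { SKETCH_XCPT_BENIGN, SKETCH_XCPT_CONTRIBUTORY, SKETCH_XCPT_PAGE_FAULT } SKETCHXCPTCLASS;

static bool sketchEscalatesToDoubleFault(SKETCHXCPTCLASS enmFirst, SKETCHXCPTCLASS enmSecond)
{
    /* A contributory exception following a contributory one becomes #DF. */
    if (enmFirst == SKETCH_XCPT_CONTRIBUTORY && enmSecond == SKETCH_XCPT_CONTRIBUTORY)
        return true;
    /* A #PF followed by another #PF or by a contributory exception becomes #DF. */
    if (enmFirst == SKETCH_XCPT_PAGE_FAULT && enmSecond != SKETCH_XCPT_BENIGN)
        return true;
    return false; /* Benign combinations are simply delivered one after the other. */
}
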
2627#ifdef IEM_WITH_SETJMP
2628/**
2629 * See iemRaiseXcptOrInt. Will not return.
2630 */
2631DECL_NO_RETURN(void)
2632iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
2633 uint8_t cbInstr,
2634 uint8_t u8Vector,
2635 uint32_t fFlags,
2636 uint16_t uErr,
2637 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
2638{
2639 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2640 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2641}
2642#endif
2643
2644
2645/** \#DE - 00. */
2646VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
2647{
2648 if (GCMIsInterceptingXcptDE(pVCpu))
2649 {
2650 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
2651 if (rc == VINF_SUCCESS)
2652 {
2653 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
2654 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
2655 }
2656 }
2657 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2658}
2659
2660
2661#ifdef IEM_WITH_SETJMP
2662/** \#DE - 00. */
2663DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2664{
2665 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2666}
2667#endif
2668
2669
2670/** \#DB - 01.
2671 * @note This automatically clears DR7.GD. */
2672VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
2673{
2674 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
2675 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2676 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
2677}
2678
2679
2680/** \#BR - 05. */
2681VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
2682{
2683 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2684}
2685
2686
2687/** \#UD - 06. */
2688VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
2689{
2690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2691}
2692
2693
2694#ifdef IEM_WITH_SETJMP
2695/** \#UD - 06. */
2696DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2697{
2698 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2699}
2700#endif
2701
2702
2703/** \#NM - 07. */
2704VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
2705{
2706 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2707}
2708
2709
2710#ifdef IEM_WITH_SETJMP
2711/** \#NM - 07. */
2712DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2713{
2714 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2715}
2716#endif
2717
2718
2719/** \#TS(err) - 0a. */
2720VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2721{
2722 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2723}
2724
2725
2726/** \#TS(tr) - 0a. */
2727VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
2728{
2729 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2730 pVCpu->cpum.GstCtx.tr.Sel, 0);
2731}
2732
2733
2734/** \#TS(0) - 0a. */
2735VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
2736{
2737 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2738 0, 0);
2739}
2740
2741
2742/** \#TS(err) - 0a. */
2743VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2744{
2745 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2746 uSel & X86_SEL_MASK_OFF_RPL, 0);
2747}
2748
2749
2750/** \#NP(err) - 0b. */
2751VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2752{
2753 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2754}
2755
2756
2757/** \#NP(sel) - 0b. */
2758VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2759{
2760 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
2761 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
2762 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2763 uSel & ~X86_SEL_RPL, 0);
2764}
2765
2766
2767/** \#SS(seg) - 0c. */
2768VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2769{
2770 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
2771 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
2772 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2773 uSel & ~X86_SEL_RPL, 0);
2774}
2775
2776
2777/** \#SS(err) - 0c. */
2778VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2779{
2780 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
2781 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
2782 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2783}
2784
2785
2786/** \#GP(n) - 0d. */
2787VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2788{
2789 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
2790 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2791}
2792
2793
2794/** \#GP(0) - 0d. */
2795VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
2796{
2797 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2798 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2799}
2800
2801#ifdef IEM_WITH_SETJMP
2802/** \#GP(0) - 0d. */
2803DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2804{
2805 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2806 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2807}
2808#endif
2809
2810
2811/** \#GP(sel) - 0d. */
2812VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
2813{
2814 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
2815 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2816 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2817 Sel & ~X86_SEL_RPL, 0);
2818}
2819
2820
2821/** \#GP(0) - 0d. */
2822VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
2823{
2824 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2825 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2826}
2827
2828
2829/** \#GP(sel) - 0d. */
2830VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
2831{
2832 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2834 NOREF(iSegReg); NOREF(fAccess);
2835 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2836 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2837}
2838
2839#ifdef IEM_WITH_SETJMP
2840/** \#GP(sel) - 0d, longjmp. */
2841DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
2842{
2843 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2844 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2845 NOREF(iSegReg); NOREF(fAccess);
2846 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2847 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2848}
2849#endif
2850
2851/** \#GP(sel) - 0d. */
2852VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
2853{
2854 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
2855 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2856 NOREF(Sel);
2857 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2858}
2859
2860#ifdef IEM_WITH_SETJMP
2861/** \#GP(sel) - 0d, longjmp. */
2862DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
2863{
2864 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
2865 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2866 NOREF(Sel);
2867 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2868}
2869#endif
2870
2871
2872/** \#GP(sel) - 0d. */
2873VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
2874{
2875 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2876 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2877 NOREF(iSegReg); NOREF(fAccess);
2878 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2879}
2880
2881#ifdef IEM_WITH_SETJMP
2882/** \#GP(sel) - 0d, longjmp. */
2883DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
2884{
2885 NOREF(iSegReg); NOREF(fAccess);
2886 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2887}
2888#endif
2889
2890
2891/** \#PF(n) - 0e. */
2892VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
2893{
2894 uint16_t uErr;
2895 switch (rc)
2896 {
2897 case VERR_PAGE_NOT_PRESENT:
2898 case VERR_PAGE_TABLE_NOT_PRESENT:
2899 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2900 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2901 uErr = 0;
2902 break;
2903
2904 case VERR_RESERVED_PAGE_TABLE_BITS:
2905 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
2906 break;
2907
2908 default:
2909 AssertMsgFailed(("%Rrc\n", rc));
2910 RT_FALL_THRU();
2911 case VERR_ACCESS_DENIED:
2912 uErr = X86_TRAP_PF_P;
2913 break;
2914 }
2915
2916 if (IEM_GET_CPL(pVCpu) == 3)
2917 uErr |= X86_TRAP_PF_US;
2918
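    /* Instruction fetches set the I/D bit in the error code; it is only
       reported here when no-execute paging is in effect (CR4.PAE && EFER.NXE). */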
2919 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2920 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2921 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
2922 uErr |= X86_TRAP_PF_ID;
2923
2924#if 0 /* This is so much non-sense, really. Why was it done like that? */
2925 /* Note! RW access callers reporting a WRITE protection fault, will clear
2926 the READ flag before calling. So, read-modify-write accesses (RW)
2927 can safely be reported as READ faults. */
2928 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2929 uErr |= X86_TRAP_PF_RW;
2930#else
2931 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2932 {
2933 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
2934 /// (regardless of outcome of the comparison in the latter case).
2935 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
2936 uErr |= X86_TRAP_PF_RW;
2937 }
2938#endif
2939
2940 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
2941 of the memory operand rather than at the start of it. (Not sure what
2942       happens if it crosses a page boundary.)  The current heuristic for
2943       this is to report the #PF for the last byte if the access is more than
2944       64 bytes.  This is probably not correct, but we can work that out later;
2945       the main objective now is to get FXSAVE to work like on real hardware and
2946       make bs3-cpu-basic-2 work. */
2947 if (cbAccess <= 64)
2948    { /* likely */ }
2949 else
2950 GCPtrWhere += cbAccess - 1;
2951
2952 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2953 uErr, GCPtrWhere);
2954}
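
/* Two examples of error codes composed by the logic above: a CPL-3 write that
   faults on a present, read-only page (VERR_ACCESS_DENIED) yields
   X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW, while a CPL-3 instruction
   fetch from a not-present page with CR4.PAE and EFER.NXE set yields
   X86_TRAP_PF_US | X86_TRAP_PF_ID. */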
2955
2956#ifdef IEM_WITH_SETJMP
2957/** \#PF(n) - 0e, longjmp. */
2958DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
2959 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
2960{
2961 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
2962}
2963#endif
2964
2965
2966/** \#MF(0) - 10. */
2967VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
2968{
2969 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
2970 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2971
2972 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
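    /* With CR0.NE clear the fault is reported the legacy way: FERR# is routed
       through the (virtual) PIC as IRQ 13 and the instruction completes, so we
       advance RIP below instead of raising a trap. */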
2973 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
2974 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
2975}
2976
2977#ifdef IEM_WITH_SETJMP
2978/** \#MF(0) - 10, longjmp. */
2979DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2980{
2981 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
2982}
2983#endif
2984
2985
2986/** \#AC(0) - 11. */
2987VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
2988{
2989 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2990}
2991
2992#ifdef IEM_WITH_SETJMP
2993/** \#AC(0) - 11, longjmp. */
2994DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2995{
2996 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
2997}
2998#endif
2999
3000
3001/** \#XF(0)/\#XM(0) - 19. */
3002VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
3003{
3004 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3005}
3006
3007
3008#ifdef IEM_WITH_SETJMP
3009/** \#XF(0)/\#XM(0) - 19, longjmp. */
3010DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
3011{
3012 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
3013}
3014#endif
3015
3016
3017/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
3018IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
3019{
3020 NOREF(cbInstr);
3021 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3022}
3023
3024
3025/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
3026IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
3027{
3028 NOREF(cbInstr);
3029 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3030}
3031
3032
3033/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
3034IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
3035{
3036 NOREF(cbInstr);
3037 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3038}
3039
3040
3041/**
3042 * Checks if IEM is in the process of delivering an event (interrupt or
3043 * exception).
3044 *
3045 * @returns true if we're in the process of raising an interrupt or exception,
3046 * false otherwise.
3047 * @param pVCpu The cross context virtual CPU structure.
3048 * @param puVector Where to store the vector associated with the
3049 * currently delivered event, optional.
3050 * @param   pfFlags         Where to store the event delivery flags (see
3051 * IEM_XCPT_FLAGS_XXX), optional.
3052 * @param puErr Where to store the error code associated with the
3053 * event, optional.
3054 * @param puCr2 Where to store the CR2 associated with the event,
3055 * optional.
3056 * @remarks The caller should check the flags to determine if the error code and
3057 * CR2 are valid for the event.
3058 */
3059VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
3060{
3061 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
3062 if (fRaisingXcpt)
3063 {
3064 if (puVector)
3065 *puVector = pVCpu->iem.s.uCurXcpt;
3066 if (pfFlags)
3067 *pfFlags = pVCpu->iem.s.fCurXcpt;
3068 if (puErr)
3069 *puErr = pVCpu->iem.s.uCurXcptErr;
3070 if (puCr2)
3071 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
3072 }
3073 return fRaisingXcpt;
3074}
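

/*
 * A minimal usage sketch for IEMGetCurrentXcpt() - illustrative only and
 * compiled out; the helper name and log layout here are made up, only the
 * API above is taken from this file.
 */
#if 0
static void iemExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        /* Per the remarks above, only trust uErr/uCr2 when the flags say so. */
        Log(("in-flight xcpt=%#x fFlags=%#x", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log((" uErr=%#x", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log((" cr2=%RX64", uCr2));
        Log(("\n"));
    }
}
#endif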
3075
3076/** @} */
3077