VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllXcpt-x86.cpp@108248

Last change on this file: r108248, checked in by vboxsync, 2 months ago

VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 125.9 KB
1/* $Id: IEMAllXcpt-x86.cpp 108248 2025-02-17 00:34:56Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - x86 target, exceptions & interrupts.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/pdmapic.h>
40#include <VBox/vmm/pdm.h>
41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/iom.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/gcm.h>
47#include <VBox/vmm/gim.h>
48#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
49# include <VBox/vmm/em.h>
50# include <VBox/vmm/hm_svm.h>
51#endif
52#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
53# include <VBox/vmm/hmvmxinline.h>
54#endif
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/dbgf.h>
57#include <VBox/vmm/dbgftrace.h>
58#include "IEMInternal.h"
59#include <VBox/vmm/vmcc.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/dis.h>
64#include <iprt/asm-math.h>
65#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
66# include <iprt/asm-amd64-x86.h>
67#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
68# include <iprt/asm-arm.h>
69#endif
70#include <iprt/assert.h>
71#include <iprt/string.h>
72#include <iprt/x86.h>
73
74#include "IEMInline.h"
75
76
77/*********************************************************************************************************************************
78* Structures and Typedefs *
79*********************************************************************************************************************************/
80/**
81 * CPU exception classes.
82 */
83typedef enum IEMXCPTCLASS
84{
85 IEMXCPTCLASS_BENIGN,
86 IEMXCPTCLASS_CONTRIBUTORY,
87 IEMXCPTCLASS_PAGE_FAULT,
88 IEMXCPTCLASS_DOUBLE_FAULT
89} IEMXCPTCLASS;
90
91
92
93/** @name Misc Worker Functions.
94 * @{
95 */
96
97/**
98 * Gets the exception class for the specified exception vector.
99 *
100 * @returns The class of the specified exception.
101 * @param uVector The exception vector.
102 */
103static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
104{
105 Assert(uVector <= X86_XCPT_LAST);
106 switch (uVector)
107 {
108 case X86_XCPT_DE:
109 case X86_XCPT_TS:
110 case X86_XCPT_NP:
111 case X86_XCPT_SS:
112 case X86_XCPT_GP:
113 case X86_XCPT_SX: /* AMD only */
114 return IEMXCPTCLASS_CONTRIBUTORY;
115
116 case X86_XCPT_PF:
117 case X86_XCPT_VE: /* Intel only */
118 return IEMXCPTCLASS_PAGE_FAULT;
119
120 case X86_XCPT_DF:
121 return IEMXCPTCLASS_DOUBLE_FAULT;
122 }
123 return IEMXCPTCLASS_BENIGN;
124}
125
126
127/**
128 * Evaluates how to handle an exception caused during delivery of another event
129 * (exception / interrupt).
130 *
131 * @returns How to handle the recursive exception.
132 * @param pVCpu The cross context virtual CPU structure of the
133 * calling thread.
134 * @param fPrevFlags The flags of the previous event.
135 * @param uPrevVector The vector of the previous event.
136 * @param fCurFlags The flags of the current exception.
137 * @param uCurVector The vector of the current exception.
138 * @param pfXcptRaiseInfo Where to store additional information about the
139 * exception condition. Optional.
140 */
141VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
142 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
143{
144 /*
145 * Only CPU exceptions can be raised while delivering other events; software-interrupt-generated
146 * exceptions (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
147 */
148 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
149 Assert(pVCpu); RT_NOREF(pVCpu);
150 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
151
152 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
153 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
154 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
155 {
156 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
157 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
158 {
159 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
160 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
161 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
162 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
163 {
164 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
165 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
166 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
167 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
168 uCurVector, pVCpu->cpum.GstCtx.cr2));
169 }
170 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
171 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
172 {
173 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
174 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
175 }
176 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
177 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
178 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
179 {
180 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
181 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
182 }
183 }
184 else
185 {
186 if (uPrevVector == X86_XCPT_NMI)
187 {
188 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
189 if (uCurVector == X86_XCPT_PF)
190 {
191 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
192 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
193 }
194 }
195 else if ( uPrevVector == X86_XCPT_AC
196 && uCurVector == X86_XCPT_AC)
197 {
198 enmRaise = IEMXCPTRAISE_CPU_HANG;
199 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
200 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
201 }
202 }
203 }
204 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
205 {
206 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
207 if (uCurVector == X86_XCPT_PF)
208 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
209 }
210 else
211 {
212 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
213 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
214 }
215
216 if (pfXcptRaiseInfo)
217 *pfXcptRaiseInfo = fRaiseInfo;
218 return enmRaise;
219}
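
/* For reference, the decision logic above corresponds to the usual x86 event
 * combination matrix (cf. Intel SDM, "Conditions for Generating a Double Fault");
 * rows are the class of the previous event, columns the current exception:
 *
 *                 |  benign  | contributory | page fault
 *   --------------+----------+--------------+--------------
 *   benign        |  deliver |   deliver    |   deliver
 *   contributory  |  deliver |     #DF      |   deliver
 *   page fault    |  deliver |     #DF      |     #DF
 *   double fault  |  deliver | triple fault | triple fault
 *
 * NMI and recursive #AC during delivery get the special handling seen in the
 * else-branch above. */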
220
221
222/**
223 * Enters the CPU shutdown state initiated by a triple fault or other
224 * unrecoverable conditions.
225 *
226 * @returns Strict VBox status code.
227 * @param pVCpu The cross context virtual CPU structure of the
228 * calling thread.
229 */
230static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
231{
232 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
233 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
234
235 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
236 {
237 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
238 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
239 }
240
241 RT_NOREF(pVCpu);
242 return VINF_EM_TRIPLE_FAULT;
243}
244
245
246/**
247 * Validates a new SS segment.
248 *
249 * @returns VBox strict status code.
250 * @param pVCpu The cross context virtual CPU structure of the
251 * calling thread.
252 * @param NewSS The new SS selector.
253 * @param uCpl The CPL to load the stack for.
254 * @param pDesc Where to return the descriptor.
255 */
256static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
257{
258 /* Null selectors are not allowed (we're not called for dispatching
259 interrupts with SS=0 in long mode). */
260 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
261 {
262 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
263 return iemRaiseTaskSwitchFault0(pVCpu);
264 }
265
266 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
267 if ((NewSS & X86_SEL_RPL) != uCpl)
268 {
269 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
270 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
271 }
272
273 /*
274 * Read the descriptor.
275 */
276 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
277 if (rcStrict != VINF_SUCCESS)
278 return rcStrict;
279
280 /*
281 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
282 */
283 if (!pDesc->Legacy.Gen.u1DescType)
284 {
285 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
286 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
287 }
288
289 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
290 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
291 {
292 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
293 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
294 }
295 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
296 {
297 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
298 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
299 }
300
301 /* Is it there? */
302 /** @todo testcase: Is this checked before the canonical / limit check below? */
303 if (!pDesc->Legacy.Gen.u1Present)
304 {
305 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
306 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
307 }
308
309 return VINF_SUCCESS;
310}
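
/* For reference, the SS validation above follows the LSS/POP SS/MOV SS rules:
 * the selector must be non-null with RPL == uCpl, and the descriptor must be a
 * present, writable data segment with DPL == uCpl; violations raise #TS, while
 * a not-present segment raises #NP. */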
311
312/** @} */
313
314
315/** @name Raising Exceptions.
316 *
317 * @{
318 */
319
320
321/**
322 * Loads the specified stack far pointer from the TSS.
323 *
324 * @returns VBox strict status code.
325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
326 * @param uCpl The CPL to load the stack for.
327 * @param pSelSS Where to return the new stack segment.
328 * @param puEsp Where to return the new stack pointer.
329 */
330static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
331{
332 VBOXSTRICTRC rcStrict;
333 Assert(uCpl < 4);
334
335 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
336 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
337 {
338 /*
339 * 16-bit TSS (X86TSS16).
340 */
341 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
342 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
343 {
344 uint32_t off = uCpl * 4 + 2;
345 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
346 {
347 /** @todo check actual access pattern here. */
348 uint32_t u32Tmp = 0; /* gcc maybe... */
349 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
350 if (rcStrict == VINF_SUCCESS)
351 {
352 *puEsp = RT_LOWORD(u32Tmp);
353 *pSelSS = RT_HIWORD(u32Tmp);
354 return VINF_SUCCESS;
355 }
356 }
357 else
358 {
359 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
360 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
361 }
362 break;
363 }
364
365 /*
366 * 32-bit TSS (X86TSS32).
367 */
368 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
369 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
370 {
371 uint32_t off = uCpl * 8 + 4;
372 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
373 {
374/** @todo check actual access pattern here. */
375 uint64_t u64Tmp;
376 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
377 if (rcStrict == VINF_SUCCESS)
378 {
379 *puEsp = u64Tmp & UINT32_MAX;
380 *pSelSS = (RTSEL)(u64Tmp >> 32);
381 return VINF_SUCCESS;
382 }
383 }
384 else
385 {
386 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
387 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
388 }
389 break;
390 }
391
392 default:
393 AssertFailed();
394 rcStrict = VERR_IEM_IPE_4;
395 break;
396 }
397
398 *puEsp = 0; /* make gcc happy */
399 *pSelSS = 0; /* make gcc happy */
400 return rcStrict;
401}
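
/* The offsets used above match the architectural TSS stack-pointer layout:
 *   16-bit TSS:  SP0 at 0x02, SS0 at 0x04, SP1 at 0x06, ...  -> off = uCpl * 4 + 2, read 4 bytes (SP:SS)
 *   32-bit TSS:  ESP0 at 0x04, SS0 at 0x08, ESP1 at 0x0C, ... -> off = uCpl * 8 + 4, read 8 bytes (ESP + SS)
 */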
402
403
404/**
405 * Loads the specified stack pointer from the 64-bit TSS.
406 *
407 * @returns VBox strict status code.
408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
409 * @param uCpl The CPL to load the stack for.
410 * @param uIst The interrupt stack table index; 0 means use uCpl.
411 * @param puRsp Where to return the new stack pointer.
412 */
413static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
414{
415 Assert(uCpl < 4);
416 Assert(uIst < 8);
417 *puRsp = 0; /* make gcc happy */
418
419 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
420 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
421
422 uint32_t off;
423 if (uIst)
424 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
425 else
426 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
427 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
428 {
429 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
430 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
431 }
432
433 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
434}
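
/* Corresponding X86TSS64 layout: rsp0/rsp1/rsp2 at offsets 0x04/0x0C/0x14 and
 * ist1..ist7 starting at offset 0x24, each slot 8 bytes wide; uIst == 0 selects
 * the CPL-based rsp slot, otherwise slot (uIst - 1) of the IST array is used. */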
435
436
437/**
438 * Adjust the CPU state according to the exception being raised.
439 *
440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
441 * @param u8Vector The exception that has been raised.
442 */
443DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
444{
445 switch (u8Vector)
446 {
447 case X86_XCPT_DB:
448 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
449 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
450 break;
451 /** @todo Read the AMD and Intel exception reference... */
452 }
453}
454
455
456/**
457 * Implements exceptions and interrupts for real mode.
458 *
459 * @returns VBox strict status code.
460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
461 * @param cbInstr The number of bytes to offset rIP by in the return
462 * address.
463 * @param u8Vector The interrupt / exception vector number.
464 * @param fFlags The flags.
465 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
466 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
467 */
468static VBOXSTRICTRC
469iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
470 uint8_t cbInstr,
471 uint8_t u8Vector,
472 uint32_t fFlags,
473 uint16_t uErr,
474 uint64_t uCr2) RT_NOEXCEPT
475{
476 NOREF(uErr); NOREF(uCr2);
477 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
478
479 /*
480 * Read the IDT entry.
481 */
482 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
483 {
484 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
485 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
486 }
487 RTFAR16 Idte;
488 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
489 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
490 {
491 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
492 return rcStrict;
493 }
494
495#ifdef LOG_ENABLED
496 /* If software interrupt, try decode it if logging is enabled and such. */
497 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
498 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
499 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
500#endif
501
502 /*
503 * Push the stack frame.
504 */
505 uint8_t bUnmapInfo;
506 uint16_t *pu16Frame;
507 uint64_t uNewRsp;
508 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
509 if (rcStrict != VINF_SUCCESS)
510 return rcStrict;
511
512 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
513#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
514 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
515 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
516 fEfl |= UINT16_C(0xf000);
517#endif
518 pu16Frame[2] = (uint16_t)fEfl;
519 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
520 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
521 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
522 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
523 return rcStrict;
524
525 /*
526 * Load the vector address into cs:ip and make exception specific state
527 * adjustments.
528 */
529 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
530 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
531 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
532 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
533 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
534 pVCpu->cpum.GstCtx.rip = Idte.off;
535 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
536 IEMMISC_SET_EFL(pVCpu, fEfl);
537
538 /** @todo do we actually do this in real mode? */
539 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
540 iemRaiseXcptAdjustState(pVCpu, u8Vector);
541
542 /*
543 * Deal with debug events that follow the exception and clear inhibit flags.
544 */
545 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
546 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
547 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
548 else
549 {
550 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
551 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
552 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
553 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
554 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
555 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
556 return iemRaiseDebugException(pVCpu);
557 }
558
559 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
560 so best leave them alone in case we're in a weird kind of real mode... */
561
562 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
563}
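
/* Rough sketch of the real-mode dispatch performed above: the IVT entry for the
 * vector is 4 bytes (offset:segment) at linear address vector * 4, and a 6-byte
 * frame is pushed before loading CS:IP from it:
 *
 *      push FLAGS            ; IF, TF and AC are cleared in EFLAGS afterwards
 *      push CS
 *      push IP               ; return IP; for software INTs this is the next instruction
 *      CS:IP = IVT[vector]   ; CS base becomes sel << 4
 */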
564
565
566/**
567 * Loads a NULL data selector into a segment register when coming from V8086 mode.
568 *
569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
570 * @param pSReg Pointer to the segment register.
571 */
572DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
573{
574 pSReg->Sel = 0;
575 pSReg->ValidSel = 0;
576 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
577 {
578 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
579 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
580 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
581 }
582 else
583 {
584 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
585 /** @todo check this on AMD-V */
586 pSReg->u64Base = 0;
587 pSReg->u32Limit = 0;
588 }
589}
590
591
592/**
593 * Loads a segment selector during a task switch in V8086 mode.
594 *
595 * @param pSReg Pointer to the segment register.
596 * @param uSel The selector value to load.
597 */
598DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
599{
600 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
601 pSReg->Sel = uSel;
602 pSReg->ValidSel = uSel;
603 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
604 pSReg->u64Base = uSel << 4;
605 pSReg->u32Limit = 0xffff;
606 pSReg->Attr.u = 0xf3;
607}
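
/* Attr.u = 0xf3 equals a present, DPL=3, accessed read/write data segment,
 * matching how segment registers behave in virtual-8086 mode: the base is
 * always sel * 16 and the limit is fixed at 0xffff. */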
608
609
610/**
611 * Loads a segment selector during a task switch in protected mode.
612 *
613 * In this task switch scenario, we would throw \#TS exceptions rather than
614 * \#GPs.
615 *
616 * @returns VBox strict status code.
617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
618 * @param pSReg Pointer to the segment register.
619 * @param uSel The new selector value.
620 *
621 * @remarks This does _not_ handle CS or SS.
622 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
623 */
624static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
625{
626 Assert(!IEM_IS_64BIT_CODE(pVCpu));
627
628 /* Null data selector. */
629 if (!(uSel & X86_SEL_MASK_OFF_RPL))
630 {
631 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
632 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
633 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
634 return VINF_SUCCESS;
635 }
636
637 /* Fetch the descriptor. */
638 IEMSELDESC Desc;
639 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
640 if (rcStrict != VINF_SUCCESS)
641 {
642 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
643 VBOXSTRICTRC_VAL(rcStrict)));
644 return rcStrict;
645 }
646
647 /* Must be a data segment or readable code segment. */
648 if ( !Desc.Legacy.Gen.u1DescType
649 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
650 {
651 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
652 Desc.Legacy.Gen.u4Type));
653 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
654 }
655
656 /* Check privileges for data segments and non-conforming code segments. */
657 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
658 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
659 {
660 /* The RPL and the new CPL must be less than or equal to the DPL. */
661 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
662 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
663 {
664 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
665 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
666 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
667 }
668 }
669
670 /* Is it there? */
671 if (!Desc.Legacy.Gen.u1Present)
672 {
673 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
674 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
675 }
676
677 /* The base and limit. */
678 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
679 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
680
681 /*
682 * Ok, everything checked out fine. Now set the accessed bit before
683 * committing the result into the registers.
684 */
685 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
686 {
687 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
688 if (rcStrict != VINF_SUCCESS)
689 return rcStrict;
690 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
691 }
692
693 /* Commit */
694 pSReg->Sel = uSel;
695 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
696 pSReg->u32Limit = cbLimit;
697 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
698 pSReg->ValidSel = uSel;
699 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
700 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
701 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
702
703 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
704 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
705 return VINF_SUCCESS;
706}
707
708
709/**
710 * Performs a task switch.
711 *
712 * If the task switch is the result of a JMP, CALL or IRET instruction, the
713 * caller is responsible for performing the necessary checks (like DPL, TSS
714 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
715 * reference for JMP, CALL, IRET.
716 *
717 * If the task switch is due to a software interrupt or hardware exception,
718 * the caller is responsible for validating the TSS selector and descriptor. See
719 * Intel Instruction reference for INT n.
720 *
721 * @returns VBox strict status code.
722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
723 * @param enmTaskSwitch The cause of the task switch.
724 * @param uNextEip The EIP effective after the task switch.
725 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
726 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
727 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
728 * @param SelTss The TSS selector of the new task.
729 * @param pNewDescTss Pointer to the new TSS descriptor.
730 */
731VBOXSTRICTRC
732iemTaskSwitch(PVMCPUCC pVCpu,
733 IEMTASKSWITCH enmTaskSwitch,
734 uint32_t uNextEip,
735 uint32_t fFlags,
736 uint16_t uErr,
737 uint64_t uCr2,
738 RTSEL SelTss,
739 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
740{
741 Assert(!IEM_IS_REAL_MODE(pVCpu));
742 Assert(!IEM_IS_64BIT_CODE(pVCpu));
743 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
744
745 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
746 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
747 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
748 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
749 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
750
751 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
752 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
753
754 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
755 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
756
757 /* Update CR2 in case it's a page-fault. */
758 /** @todo This should probably be done much earlier in IEM/PGM. See
759 * @bugref{5653#c49}. */
760 if (fFlags & IEM_XCPT_FLAGS_CR2)
761 pVCpu->cpum.GstCtx.cr2 = uCr2;
762
763 /*
764 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
765 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
766 */
767 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
768 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
769 if (uNewTssLimit < uNewTssLimitMin)
770 {
771 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
772 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
773 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
774 }
775
776 /*
777 * Task switches in VMX non-root mode always cause task-switch VM-exits.
778 * The new TSS must have been read and validated (DPL, limits etc.) before a
779 * task-switch VM-exit commences.
780 *
781 * See Intel spec. 25.4.2 "Treatment of Task Switches".
782 */
783 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
784 {
785 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
786 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
787 }
788
789 /*
790 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
791 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
792 */
793 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
794 {
795 uint64_t const uExitInfo1 = SelTss;
796 uint64_t uExitInfo2 = uErr;
797 switch (enmTaskSwitch)
798 {
799 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
800 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
801 default: break;
802 }
803 if (fFlags & IEM_XCPT_FLAGS_ERR)
804 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
805 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
806 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
807
808 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
809 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
810 RT_NOREF2(uExitInfo1, uExitInfo2);
811 }
812
813 /*
814 * Check the current TSS limit. The last write to the current TSS during the
815 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
816 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
817 *
818 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
819 * end up with smaller than "legal" TSS limits.
820 */
821 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
822 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
823 if (uCurTssLimit < uCurTssLimitMin)
824 {
825 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
826 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
827 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
828 }
829
830 /*
831 * Verify that the new TSS can be accessed and map it. Map only the required contents
832 * and not the entire TSS.
833 */
834 uint8_t bUnmapInfoNewTss;
835 void *pvNewTss;
836 uint32_t const cbNewTss = uNewTssLimitMin + 1;
837 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
838 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
839 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
840 * not perform correct translation if this happens. See Intel spec. 7.2.1
841 * "Task-State Segment". */
842 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
843/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
844 * Consider wrapping the remainder into a function for simpler cleanup. */
845 if (rcStrict != VINF_SUCCESS)
846 {
847 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
848 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
849 return rcStrict;
850 }
851
852 /*
853 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
854 */
855 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
856 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
857 || enmTaskSwitch == IEMTASKSWITCH_IRET)
858 {
859 uint8_t bUnmapInfoDescCurTss;
860 PX86DESC pDescCurTss;
861 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
862 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
863 if (rcStrict != VINF_SUCCESS)
864 {
865 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
866 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
867 return rcStrict;
868 }
869
870 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
871 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
872 if (rcStrict != VINF_SUCCESS)
873 {
874 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
875 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
876 return rcStrict;
877 }
878
879 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
880 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
881 {
882 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
883 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
884 fEFlags &= ~X86_EFL_NT;
885 }
886 }
887
888 /*
889 * Save the CPU state into the current TSS.
890 */
891 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
892 if (GCPtrNewTss == GCPtrCurTss)
893 {
894 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
895 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
896 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
897 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
898 pVCpu->cpum.GstCtx.ldtr.Sel));
899 }
900 if (fIsNewTss386)
901 {
902 /*
903 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
904 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
905 */
906 uint8_t bUnmapInfoCurTss32;
907 void *pvCurTss32;
908 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
909 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
910 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
911 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
912 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
913 if (rcStrict != VINF_SUCCESS)
914 {
915 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
916 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
917 return rcStrict;
918 }
919
920 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
921 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
922 pCurTss32->eip = uNextEip;
923 pCurTss32->eflags = fEFlags;
924 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
925 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
926 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
927 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
928 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
929 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
930 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
931 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
932 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
933 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
934 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
935 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
936 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
937 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
938
939 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
940 if (rcStrict != VINF_SUCCESS)
941 {
942 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
943 VBOXSTRICTRC_VAL(rcStrict)));
944 return rcStrict;
945 }
946 }
947 else
948 {
949 /*
950 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
951 */
952 uint8_t bUnmapInfoCurTss16;
953 void *pvCurTss16;
954 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
955 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
956 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
957 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
958 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
959 if (rcStrict != VINF_SUCCESS)
960 {
961 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
962 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
963 return rcStrict;
964 }
965
966 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
967 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
968 pCurTss16->ip = uNextEip;
969 pCurTss16->flags = (uint16_t)fEFlags;
970 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
971 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
972 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
973 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
974 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
975 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
976 pCurTss16->si = pVCpu->cpum.GstCtx.si;
977 pCurTss16->di = pVCpu->cpum.GstCtx.di;
978 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
979 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
980 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
981 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
982
983 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
984 if (rcStrict != VINF_SUCCESS)
985 {
986 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
987 VBOXSTRICTRC_VAL(rcStrict)));
988 return rcStrict;
989 }
990 }
991
992 /*
993 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
994 */
995 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
996 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
997 {
998 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
999 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
1000 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
1001 }
1002
1003 /*
1004 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
1005 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
1006 */
1007 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
1008 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
1009 bool fNewDebugTrap;
1010 if (fIsNewTss386)
1011 {
1012 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
1013 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
1014 uNewEip = pNewTss32->eip;
1015 uNewEflags = pNewTss32->eflags;
1016 uNewEax = pNewTss32->eax;
1017 uNewEcx = pNewTss32->ecx;
1018 uNewEdx = pNewTss32->edx;
1019 uNewEbx = pNewTss32->ebx;
1020 uNewEsp = pNewTss32->esp;
1021 uNewEbp = pNewTss32->ebp;
1022 uNewEsi = pNewTss32->esi;
1023 uNewEdi = pNewTss32->edi;
1024 uNewES = pNewTss32->es;
1025 uNewCS = pNewTss32->cs;
1026 uNewSS = pNewTss32->ss;
1027 uNewDS = pNewTss32->ds;
1028 uNewFS = pNewTss32->fs;
1029 uNewGS = pNewTss32->gs;
1030 uNewLdt = pNewTss32->selLdt;
1031 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
1032 }
1033 else
1034 {
1035 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
1036 uNewCr3 = 0;
1037 uNewEip = pNewTss16->ip;
1038 uNewEflags = pNewTss16->flags;
1039 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
1040 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
1041 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
1042 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
1043 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
1044 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
1045 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
1046 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
1047 uNewES = pNewTss16->es;
1048 uNewCS = pNewTss16->cs;
1049 uNewSS = pNewTss16->ss;
1050 uNewDS = pNewTss16->ds;
1051 uNewFS = 0;
1052 uNewGS = 0;
1053 uNewLdt = pNewTss16->selLdt;
1054 fNewDebugTrap = false;
1055 }
1056
1057 if (GCPtrNewTss == GCPtrCurTss)
1058 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
1059 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
1060
1061 /*
1062 * We're done accessing the new TSS.
1063 */
1064 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
1065 if (rcStrict != VINF_SUCCESS)
1066 {
1067 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
1068 return rcStrict;
1069 }
1070
1071 /*
1072 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
1073 */
1074 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
1075 {
1076 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
1077 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
1078 if (rcStrict != VINF_SUCCESS)
1079 {
1080 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
1081 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
1082 return rcStrict;
1083 }
1084
1085 /* Check that the descriptor indicates the new TSS is available (not busy). */
1086 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
1087 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
1088 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
1089
1090 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
1091 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
1092 if (rcStrict != VINF_SUCCESS)
1093 {
1094 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
1095 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
1096 return rcStrict;
1097 }
1098 }
1099
1100 /*
1101 * From this point on, we're technically in the new task. We will defer exceptions
1102 * until the completion of the task switch but before executing any instructions in the new task.
1103 */
1104 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
1105 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
1106 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1107 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
1108 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
1109 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
1110 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
1111
1112 /* Set the busy bit in TR. */
1113 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
1114
1115 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
1116 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
1117 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
1118 {
1119 uNewEflags |= X86_EFL_NT;
1120 }
1121
1122 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
1123 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
1124 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
1125
1126 pVCpu->cpum.GstCtx.eip = uNewEip;
1127 pVCpu->cpum.GstCtx.eax = uNewEax;
1128 pVCpu->cpum.GstCtx.ecx = uNewEcx;
1129 pVCpu->cpum.GstCtx.edx = uNewEdx;
1130 pVCpu->cpum.GstCtx.ebx = uNewEbx;
1131 pVCpu->cpum.GstCtx.esp = uNewEsp;
1132 pVCpu->cpum.GstCtx.ebp = uNewEbp;
1133 pVCpu->cpum.GstCtx.esi = uNewEsi;
1134 pVCpu->cpum.GstCtx.edi = uNewEdi;
1135
1136 uNewEflags &= X86_EFL_LIVE_MASK;
1137 uNewEflags |= X86_EFL_RA1_MASK;
1138 IEMMISC_SET_EFL(pVCpu, uNewEflags);
1139
1140 /*
1141 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
1142 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
1143 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
1144 */
1145 pVCpu->cpum.GstCtx.es.Sel = uNewES;
1146 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
1147
1148 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
1149 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
1150
1151 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1152 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
1153
1154 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
1155 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
1156
1157 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
1158 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
1159
1160 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
1161 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
1162 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1163
1164 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
1165 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
1166 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
1167 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
1168
1169 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1170 {
1171 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
1172 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
1173 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
1174 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
1175 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
1176 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
1177 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
1178 }
1179
1180 /*
1181 * Switch CR3 for the new task.
1182 */
1183 if ( fIsNewTss386
1184 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
1185 {
1186 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
1187 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
1188 AssertRCSuccessReturn(rc, rc);
1189
1190 /* Inform PGM. */
1191 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
1192 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
1193 AssertRCReturn(rc, rc);
1194 /* ignore informational status codes */
1195
1196 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
1197 }
1198
1199 /*
1200 * Switch LDTR for the new task.
1201 */
1202 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
1203 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
1204 else
1205 {
1206 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
1207
1208 IEMSELDESC DescNewLdt;
1209 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
1210 if (rcStrict != VINF_SUCCESS)
1211 {
1212 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
1213 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
1214 return rcStrict;
1215 }
1216 if ( !DescNewLdt.Legacy.Gen.u1Present
1217 || DescNewLdt.Legacy.Gen.u1DescType
1218 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1219 {
1220 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
1221 uNewLdt, DescNewLdt.Legacy.u));
1222 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
1223 }
1224
1225 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
1226 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1227 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
1228 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
1229 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
1230 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1231 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
1232 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1233 }
1234
1235 IEMSELDESC DescSS;
1236 if (IEM_IS_V86_MODE(pVCpu))
1237 {
1238 IEM_SET_CPL(pVCpu, 3);
1239 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
1240 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
1241 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
1242 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
1243 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
1244 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
1245
1246 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
1247 DescSS.Legacy.u = 0;
1248 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
1249 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
1250 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
1251 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
1252 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
1253 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
1254 DescSS.Legacy.Gen.u2Dpl = 3;
1255 }
1256 else
1257 {
1258 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
1259
1260 /*
1261 * Load the stack segment for the new task.
1262 */
1263 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
1264 {
1265 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
1266 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1267 }
1268
1269 /* Fetch the descriptor. */
1270 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
1271 if (rcStrict != VINF_SUCCESS)
1272 {
1273 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
1274 VBOXSTRICTRC_VAL(rcStrict)));
1275 return rcStrict;
1276 }
1277
1278 /* SS must be a data segment and writable. */
1279 if ( !DescSS.Legacy.Gen.u1DescType
1280 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1281 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
1282 {
1283 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
1284 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
1285 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1286 }
1287
1288 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
1289 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
1290 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
1291 {
1292 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
1293 uNewCpl));
1294 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1295 }
1296
1297 /* Is it there? */
1298 if (!DescSS.Legacy.Gen.u1Present)
1299 {
1300 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
1301 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1302 }
1303
1304 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
1305 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
1306
1307 /* Set the accessed bit before committing the result into SS. */
1308 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1309 {
1310 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1311 if (rcStrict != VINF_SUCCESS)
1312 return rcStrict;
1313 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1314 }
1315
1316 /* Commit SS. */
1317 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1318 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1319 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1320 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
1321 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
1322 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1323 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1324
1325 /* CPL has changed, update IEM before loading rest of segments. */
1326 IEM_SET_CPL(pVCpu, uNewCpl);
1327
1328 /*
1329 * Load the data segments for the new task.
1330 */
1331 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
1332 if (rcStrict != VINF_SUCCESS)
1333 return rcStrict;
1334 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
1335 if (rcStrict != VINF_SUCCESS)
1336 return rcStrict;
1337 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
1338 if (rcStrict != VINF_SUCCESS)
1339 return rcStrict;
1340 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
1341 if (rcStrict != VINF_SUCCESS)
1342 return rcStrict;
1343
1344 /*
1345 * Load the code segment for the new task.
1346 */
1347 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
1348 {
1349 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
1350 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1351 }
1352
1353 /* Fetch the descriptor. */
1354 IEMSELDESC DescCS;
1355 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
1356 if (rcStrict != VINF_SUCCESS)
1357 {
1358 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
1359 return rcStrict;
1360 }
1361
1362 /* CS must be a code segment. */
1363 if ( !DescCS.Legacy.Gen.u1DescType
1364 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1365 {
1366 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
1367 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1368 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1369 }
1370
1371 /* For conforming CS, DPL must be less than or equal to the RPL. */
1372 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1373 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
1374 {
1375 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
1376 DescCS.Legacy.Gen.u2Dpl));
1377 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1378 }
1379
1380 /* For non-conforming CS, DPL must match RPL. */
1381 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1382 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
1383 {
1384 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
1385 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
1386 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1387 }
1388
1389 /* Is it there? */
1390 if (!DescCS.Legacy.Gen.u1Present)
1391 {
1392 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
1393 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1394 }
1395
1396 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1397 u64Base = X86DESC_BASE(&DescCS.Legacy);
1398
1399 /* Set the accessed bit before committing the result into CS. */
1400 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1401 {
1402 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1403 if (rcStrict != VINF_SUCCESS)
1404 return rcStrict;
1405 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1406 }
1407
1408 /* Commit CS. */
1409 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
1410 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
1411 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1412 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1413 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1414 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1415 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1416 }
1417
1418 /* Make sure the CPU mode is correct. */
1419 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
1420 if (fExecNew != pVCpu->iem.s.fExec)
1421 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
1422 pVCpu->iem.s.fExec = fExecNew;
1423
1424 /** @todo Debug trap. */
1425 if (fIsNewTss386 && fNewDebugTrap)
1426 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
1427
1428 /*
1429 * Construct the error code masks based on what caused this task switch.
1430 * See Intel Instruction reference for INT.
1431 */
1432 uint16_t uExt;
1433 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
1434 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1435 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
1436 uExt = 1;
1437 else
1438 uExt = 0;
1439
1440 /*
1441 * Push any error code on to the new stack.
1442 */
1443 if (fFlags & IEM_XCPT_FLAGS_ERR)
1444 {
1445 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
1446 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
1447 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
1448
1449 /* Check that there is sufficient space on the stack. */
1450 /** @todo Factor out segment limit checking for normal/expand down segments
1451 * into a separate function. */
1452 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
1453 {
1454 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
1455 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
1456 {
1457 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
1458 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
1459 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
1460 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
1461 }
1462 }
1463 else
1464 {
1465 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
1466 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
1467 {
1468 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
1469 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
1470 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
1471 }
1472 }
1473
1474
1475 if (fIsNewTss386)
1476 rcStrict = iemMemStackPushU32(pVCpu, uErr);
1477 else
1478 rcStrict = iemMemStackPushU16(pVCpu, uErr);
1479 if (rcStrict != VINF_SUCCESS)
1480 {
1481 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
1482 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
1483 return rcStrict;
1484 }
1485 }
1486
1487 /* Check the new EIP against the new CS limit. */
1488 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
1489 {
1490 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
1491 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
1492 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
1493 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
1494 }
1495
1496 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
1497 pVCpu->cpum.GstCtx.ss.Sel));
1498 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1499}
1500
1501
1502/**
1503 * Implements exceptions and interrupts for protected mode.
1504 *
1505 * @returns VBox strict status code.
1506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1507 * @param cbInstr The number of bytes to offset rIP by in the return
1508 * address.
1509 * @param u8Vector The interrupt / exception vector number.
1510 * @param fFlags The flags.
1511 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1512 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1513 */
1514static VBOXSTRICTRC
1515iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
1516 uint8_t cbInstr,
1517 uint8_t u8Vector,
1518 uint32_t fFlags,
1519 uint16_t uErr,
1520 uint64_t uCr2) RT_NOEXCEPT
1521{
1522 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1523
1524 /*
1525 * Read the IDT entry.
1526 */
1527 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1528 {
1529 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1530 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1531 }
1532 X86DESC Idte;
1533 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
1534 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
1535 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1536 {
1537 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1538 return rcStrict;
1539 }
1540 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
1541 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
1542 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
1543 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
1544
1545 /*
1546 * Check the descriptor type, DPL and such.
1547 * ASSUMES this is done in the same order as described for call-gate calls.
1548 */
1549 if (Idte.Gate.u1DescType)
1550 {
1551 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1552 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1553 }
1554 bool fTaskGate = false;
1555 uint8_t f32BitGate = true;
1556 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1557 switch (Idte.Gate.u4Type)
1558 {
1559 case X86_SEL_TYPE_SYS_UNDEFINED:
1560 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1561 case X86_SEL_TYPE_SYS_LDT:
1562 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1563 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1564 case X86_SEL_TYPE_SYS_UNDEFINED2:
1565 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1566 case X86_SEL_TYPE_SYS_UNDEFINED3:
1567 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1568 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1569 case X86_SEL_TYPE_SYS_UNDEFINED4:
1570 {
1571 /** @todo check what actually happens when the type is wrong...
1572 * esp. call gates. */
1573 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1574 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1575 }
1576
1577 case X86_SEL_TYPE_SYS_286_INT_GATE:
1578 f32BitGate = false;
1579 RT_FALL_THRU();
1580 case X86_SEL_TYPE_SYS_386_INT_GATE:
1581 fEflToClear |= X86_EFL_IF;
1582 break;
1583
1584 case X86_SEL_TYPE_SYS_TASK_GATE:
1585 fTaskGate = true;
1586#ifndef IEM_IMPLEMENTS_TASKSWITCH
1587 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
1588#endif
1589 break;
1590
1591 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1592 f32BitGate = false;
1593 break;
1594 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1595 break;
1596
1597 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1598 }
1599
1600 /* Check DPL against CPL if applicable. */
1601 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
1602 {
1603 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
1604 {
1605 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
1606 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1607 }
1608 }
1609
1610 /* Is it there? */
1611 if (!Idte.Gate.u1Present)
1612 {
1613 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1614 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1615 }
1616
1617 /* Is it a task-gate? */
1618 if (fTaskGate)
1619 {
1620 /*
1621 * Construct the error code masks based on what caused this task switch.
1622 * See Intel Instruction reference for INT.
1623 */
1624 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1625 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
1626 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
1627 RTSEL SelTss = Idte.Gate.u16Sel;
1628
1629 /*
1630 * Fetch the TSS descriptor in the GDT.
1631 */
1632 IEMSELDESC DescTSS;
1633 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
1634 if (rcStrict != VINF_SUCCESS)
1635 {
1636 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
1637 VBOXSTRICTRC_VAL(rcStrict)));
1638 return rcStrict;
1639 }
1640
1641 /* The TSS descriptor must be a system segment and be available (not busy). */
1642 if ( DescTSS.Legacy.Gen.u1DescType
1643 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
1644 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
1645 {
1646 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
1647 u8Vector, SelTss, DescTSS.Legacy.au64));
1648 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
1649 }
1650
1651 /* The TSS must be present. */
1652 if (!DescTSS.Legacy.Gen.u1Present)
1653 {
1654 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
1655 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
1656 }
1657
1658 /* Do the actual task switch. */
1659 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
1660 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
1661 fFlags, uErr, uCr2, SelTss, &DescTSS);
1662 }
1663
1664 /* A null CS is bad. */
1665 RTSEL NewCS = Idte.Gate.u16Sel;
1666 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
1667 {
1668 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1669 return iemRaiseGeneralProtectionFault0(pVCpu);
1670 }
1671
1672 /* Fetch the descriptor for the new CS. */
1673 IEMSELDESC DescCS;
1674 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
1675 if (rcStrict != VINF_SUCCESS)
1676 {
1677 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
1678 return rcStrict;
1679 }
1680
1681 /* Must be a code segment. */
1682 if (!DescCS.Legacy.Gen.u1DescType)
1683 {
1684 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1685 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1686 }
1687 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1688 {
1689 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1690 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1691 }
1692
1693 /* Don't allow lowering the privilege level. */
1694 /** @todo Does the lowering of privileges apply to software interrupts
1695 * only? This has bearings on the more-privileged or
1696 * same-privilege stack behavior further down. A testcase would
1697 * be nice. */
1698 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1699 {
1700 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1701 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1702 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1703 }
1704
1705 /* Make sure the selector is present. */
1706 if (!DescCS.Legacy.Gen.u1Present)
1707 {
1708 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1709 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
1710 }
1711
1712#ifdef LOG_ENABLED
1713 /* If software interrupt, try decode it if logging is enabled and such. */
1714 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1715 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
1716 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
1717#endif
1718
1719 /* Check the new EIP against the new CS limit. */
1720 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1721 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1722 ? Idte.Gate.u16OffsetLow
1723 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1724 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
1725 if (uNewEip > cbLimitCS)
1726 {
1727 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
1728 u8Vector, uNewEip, cbLimitCS, NewCS));
1729 return iemRaiseGeneralProtectionFault(pVCpu, 0);
1730 }
1731 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
1732
1733 /* Calc the flag image to push. */
1734 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1735 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
1736 fEfl &= ~X86_EFL_RF;
1737 else
1738 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
1739
1740 /* From V8086 mode only go to CPL 0. */
1741 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1742 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
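     /* Note: for a conforming code segment the handler keeps running at the current
        CPL; for a non-conforming one it runs at CS.DPL (same rule as spelled out in
        the long mode variant below). */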
1743 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
1744 {
1745 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
1746 return iemRaiseGeneralProtectionFault(pVCpu, 0);
1747 }
1748
1749 /*
1750 * If the privilege level changes, we need to get a new stack from the TSS.
1751 * This in turns means validating the new SS and ESP...
1752 */
1753 if (uNewCpl != IEM_GET_CPL(pVCpu))
1754 {
1755 RTSEL NewSS;
1756 uint32_t uNewEsp;
1757 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
1758 if (rcStrict != VINF_SUCCESS)
1759 return rcStrict;
1760
1761 IEMSELDESC DescSS;
1762 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
1763 if (rcStrict != VINF_SUCCESS)
1764 return rcStrict;
1765 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
1766 if (!DescSS.Legacy.Gen.u1DefBig)
1767 {
1768 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
1769 uNewEsp = (uint16_t)uNewEsp;
1770 }
1771
1772 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
1773
1774 /* Check that there is sufficient space for the stack frame. */
1775 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
1776 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
1777 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
1778 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
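     /* The sizes above follow from the frame pushed below: EIP, CS, EFLAGS, ESP and
        SS (5 slots, +1 when an error code is pushed), plus ES, DS, FS and GS when
        coming from V8086 mode; each slot is 2 bytes for a 16-bit gate and 4 bytes
        for a 32-bit one (hence the shift by f32BitGate). */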
1779
1780 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
1781 {
1782 if ( uNewEsp - 1 > cbLimitSS
1783 || uNewEsp < cbStackFrame)
1784 {
1785 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1786 u8Vector, NewSS, uNewEsp, cbStackFrame));
1787 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
1788 }
1789 }
1790 else
1791 {
1792 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
1793 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
1794 {
1795 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
1796 u8Vector, NewSS, uNewEsp, cbStackFrame));
1797 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
1798 }
1799 }
1800
1801 /*
1802 * Start making changes.
1803 */
1804
1805 /* Set the new CPL so that stack accesses use it. */
1806 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
1807 IEM_SET_CPL(pVCpu, uNewCpl);
1808
1809 /* Create the stack frame. */
1810 uint8_t bUnmapInfoStackFrame;
1811 RTPTRUNION uStackFrame;
1812 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
1813 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
1814 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
1815 if (rcStrict != VINF_SUCCESS)
1816 return rcStrict;
1817 if (f32BitGate)
1818 {
1819 if (fFlags & IEM_XCPT_FLAGS_ERR)
1820 *uStackFrame.pu32++ = uErr;
1821 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1822 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
1823 uStackFrame.pu32[2] = fEfl;
1824 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
1825 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
1826 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
1827 if (fEfl & X86_EFL_VM)
1828 {
1829 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
1830 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
1831 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
1832 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
1833 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
1834 }
1835 }
1836 else
1837 {
1838 if (fFlags & IEM_XCPT_FLAGS_ERR)
1839 *uStackFrame.pu16++ = uErr;
1840 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1841 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
1842 uStackFrame.pu16[2] = fEfl;
1843 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
1844 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
1845 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
1846 if (fEfl & X86_EFL_VM)
1847 {
1848 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1849 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
1850 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
1851 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
1852 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
1853 }
1854 }
1855 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
1856 if (rcStrict != VINF_SUCCESS)
1857 return rcStrict;
1858
1859 /* Mark the selectors 'accessed' (hope this is the correct time). */
1860 /** @todo testcase: exactly _when_ are the accessed bits set - before or
1861 * after pushing the stack frame? (Write protect the gdt + stack to
1862 * find out.) */
1863 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1864 {
1865 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
1866 if (rcStrict != VINF_SUCCESS)
1867 return rcStrict;
1868 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1869 }
1870
1871 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1872 {
1873 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
1874 if (rcStrict != VINF_SUCCESS)
1875 return rcStrict;
1876 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1877 }
1878
1879 /*
1880 * Start committing the register changes (joins with the DPL=CPL branch).
1881 */
1882 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
1883 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
1884 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1885 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
1886 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1887 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1888 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
1889 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
1890 * SP is loaded).
1891 * Need to check the other combinations too:
1892 * - 16-bit TSS, 32-bit handler
1893 * - 32-bit TSS, 16-bit handler */
1894 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1895 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
1896 else
1897 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
1898
1899 if (fEfl & X86_EFL_VM)
1900 {
1901 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
1902 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
1903 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
1904 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
1905 }
1906 }
1907 /*
1908 * Same privilege, no stack change and smaller stack frame.
1909 */
1910 else
1911 {
1912 uint64_t uNewRsp;
1913 uint8_t bUnmapInfoStackFrame;
1914 RTPTRUNION uStackFrame;
1915 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
1916 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
1917 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
1918 if (rcStrict != VINF_SUCCESS)
1919 return rcStrict;
1920
1921 if (f32BitGate)
1922 {
1923 if (fFlags & IEM_XCPT_FLAGS_ERR)
1924 *uStackFrame.pu32++ = uErr;
1925 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1926 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
1927 uStackFrame.pu32[2] = fEfl;
1928 }
1929 else
1930 {
1931 if (fFlags & IEM_XCPT_FLAGS_ERR)
1932 *uStackFrame.pu16++ = uErr;
1933 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1934 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
1935 uStackFrame.pu16[2] = fEfl;
1936 }
1937 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
1938 if (rcStrict != VINF_SUCCESS)
1939 return rcStrict;
1940
1941 /* Mark the CS selector as 'accessed'. */
1942 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1943 {
1944 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
1945 if (rcStrict != VINF_SUCCESS)
1946 return rcStrict;
1947 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1948 }
1949
1950 /*
1951 * Start committing the register changes (joins with the other branch).
1952 */
1953 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1954 }
1955
1956 /* ... register committing continues. */
1957 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
1958 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
1959 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1960 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
1961 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
1962 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1963
1964 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
1965 fEfl &= ~fEflToClear;
1966 IEMMISC_SET_EFL(pVCpu, fEfl);
1967
1968 if (fFlags & IEM_XCPT_FLAGS_CR2)
1969 pVCpu->cpum.GstCtx.cr2 = uCr2;
1970
1971 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1972 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1973
1974 /* Make sure the execution flags are correct. */
1975 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
1976 if (fExecNew != pVCpu->iem.s.fExec)
1977 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
1978 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
1979 pVCpu->iem.s.fExec = fExecNew;
1980 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
1981
1982 /*
1983 * Deal with debug events that follow the exception and clear inhibit flags.
1984 */
1985 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1986 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
1987 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
1988 else
1989 {
1990 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
1991 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
1992 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1993 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
1994 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
1995 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
1996 return iemRaiseDebugException(pVCpu);
1997 }
1998
1999 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2000}
2001
2002
2003/**
2004 * Implements exceptions and interrupts for long mode.
2005 *
2006 * @returns VBox strict status code.
2007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2008 * @param cbInstr The number of bytes to offset rIP by in the return
2009 * address.
2010 * @param u8Vector The interrupt / exception vector number.
2011 * @param fFlags The flags.
2012 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2013 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2014 */
2015static VBOXSTRICTRC
2016iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
2017 uint8_t cbInstr,
2018 uint8_t u8Vector,
2019 uint32_t fFlags,
2020 uint16_t uErr,
2021 uint64_t uCr2) RT_NOEXCEPT
2022{
2023 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2024
2025 /*
2026 * Read the IDT entry.
2027 */
2028 uint16_t offIdt = (uint16_t)u8Vector << 4;
2029 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
2030 {
2031 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2032 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2033 }
2034 X86DESC64 Idte;
2035#ifdef _MSC_VER /* Shut up silly compiler warning. */
2036 Idte.au64[0] = 0;
2037 Idte.au64[1] = 0;
2038#endif
2039 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
2040 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2041 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
2042 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2043 {
2044 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2045 return rcStrict;
2046 }
2047 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
2048 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2049 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2050
2051 /*
2052 * Check the descriptor type, DPL and such.
2053 * ASSUMES this is done in the same order as described for call-gate calls.
2054 */
2055 if (Idte.Gate.u1DescType)
2056 {
2057 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2058 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2059 }
2060 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2061 switch (Idte.Gate.u4Type)
2062 {
2063 case AMD64_SEL_TYPE_SYS_INT_GATE:
2064 fEflToClear |= X86_EFL_IF;
2065 break;
2066 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
2067 break;
2068
2069 default:
2070 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2071 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2072 }
2073
2074 /* Check DPL against CPL if applicable. */
2075 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2076 {
2077 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
2078 {
2079 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
2080 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2081 }
2082 }
2083
2084 /* Is it there? */
2085 if (!Idte.Gate.u1Present)
2086 {
2087 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
2088 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2089 }
2090
2091 /* A null CS is bad. */
2092 RTSEL NewCS = Idte.Gate.u16Sel;
2093 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2094 {
2095 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2096 return iemRaiseGeneralProtectionFault0(pVCpu);
2097 }
2098
2099 /* Fetch the descriptor for the new CS. */
2100 IEMSELDESC DescCS;
2101 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
2102 if (rcStrict != VINF_SUCCESS)
2103 {
2104 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2105 return rcStrict;
2106 }
2107
2108 /* Must be a 64-bit code segment. */
2109 if (!DescCS.Long.Gen.u1DescType)
2110 {
2111 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2112 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2113 }
2114 if ( !DescCS.Long.Gen.u1Long
2115 || DescCS.Long.Gen.u1DefBig
2116 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
2117 {
2118 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2119 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2120 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2121 }
2122
2123 /* Don't allow lowering the privilege level. For non-conforming CS
2124 selectors, the CS.DPL sets the privilege level the trap/interrupt
2125 handler runs at. For conforming CS selectors, the CPL remains
2126 unchanged, but the CS.DPL must be <= CPL. */
2127 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2128 * when CPU in Ring-0. Result \#GP? */
2129 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
2130 {
2131 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2132 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2133 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2134 }
2135
2136
2137 /* Make sure the selector is present. */
2138 if (!DescCS.Legacy.Gen.u1Present)
2139 {
2140 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2141 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
2142 }
2143
2144 /* Check that the new RIP is canonical. */
2145 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
2146 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
2147 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
2148 if (!IEM_IS_CANONICAL(uNewRip))
2149 {
2150 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
2151 return iemRaiseGeneralProtectionFault0(pVCpu);
2152 }
2153
2154 /*
2155 * If the privilege level changes or if the IST isn't zero, we need to get
2156 * a new stack from the TSS.
2157 */
2158 uint64_t uNewRsp;
2159 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2160 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
2161 if ( uNewCpl != IEM_GET_CPL(pVCpu)
2162 || Idte.Gate.u3IST != 0)
2163 {
2164 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
2165 if (rcStrict != VINF_SUCCESS)
2166 return rcStrict;
2167 }
2168 else
2169 uNewRsp = pVCpu->cpum.GstCtx.rsp;
2170 uNewRsp &= ~(uint64_t)0xf;
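     /* Note: in 64-bit mode the CPU aligns the stack pointer down to a 16-byte
        boundary before pushing the interrupt frame, which the masking above models. */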
2171
2172 /*
2173 * Calc the flag image to push.
2174 */
2175 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2176 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
2177 fEfl &= ~X86_EFL_RF;
2178 else
2179 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
2180
2181 /*
2182 * Start making changes.
2183 */
2184 /* Set the new CPL so that stack accesses use it. */
2185 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
2186 IEM_SET_CPL(pVCpu, uNewCpl);
2187/** @todo Setting CPL this early seems wrong as it would affect any errors we
2188 * raise accessing the stack and (?) GDT/LDT... */
2189
2190 /* Create the stack frame. */
2191 uint8_t bUnmapInfoStackFrame;
2192 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
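     /* The long mode frame is always five 64-bit pushes - SS, RSP, RFLAGS, CS and
        RIP - plus an optional 64-bit error code, regardless of the bitness of the
        interrupted code. */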
2193 RTPTRUNION uStackFrame;
2194 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
2195 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
2196 if (rcStrict != VINF_SUCCESS)
2197 return rcStrict;
2198
2199 if (fFlags & IEM_XCPT_FLAGS_ERR)
2200 *uStackFrame.pu64++ = uErr;
2201 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
2202 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
2203 uStackFrame.pu64[2] = fEfl;
2204 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
2205 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
2206 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
2207 if (rcStrict != VINF_SUCCESS)
2208 return rcStrict;
2209
2210 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
2211 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2212 * after pushing the stack frame? (Write protect the gdt + stack to
2213 * find out.) */
2214 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2215 {
2216 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
2217 if (rcStrict != VINF_SUCCESS)
2218 return rcStrict;
2219 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2220 }
2221
2222 /*
2223 * Start committing the register changes.
2224 */
2225 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
2226 * hidden registers when interrupting 32-bit or 16-bit code! */
2227 if (uNewCpl != uOldCpl)
2228 {
2229 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
2230 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
2231 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2232 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
2233 pVCpu->cpum.GstCtx.ss.u64Base = 0;
2234 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
2235 }
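     /* Note: on a CPL change in long mode, SS gets loaded with a null selector whose
        RPL equals the new CPL (done just above); the hidden parts are marked unusable. */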
2236 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
2237 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2238 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2239 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2240 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
2241 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2242 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2243 pVCpu->cpum.GstCtx.rip = uNewRip;
2244
2245 fEfl &= ~fEflToClear;
2246 IEMMISC_SET_EFL(pVCpu, fEfl);
2247
2248 if (fFlags & IEM_XCPT_FLAGS_CR2)
2249 pVCpu->cpum.GstCtx.cr2 = uCr2;
2250
2251 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2252 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2253
2254 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
2255
2256 /*
2257 * Deal with debug events that follow the exception and clear inhibit flags.
2258 */
2259 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2260 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2261 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2262 else
2263 {
2264 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
2265 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2266 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2267 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2268 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2269 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2270 return iemRaiseDebugException(pVCpu);
2271 }
2272
2273 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2274}
2275
2276
2277/**
2278 * Implements exceptions and interrupts.
2279 *
2280 * All exceptions and interrupts go through this function!
2281 *
2282 * @returns VBox strict status code.
2283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2284 * @param cbInstr The number of bytes to offset rIP by in the return
2285 * address.
2286 * @param u8Vector The interrupt / exception vector number.
2287 * @param fFlags The flags.
2288 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2289 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2290 */
2291VBOXSTRICTRC
2292iemRaiseXcptOrInt(PVMCPUCC pVCpu,
2293 uint8_t cbInstr,
2294 uint8_t u8Vector,
2295 uint32_t fFlags,
2296 uint16_t uErr,
2297 uint64_t uCr2) RT_NOEXCEPT
2298{
2299 /*
2300 * Get all the state that we might need here.
2301 */
2302 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2303 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2304
2305#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
2306 /*
2307 * Flush prefetch buffer
2308 */
2309 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2310#endif
2311
2312 /*
2313 * Perform the V8086 IOPL check and upgrade the fault without nesting.
2314 */
2315 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
2316 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
2317 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
2318 | IEM_XCPT_FLAGS_BP_INSTR
2319 | IEM_XCPT_FLAGS_ICEBP_INSTR
2320 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2321 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
2322 {
2323 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
2324 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
2325 u8Vector = X86_XCPT_GP;
2326 uErr = 0;
2327 }
2328
2329 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
2330#ifdef DBGFTRACE_ENABLED
2331 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
2332 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
2333 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
2334#endif
2335
2336 /*
2337 * Check if DBGF wants to intercept the exception.
2338 */
2339 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
2340 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
2341 { /* likely */ }
2342 else
2343 {
2344 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
2345 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
2346 if (rcStrict != VINF_SUCCESS)
2347 return rcStrict;
2348 }
2349
2350 /*
2351 * Evaluate whether NMI blocking should be in effect.
2352 * Normally, NMI blocking is in effect whenever we inject an NMI.
2353 */
2354 bool fBlockNmi = u8Vector == X86_XCPT_NMI
2355 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
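     /* Note: on real hardware, taking an NMI blocks further NMIs until the next IRET;
        CPUMSetInterruptInhibitingByNmi below models that behaviour. */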
2356
2357#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2358 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2359 {
2360 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
2361 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2362 return rcStrict0;
2363
2364 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
2365 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
2366 {
2367 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
2368 fBlockNmi = false;
2369 }
2370 }
2371#endif
2372
2373#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2374 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
2375 {
2376 /*
2377 * If the event is being injected as part of VMRUN, it isn't subject to event
2378 * intercepts in the nested-guest. However, secondary exceptions that occur
2379 * during injection of any event -are- subject to exception intercepts.
2380 *
2381 * See AMD spec. 15.20 "Event Injection".
2382 */
2383 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
2384 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
2385 else
2386 {
2387 /*
2388 * Check and handle if the event being raised is intercepted.
2389 */
2390 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2391 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
2392 return rcStrict0;
2393 }
2394 }
2395#endif
2396
2397 /*
2398 * Set NMI blocking if necessary.
2399 */
2400 if (fBlockNmi)
2401 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
2402
2403 /*
2404 * Do recursion accounting.
2405 */
2406 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
2407 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
2408 if (pVCpu->iem.s.cXcptRecursions == 0)
2409 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2410 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
2411 else
2412 {
2413 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2414 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
2415 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
2416
2417 if (pVCpu->iem.s.cXcptRecursions >= 4)
2418 {
2419#ifdef DEBUG_bird
2420 AssertFailed();
2421#endif
2422 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2423 }
2424
2425 /*
2426 * Evaluate the sequence of recurring events.
2427 */
2428 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
2429 NULL /* pXcptRaiseInfo */);
2430 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
2431 { /* likely */ }
2432 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
2433 {
2434 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
2435 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
2436 u8Vector = X86_XCPT_DF;
2437 uErr = 0;
2438#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2439 /* VMX nested-guest #DF intercept needs to be checked here. */
2440 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2441 {
2442 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
2443 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2444 return rcStrict0;
2445 }
2446#endif
2447 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
2448 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
2449 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2450 }
2451 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
2452 {
2453 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
2454 return iemInitiateCpuShutdown(pVCpu);
2455 }
2456 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
2457 {
2458 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
2459 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
2460 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
2461 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
2462 return VERR_EM_GUEST_CPU_HANG;
2463 }
2464 else
2465 {
2466 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
2467 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
2468 return VERR_IEM_IPE_9;
2469 }
2470
2471 /*
2472 * The 'EXT' bit is set when an exception occurs during delivery of an external
2473 * event (such as an interrupt or an earlier exception)[1]. A privileged software
2474 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
2475 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
2476 *
2477 * [1] - Intel spec. 6.13 "Error Code"
2478 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
2479 * [3] - Intel Instruction reference for INT n.
2480 */
2481 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
2482 && (fFlags & IEM_XCPT_FLAGS_ERR)
2483 && u8Vector != X86_XCPT_PF
2484 && u8Vector != X86_XCPT_DF)
2485 {
2486 uErr |= X86_TRAP_ERR_EXTERNAL;
2487 }
2488 }
2489
2490 pVCpu->iem.s.cXcptRecursions++;
2491 pVCpu->iem.s.uCurXcpt = u8Vector;
2492 pVCpu->iem.s.fCurXcpt = fFlags;
2493 pVCpu->iem.s.uCurXcptErr = uErr;
2494 pVCpu->iem.s.uCurXcptCr2 = uCr2;
2495
2496 /*
2497 * Extensive logging.
2498 */
2499#if defined(LOG_ENABLED) && defined(IN_RING3)
2500 if (LogIs3Enabled())
2501 {
2502 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
2503 char szRegs[4096];
2504 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2505 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2506 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2507 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2508 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2509 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2510 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2511 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2512 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2513 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2514 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2515 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2516 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2517 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2518 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2519 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2520 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2521 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2522 " efer=%016VR{efer}\n"
2523 " pat=%016VR{pat}\n"
2524 " sf_mask=%016VR{sf_mask}\n"
2525 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2526 " lstar=%016VR{lstar}\n"
2527 " star=%016VR{star} cstar=%016VR{cstar}\n"
2528 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2529 );
2530
2531 char szInstr[256];
2532 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2533 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2534 szInstr, sizeof(szInstr), NULL);
2535 Log3(("%s%s\n", szRegs, szInstr));
2536 }
2537#endif /* LOG_ENABLED */
2538
2539 /*
2540 * Stats.
2541 */
2542 uint64_t const uTimestamp = ASMReadTSC();
2543 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
2544 {
2545 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
2546 EMHistoryAddExit(pVCpu,
2547 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
2548 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
2549 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
2550 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
2551 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
2552 }
2553 else
2554 {
2555 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
2556 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
2557 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
2558 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
2559 if (fFlags & IEM_XCPT_FLAGS_ERR)
2560 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
2561 if (fFlags & IEM_XCPT_FLAGS_CR2)
2562 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
2563 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
2564 }
2565
2566 /*
2567 * Hack alert! Convert incoming debug events to silent ones on Intel.
2568 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
2569 */
2570 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2571 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2572 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
2573 { /* ignore */ }
2574 else
2575 {
2576 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
2577 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
2578 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
2579 | CPUMCTX_DBG_HIT_DRX_SILENT;
2580 }
2581
2582 /*
2583 * A #PF implies an INVLPG of the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
2584 * to ensure that a stale TLB or paging cache entry will only cause one
2585 * spurious #PF.
2586 */
2587 if ( u8Vector == X86_XCPT_PF
2588 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
2589 IEMTlbInvalidatePage(pVCpu, uCr2);
2590
2591 /*
2592 * Call the mode specific worker function.
2593 */
2594 VBOXSTRICTRC rcStrict;
2595 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
2596 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2597 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
2598 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2599 else
2600 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2601
2602 /* Flush the prefetch buffer. */
2603 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2604
2605 /*
2606 * Unwind.
2607 */
2608 pVCpu->iem.s.cXcptRecursions--;
2609 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
2610 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
2611 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
2612 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
2613 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
2614 return rcStrict;
2615}
2616
2617#ifdef IEM_WITH_SETJMP
2618/**
2619 * See iemRaiseXcptOrInt. Will not return.
2620 */
2621DECL_NO_RETURN(void)
2622iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
2623 uint8_t cbInstr,
2624 uint8_t u8Vector,
2625 uint32_t fFlags,
2626 uint16_t uErr,
2627 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
2628{
2629 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2630 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2631}
2632#endif
2633
2634
2635/** \#DE - 00. */
2636VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
2637{
2638 if (GCMIsInterceptingXcptDE(pVCpu))
2639 {
2640 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
2641 if (rc == VINF_SUCCESS)
2642 {
2643 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
2644 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
2645 }
2646 }
2647 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2648}
2649
2650
2651#ifdef IEM_WITH_SETJMP
2652/** \#DE - 00. */
2653DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2654{
2655 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2656}
2657#endif
2658
2659
2660/** \#DB - 01.
2661 * @note This automatically clears DR7.GD. */
2662VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
2663{
2664 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
2665 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2666 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
2667}
2668
2669
2670/** \#BR - 05. */
2671VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
2672{
2673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2674}
2675
2676
2677/** \#UD - 06. */
2678VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
2679{
2680 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2681}
2682
2683
2684#ifdef IEM_WITH_SETJMP
2685/** \#UD - 06. */
2686DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2687{
2688 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2689}
2690#endif
2691
2692
2693/** \#NM - 07. */
2694VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
2695{
2696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2697}
2698
2699
2700#ifdef IEM_WITH_SETJMP
2701/** \#NM - 07. */
2702DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2703{
2704 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2705}
2706#endif
2707
2708
2709/** \#TS(err) - 0a. */
2710VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2711{
2712 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2713}
2714
2715
2716/** \#TS(tr) - 0a. */
2717VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
2718{
2719 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2720 pVCpu->cpum.GstCtx.tr.Sel, 0);
2721}
2722
2723
2724/** \#TS(0) - 0a. */
2725VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
2726{
2727 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2728 0, 0);
2729}
2730
2731
2732/** \#TS(err) - 0a. */
2733VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2734{
2735 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2736 uSel & X86_SEL_MASK_OFF_RPL, 0);
2737}
2738
2739
2740/** \#NP(err) - 0b. */
2741VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2742{
2743 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2744}
2745
2746
2747/** \#NP(sel) - 0b. */
2748VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2749{
2750 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
2751 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
2752 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2753 uSel & ~X86_SEL_RPL, 0);
2754}
2755
2756
2757/** \#SS(seg) - 0c. */
2758VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2759{
2760 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
2761 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
2762 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2763 uSel & ~X86_SEL_RPL, 0);
2764}
2765
2766
2767/** \#SS(err) - 0c. */
2768VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2769{
2770 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
2771 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
2772 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2773}
2774
2775
2776/** \#GP(n) - 0d. */
2777VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2778{
2779 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
2780 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2781}
2782
2783
2784/** \#GP(0) - 0d. */
2785VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
2786{
2787 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2788 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2789}
2790
2791#ifdef IEM_WITH_SETJMP
2792/** \#GP(0) - 0d. */
2793DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2794{
2795 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2796 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2797}
2798#endif
2799
2800
2801/** \#GP(sel) - 0d. */
2802VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
2803{
2804 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
2805 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2807 Sel & ~X86_SEL_RPL, 0);
2808}
2809
2810
2811/** \#GP(0) - 0d. */
2812VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
2813{
2814 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2815 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2816}
2817
2818
2819/** \#GP(sel) - 0d. */
2820VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
2821{
2822 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2823 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2824 NOREF(iSegReg); NOREF(fAccess);
2825 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2826 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2827}
2828
2829#ifdef IEM_WITH_SETJMP
2830/** \#GP(sel) - 0d, longjmp. */
2831DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
2832{
2833 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2834 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2835 NOREF(iSegReg); NOREF(fAccess);
2836 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2837 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2838}
2839#endif
2840
2841/** \#GP(sel) - 0d. */
2842VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
2843{
2844 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
2845 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2846 NOREF(Sel);
2847 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2848}
2849
2850#ifdef IEM_WITH_SETJMP
2851/** \#GP(sel) - 0d, longjmp. */
2852DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
2853{
2854 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
2855 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2856 NOREF(Sel);
2857 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2858}
2859#endif
2860
2861
2862/** \#GP(sel) - 0d. */
2863VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
2864{
2865 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2866 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2867 NOREF(iSegReg); NOREF(fAccess);
2868 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2869}
2870
2871#ifdef IEM_WITH_SETJMP
2872/** \#GP(sel) - 0d, longjmp. */
2873DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
2874{
2875 NOREF(iSegReg); NOREF(fAccess);
2876 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2877}
2878#endif
2879
2880
2881/** \#PF(n) - 0e. */
2882VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
2883{
2884 uint16_t uErr;
2885 switch (rc)
2886 {
2887 case VERR_PAGE_NOT_PRESENT:
2888 case VERR_PAGE_TABLE_NOT_PRESENT:
2889 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2890 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2891 uErr = 0;
2892 break;
2893
2894 case VERR_RESERVED_PAGE_TABLE_BITS:
2895 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
2896 break;
2897
2898 default:
2899 AssertMsgFailed(("%Rrc\n", rc));
2900 RT_FALL_THRU();
2901 case VERR_ACCESS_DENIED:
2902 uErr = X86_TRAP_PF_P;
2903 break;
2904 }
2905
2906 if (IEM_GET_CPL(pVCpu) == 3)
2907 uErr |= X86_TRAP_PF_US;
2908
2909 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2910 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2911 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
2912 uErr |= X86_TRAP_PF_ID;
2913
2914#if 0 /* This is so much nonsense, really. Why was it done like that? */
2915 /* Note! RW access callers reporting a WRITE protection fault, will clear
2916 the READ flag before calling. So, read-modify-write accesses (RW)
2917 can safely be reported as READ faults. */
2918 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2919 uErr |= X86_TRAP_PF_RW;
2920#else
2921 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2922 {
2923 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
2924 /// (regardless of outcome of the comparison in the latter case).
2925 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
2926 uErr |= X86_TRAP_PF_RW;
2927 }
2928#endif
2929
2930 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
2931 of the memory operand rather than at the start of it. (Not sure what
2932 happens if it crosses a page boundary.) The current heuristic for
2933 this is to report the #PF for the last byte if the access is more than
2934 64 bytes. This is probably not correct, but we can work that out later;
2935 the main objective now is to get FXSAVE working like on real hardware and
2936 make bs3-cpu-basic-2 work. */
2937 if (cbAccess <= 64)
2938 { /* likely */ }
2939 else
2940 GCPtrWhere += cbAccess - 1;
2941
2942 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2943 uErr, GCPtrWhere);
2944}
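
/* Illustrative sketch: a worked example of how the #PF error code above is
   composed, using the X86_TRAP_PF_XXX bit definitions from iprt/x86.h:

       // Ring-3 write to a not-present page:
       //   rc == VERR_PAGE_NOT_PRESENT       -> uErr  = 0                (P clear)
       //   IEM_GET_CPL(pVCpu) == 3           -> uErr |= X86_TRAP_PF_US   (bit 2)
       //   fAccess & IEM_ACCESS_TYPE_WRITE   -> uErr |= X86_TRAP_PF_RW   (bit 1)
       //   => error code 0x6, CR2 = GCPtrWhere.
       //
       // For a 512-byte FXSAVE image starting at GCPtrWhere, cbAccess > 64,
       // so CR2 is reported as GCPtrWhere + 511 (the last byte) instead.
*/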
2945
2946#ifdef IEM_WITH_SETJMP
2947/** \#PF(n) - 0e, longjmp. */
2948DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
2949 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
2950{
2951 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
2952}
2953#endif
2954
2955
2956/** \#MF(0) - 10. */
2957VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
2958{
2959 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
2960 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2961
2962 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
2963 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
2964 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
2965}
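
/* Illustrative sketch: x87 and WAIT/FWAIT style instructions typically raise
   this only when the FPU status word has a pending exception summarized in the
   ES bit.  A hypothetical caller, assuming the X86_FSW_ES definition from
   iprt/x86.h and the XState layout in the guest context:

       if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES)
           return iemRaiseMathFault(pVCpu);   // #MF if CR0.NE is set, else FERR -> IRQ 13

   With CR0.NE clear the instruction is retired normally and the error is
   signalled externally via the legacy ISA IRQ 13 path above.
*/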
2966
2967#ifdef IEM_WITH_SETJMP
2968/** \#MF(0) - 10, longjmp. */
2969DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2970{
2971 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
2972}
2973#endif
2974
2975
2976/** \#AC(0) - 11. */
2977VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
2978{
2979 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2980}
2981
2982#ifdef IEM_WITH_SETJMP
2983/** \#AC(0) - 11, longjmp. */
2984DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2985{
2986 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
2987}
2988#endif
2989
2990
2991/** \#XF(0)/\#XM(0) - 13. */
2992VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
2993{
2994 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2995}
2996
2997
2998#ifdef IEM_WITH_SETJMP
2999/** \#XF(0)/\#XM(0) - 13, longjmp. */
3000DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
3001{
3002 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
3003}
3004#endif
3005
3006
3007/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
3008IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
3009{
3010 NOREF(cbInstr);
3011 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3012}
3013
3014
3015/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
3016IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
3017{
3018 NOREF(cbInstr);
3019 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3020}
3021
3022
3023/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
3024IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
3025{
3026 NOREF(cbInstr);
3027 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3028}
3029
3030
3031/**
3032 * Checks if IEM is in the process of delivering an event (interrupt or
3033 * exception).
3034 *
3035 * @returns true if we're in the process of raising an interrupt or exception,
3036 * false otherwise.
3037 * @param pVCpu The cross context virtual CPU structure.
3038 * @param puVector Where to store the vector associated with the
3039 * currently delivered event, optional.
3040 * @param pfFlags Where to store the event delivery flags (see
3041 * IEM_XCPT_FLAGS_XXX), optional.
3042 * @param puErr Where to store the error code associated with the
3043 * event, optional.
3044 * @param puCr2 Where to store the CR2 associated with the event,
3045 * optional.
3046 * @remarks The caller should check the flags to determine if the error code and
3047 * CR2 are valid for the event.
3048 */
3049VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
3050{
3051 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
3052 if (fRaisingXcpt)
3053 {
3054 if (puVector)
3055 *puVector = pVCpu->iem.s.uCurXcpt;
3056 if (pfFlags)
3057 *pfFlags = pVCpu->iem.s.fCurXcpt;
3058 if (puErr)
3059 *puErr = pVCpu->iem.s.uCurXcptErr;
3060 if (puCr2)
3061 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
3062 }
3063 return fRaisingXcpt;
3064}
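
/* Illustrative sketch: callers should consult the returned flags before
   trusting the error code or CR2 values, along these lines:

       uint8_t uVector; uint32_t fFlags, uErr; uint64_t uCr2;
       if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
       {
           if (fFlags & IEM_XCPT_FLAGS_ERR)
               Log(("delivering vector %#x, error code %#x\n", uVector, uErr));
           if (fFlags & IEM_XCPT_FLAGS_CR2)
               Log(("CR2=%#RX64\n", uCr2));
       }
*/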
3065
3066/** @} */
3067