VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 47396

Last change on this file since 47396 was 47379, checked in by vboxsync, 12 years ago

IEM: syscall and sysret.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 179.1 KB
1/* $Id: IEMAllCImpl.cpp.h 47379 2013-07-24 17:21:12Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 X86EFLAGS Efl;
38 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
39 if ( (pCtx->cr0 & X86_CR0_PE)
40 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
41 || Efl.Bits.u1VM) )
42 {
43 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
44 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
45 }
46 return VINF_SUCCESS;
47}
48
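/*
 * A minimal sketch (not part of the sources) of the rule the helper above
 * implements: port I/O needs no further checks in real mode, nor in
 * protected mode outside V8086 as long as CPL <= IOPL; every other case
 * requires consulting the TSS I/O permission bitmap, which is the part
 * still marked as a todo above.
 */
#if 0
static bool iemHlpIsPortIoUnrestricted(uint64_t cr0, uint32_t fEfl, uint8_t uCpl)
{
    if (!(cr0 & X86_CR0_PE))                /* real mode: always allowed */
        return true;
    if (fEfl & X86_EFL_VM)                  /* V8086 mode: always needs the bitmap */
        return false;
    return uCpl <= X86_EFL_GET_IOPL(fEfl);  /* protected mode: CPL vs IOPL */
}
#endif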
49
50#if 0
51/**
52 * Calculates the parity bit.
53 *
54 * @returns true if the bit is set, false if not.
55 * @param u8Result The least significant byte of the result.
56 */
57static bool iemHlpCalcParityFlag(uint8_t u8Result)
58{
59 /*
60 * Parity is set if the number of bits in the least significant byte of
61 * the result is even.
62 */
63 uint8_t cBits;
64 cBits = u8Result & 1; /* 0 */
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1;
71 u8Result >>= 1;
72 cBits += u8Result & 1; /* 4 */
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 u8Result >>= 1;
78 cBits += u8Result & 1;
79 return !(cBits & 1);
80}
81#endif /* not used */
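/*
 * An equivalent formulation (illustrative only, not used by the build): the
 * same parity value can be obtained by XOR-folding the byte, since PF is set
 * exactly when the least significant result byte contains an even number of
 * set bits.
 */
#if 0
static bool iemHlpCalcParityFlagXorFold(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;
    return !(u8Result & 1); /* bit 0 now holds the odd/even count of set bits */
}
#endif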
82
83
84/**
85 * Updates the specified flags according to a 8-bit result.
86 *
87 * @param pIemCpu The IEM state of the calling EMT.
88 * @param u8Result The result to set the flags according to.
89 * @param fToUpdate The flags to update.
90 * @param fUndefined The flags that are specified as undefined.
91 */
92static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
93{
94 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
95
96 uint32_t fEFlags = pCtx->eflags.u;
97 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
98 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
99 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
100}
101
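/*
 * Illustrative example of the masking above (hypothetical flag selection): a
 * caller passing fToUpdate = X86_EFL_ZF | X86_EFL_SF | X86_EFL_PF and
 * fUndefined = X86_EFL_AF | X86_EFL_OF gets exactly those five bits replaced
 * from the freshly computed TEST result, while CF and every other EFLAGS bit
 * keeps the value it had before the call.
 */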
102
103/**
104 * Loads a NULL data selector into a selector register, both the hidden and
105 * visible parts, in protected mode.
106 *
107 * @param pSReg Pointer to the segment register.
108 * @param uRpl The RPL.
109 */
110static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl)
111{
112 /** @todo Testcase: write a testcase checking what happens when loading a NULL
113 * data selector in protected mode. */
114 pSReg->Sel = uRpl;
115 pSReg->ValidSel = uRpl;
116 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
117 pSReg->u64Base = 0;
118 pSReg->u32Limit = 0;
119 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
120}
121
122
123/**
124 * Helper used by iret.
125 *
126 * @param uCpl The new CPL.
127 * @param pSReg Pointer to the segment register.
128 */
129static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
130{
131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
132 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
133 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
134#else
135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
136#endif
137
138 if ( uCpl > pSReg->Attr.n.u2Dpl
139 && pSReg->Attr.n.u1DescType /* code or data, not system */
140 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
141 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
142 iemHlpLoadNullDataSelectorProt(pSReg, 0);
143}
144
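/*
 * Illustrative example of the rule above: when iret or retf drops from CPL 0
 * to CPL 3 while DS still holds a DPL-0 data segment, that segment would no
 * longer be accessible at the new privilege level, so the register is loaded
 * with a NULL selector instead; conforming code segments are exempt from
 * this treatment.
 */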
145
146/**
147 * Indicates that we have modified the FPU state.
148 *
149 * @param pIemCpu The IEM state of the calling EMT.
150 */
151DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
152{
153 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
154}
155
156/** @} */
157
158/** @name C Implementations
159 * @{
160 */
161
162/**
163 * Implements a 16-bit popa.
164 */
165IEM_CIMPL_DEF_0(iemCImpl_popa_16)
166{
167 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
168 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
169 RTGCPTR GCPtrLast = GCPtrStart + 15;
170 VBOXSTRICTRC rcStrict;
171
172 /*
173 * The docs are a bit hard to comprehend here, but it looks like we wrap
174 * around in real mode as long as none of the individual "popa" crosses the
175 * end of the stack segment. In protected mode we check the whole access
176 * in one go. For efficiency, only do the word-by-word thing if we're in
177 * danger of wrapping around.
178 */
179 /** @todo do popa boundary / wrap-around checks. */
180 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
181 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
182 {
183 /* word-by-word */
184 RTUINT64U TmpRsp;
185 TmpRsp.u = pCtx->rsp;
186 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
187 if (rcStrict == VINF_SUCCESS)
188 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
189 if (rcStrict == VINF_SUCCESS)
190 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
191 if (rcStrict == VINF_SUCCESS)
192 {
193 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
194 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
195 }
196 if (rcStrict == VINF_SUCCESS)
197 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
198 if (rcStrict == VINF_SUCCESS)
199 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
200 if (rcStrict == VINF_SUCCESS)
201 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 pCtx->rsp = TmpRsp.u;
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 else
209 {
210 uint16_t const *pa16Mem = NULL;
211 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
212 if (rcStrict == VINF_SUCCESS)
213 {
214 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
215 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
216 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
217 /* skip sp */
218 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
219 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
220 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
221 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
222 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
223 if (rcStrict == VINF_SUCCESS)
224 {
225 iemRegAddToRsp(pIemCpu, pCtx, 16);
226 iemRegAddToRip(pIemCpu, cbInstr);
227 }
228 }
229 }
230 return rcStrict;
231}
232
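/*
 * Illustrative note on the indexing used above (assuming the usual register
 * numbering, X86_GREG_xAX == 0 ... X86_GREG_xDI == 7): the 16-bit PUSHA
 * image stores, from the lowest stack address upwards, DI, SI, BP, the
 * original SP, BX, DX, CX and AX, so the expression 7 - X86_GREG_xREG picks
 * the matching word out of the mapped block.
 */
#if 0
AssertCompile(X86_GREG_xAX == 0);
AssertCompile(X86_GREG_xDI == 7);
#endif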
233
234/**
235 * Implements a 32-bit popa.
236 */
237IEM_CIMPL_DEF_0(iemCImpl_popa_32)
238{
239 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
240 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
241 RTGCPTR GCPtrLast = GCPtrStart + 31;
242 VBOXSTRICTRC rcStrict;
243
244 /*
245 * The docs are a bit hard to comprehend here, but it looks like we wrap
246 * around in real mode as long as none of the individual "popa" crosses the
247 * end of the stack segment. In protected mode we check the whole access
248 * in one go. For efficiency, only do the word-by-word thing if we're in
249 * danger of wrapping around.
250 */
251 /** @todo do popa boundary / wrap-around checks. */
252 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
253 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
254 {
255 /* word-by-word */
256 RTUINT64U TmpRsp;
257 TmpRsp.u = pCtx->rsp;
258 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
261 if (rcStrict == VINF_SUCCESS)
262 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
263 if (rcStrict == VINF_SUCCESS)
264 {
265 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
266 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
267 }
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
272 if (rcStrict == VINF_SUCCESS)
273 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
274 if (rcStrict == VINF_SUCCESS)
275 {
276#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
277 pCtx->rdi &= UINT32_MAX;
278 pCtx->rsi &= UINT32_MAX;
279 pCtx->rbp &= UINT32_MAX;
280 pCtx->rbx &= UINT32_MAX;
281 pCtx->rdx &= UINT32_MAX;
282 pCtx->rcx &= UINT32_MAX;
283 pCtx->rax &= UINT32_MAX;
284#endif
285 pCtx->rsp = TmpRsp.u;
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 else
290 {
291 uint32_t const *pa32Mem;
292 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
293 if (rcStrict == VINF_SUCCESS)
294 {
295 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
296 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
297 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
298 /* skip esp */
299 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
300 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
301 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
302 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
303 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
304 if (rcStrict == VINF_SUCCESS)
305 {
306 iemRegAddToRsp(pIemCpu, pCtx, 32);
307 iemRegAddToRip(pIemCpu, cbInstr);
308 }
309 }
310 }
311 return rcStrict;
312}
313
314
315/**
316 * Implements a 16-bit pusha.
317 */
318IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
319{
320 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
321 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
322 RTGCPTR GCPtrBottom = GCPtrTop - 15;
323 VBOXSTRICTRC rcStrict;
324
325 /*
326 * The docs are a bit hard to comprehend here, but it looks like we wrap
327 * around in real mode as long as none of the individual "pusha" crosses the
328 * end of the stack segment. In protected mode we check the whole access
329 * in one go. For efficiency, only do the word-by-word thing if we're in
330 * danger of wrapping around.
331 */
332 /** @todo do pusha boundary / wrap-around checks. */
333 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
334 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
335 {
336 /* word-by-word */
337 RTUINT64U TmpRsp;
338 TmpRsp.u = pCtx->rsp;
339 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
344 if (rcStrict == VINF_SUCCESS)
345 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
346 if (rcStrict == VINF_SUCCESS)
347 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
348 if (rcStrict == VINF_SUCCESS)
349 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
350 if (rcStrict == VINF_SUCCESS)
351 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
352 if (rcStrict == VINF_SUCCESS)
353 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
354 if (rcStrict == VINF_SUCCESS)
355 {
356 pCtx->rsp = TmpRsp.u;
357 iemRegAddToRip(pIemCpu, cbInstr);
358 }
359 }
360 else
361 {
362 GCPtrBottom--;
363 uint16_t *pa16Mem = NULL;
364 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
365 if (rcStrict == VINF_SUCCESS)
366 {
367 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
368 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
369 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
370 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
371 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
372 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
373 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
374 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
375 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
376 if (rcStrict == VINF_SUCCESS)
377 {
378 iemRegSubFromRsp(pIemCpu, pCtx, 16);
379 iemRegAddToRip(pIemCpu, cbInstr);
380 }
381 }
382 }
383 return rcStrict;
384}
385
386
387/**
388 * Implements a 32-bit pusha.
389 */
390IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
391{
392 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
393 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
394 RTGCPTR GCPtrBottom = GCPtrTop - 31;
395 VBOXSTRICTRC rcStrict;
396
397 /*
398 * The docs are a bit hard to comprehend here, but it looks like we wrap
399 * around in real mode as long as none of the individual "pusha" crosses the
400 * end of the stack segment. In protected mode we check the whole access
401 * in one go. For efficiency, only do the word-by-word thing if we're in
402 * danger of wrapping around.
403 */
404 /** @todo do pusha boundary / wrap-around checks. */
405 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
406 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
407 {
408 /* word-by-word */
409 RTUINT64U TmpRsp;
410 TmpRsp.u = pCtx->rsp;
411 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
424 if (rcStrict == VINF_SUCCESS)
425 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
426 if (rcStrict == VINF_SUCCESS)
427 {
428 pCtx->rsp = TmpRsp.u;
429 iemRegAddToRip(pIemCpu, cbInstr);
430 }
431 }
432 else
433 {
434 GCPtrBottom--;
435 uint32_t *pa32Mem;
436 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
437 if (rcStrict == VINF_SUCCESS)
438 {
439 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
440 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
441 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
442 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
443 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
444 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
445 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
446 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
447 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
448 if (rcStrict == VINF_SUCCESS)
449 {
450 iemRegSubFromRsp(pIemCpu, pCtx, 32);
451 iemRegAddToRip(pIemCpu, cbInstr);
452 }
453 }
454 }
455 return rcStrict;
456}
457
458
459/**
460 * Implements pushf.
461 *
462 *
463 * @param enmEffOpSize The effective operand size.
464 */
465IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
466{
467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
468
469 /*
470 * If we're in V8086 mode some care is required (which is why we're
471 * doing this in a C implementation).
472 */
473 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
474 if ( (fEfl & X86_EFL_VM)
475 && X86_EFL_GET_IOPL(fEfl) != 3 )
476 {
477 Assert(pCtx->cr0 & X86_CR0_PE);
478 if ( enmEffOpSize != IEMMODE_16BIT
479 || !(pCtx->cr4 & X86_CR4_VME))
480 return iemRaiseGeneralProtectionFault0(pIemCpu);
481 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
482 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
483 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
484 }
485
486 /*
487 * Ok, clear RF and VM and push the flags.
488 */
489 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
490
491 VBOXSTRICTRC rcStrict;
492 switch (enmEffOpSize)
493 {
494 case IEMMODE_16BIT:
495 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
496 break;
497 case IEMMODE_32BIT:
498 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
499 break;
500 case IEMMODE_64BIT:
501 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
502 break;
503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
504 }
505 if (rcStrict != VINF_SUCCESS)
506 return rcStrict;
507
508 iemRegAddToRip(pIemCpu, cbInstr);
509 return VINF_SUCCESS;
510}
511
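/*
 * Worked example for the CR4.VME path above (illustrative values): with VIF
 * (bit 19) set and IF (bit 9) clear, the pushed 16-bit image ends up with
 * bit 9 set, i.e. the V8086 program sees IF = VIF rather than the real IF.
 */
#if 0
uint32_t fEfl = X86_EFL_VIF;                /* VIF set, IF clear */
fEfl &= ~X86_EFL_IF;
fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);   /* bit 19 -> bit 9 */
/* (uint16_t)fEfl now has X86_EFL_IF set. */
#endif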
512
513/**
514 * Implements popf.
515 *
516 * @param enmEffOpSize The effective operand size.
517 */
518IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
519{
520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
521 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
522 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
523 VBOXSTRICTRC rcStrict;
524 uint32_t fEflNew;
525
526 /*
527 * V8086 is special as usual.
528 */
529 if (fEflOld & X86_EFL_VM)
530 {
531 /*
532 * Almost anything goes if IOPL is 3.
533 */
534 if (X86_EFL_GET_IOPL(fEflOld) == 3)
535 {
536 switch (enmEffOpSize)
537 {
538 case IEMMODE_16BIT:
539 {
540 uint16_t u16Value;
541 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
542 if (rcStrict != VINF_SUCCESS)
543 return rcStrict;
544 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
545 break;
546 }
547 case IEMMODE_32BIT:
548 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
549 if (rcStrict != VINF_SUCCESS)
550 return rcStrict;
551 break;
552 IEM_NOT_REACHED_DEFAULT_CASE_RET();
553 }
554
555 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
556 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
557 }
558 /*
559 * Interrupt flag virtualization with CR4.VME=1.
560 */
561 else if ( enmEffOpSize == IEMMODE_16BIT
562 && (pCtx->cr4 & X86_CR4_VME) )
563 {
564 uint16_t u16Value;
565 RTUINT64U TmpRsp;
566 TmpRsp.u = pCtx->rsp;
567 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
568 if (rcStrict != VINF_SUCCESS)
569 return rcStrict;
570
571 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
572 * or before? */
573 if ( ( (u16Value & X86_EFL_IF)
574 && (fEflOld & X86_EFL_VIP))
575 || (u16Value & X86_EFL_TF) )
576 return iemRaiseGeneralProtectionFault0(pIemCpu);
577
578 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
579 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
580 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
581 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
582
583 pCtx->rsp = TmpRsp.u;
584 }
585 else
586 return iemRaiseGeneralProtectionFault0(pIemCpu);
587
588 }
589 /*
590 * Not in V8086 mode.
591 */
592 else
593 {
594 /* Pop the flags. */
595 switch (enmEffOpSize)
596 {
597 case IEMMODE_16BIT:
598 {
599 uint16_t u16Value;
600 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
601 if (rcStrict != VINF_SUCCESS)
602 return rcStrict;
603 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
604 break;
605 }
606 case IEMMODE_32BIT:
607 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
608 if (rcStrict != VINF_SUCCESS)
609 return rcStrict;
610 break;
611 case IEMMODE_64BIT:
612 {
613 uint64_t u64Value;
614 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
615 if (rcStrict != VINF_SUCCESS)
616 return rcStrict;
617 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
618 break;
619 }
620 IEM_NOT_REACHED_DEFAULT_CASE_RET();
621 }
622
623 /* Merge them with the current flags. */
624 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
625 || pIemCpu->uCpl == 0)
626 {
627 fEflNew &= X86_EFL_POPF_BITS;
628 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
629 }
630 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
631 {
632 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
633 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
634 }
635 else
636 {
637 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
638 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
639 }
640 }
641
642 /*
643 * Commit the flags.
644 */
645 Assert(fEflNew & RT_BIT_32(1));
646 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
647 iemRegAddToRip(pIemCpu, cbInstr);
648
649 return VINF_SUCCESS;
650}
651
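/*
 * A minimal sketch (hypothetical helper, not part of the file) of the merge
 * rule applied in all the branches above: only the bits in the writable mask
 * are taken from the popped value, everything outside it keeps its previous
 * value.  The mask is X86_EFL_POPF_BITS, with IOPL writable only at CPL 0
 * and IF, roughly speaking, only when CPL <= IOPL.
 */
#if 0
static uint32_t iemHlpPopfMergeSketch(uint32_t fOld, uint32_t fPopped, uint32_t fWritable)
{
    return (fPopped & fWritable) | (fOld & ~fWritable);
}
#endif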
652
653/**
654 * Implements a 16-bit indirect call.
655 *
656 * @param uNewPC The new program counter (RIP) value (loaded from the
657 * operand).
659 */
660IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
661{
662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
663 uint16_t uOldPC = pCtx->ip + cbInstr;
664 if (uNewPC > pCtx->cs.u32Limit)
665 return iemRaiseGeneralProtectionFault0(pIemCpu);
666
667 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
668 if (rcStrict != VINF_SUCCESS)
669 return rcStrict;
670
671 pCtx->rip = uNewPC;
672 return VINF_SUCCESS;
673
674}
675
676
677/**
678 * Implements a 16-bit relative call.
679 *
680 * @param offDisp The displacement offset.
681 */
682IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
683{
684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
685 uint16_t uOldPC = pCtx->ip + cbInstr;
686 uint16_t uNewPC = uOldPC + offDisp;
687 if (uNewPC > pCtx->cs.u32Limit)
688 return iemRaiseGeneralProtectionFault0(pIemCpu);
689
690 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
691 if (rcStrict != VINF_SUCCESS)
692 return rcStrict;
693
694 pCtx->rip = uNewPC;
695 return VINF_SUCCESS;
696}
697
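/*
 * Worked example for the 16-bit relative call above (illustrative numbers):
 * with IP=0xFFF0, a 3-byte instruction and offDisp=0x0020, the return
 * address is 0xFFF3 and the target wraps in 16-bit arithmetic to 0x0013,
 * which is then checked against CS.limit like any other target before being
 * committed to RIP.
 */
#if 0
uint16_t const uOldPC = UINT16_C(0xFFF0) + 3;                   /* 0xFFF3 */
uint16_t const uNewPC = (uint16_t)(uOldPC + UINT16_C(0x0020));  /* 0x0013 */
#endif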
698
699/**
700 * Implements a 32-bit indirect call.
701 *
702 * @param uNewPC The new program counter (RIP) value (loaded from the
703 * operand).
705 */
706IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
707{
708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
709 uint32_t uOldPC = pCtx->eip + cbInstr;
710 if (uNewPC > pCtx->cs.u32Limit)
711 return iemRaiseGeneralProtectionFault0(pIemCpu);
712
713 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
714 if (rcStrict != VINF_SUCCESS)
715 return rcStrict;
716
717 pCtx->rip = uNewPC;
718 return VINF_SUCCESS;
719
720}
721
722
723/**
724 * Implements a 32-bit relative call.
725 *
726 * @param offDisp The displacement offset.
727 */
728IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
729{
730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
731 uint32_t uOldPC = pCtx->eip + cbInstr;
732 uint32_t uNewPC = uOldPC + offDisp;
733 if (uNewPC > pCtx->cs.u32Limit)
734 return iemRaiseGeneralProtectionFault0(pIemCpu);
735
736 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
737 if (rcStrict != VINF_SUCCESS)
738 return rcStrict;
739
740 pCtx->rip = uNewPC;
741 return VINF_SUCCESS;
742}
743
744
745/**
746 * Implements a 64-bit indirect call.
747 *
748 * @param uNewPC The new program counter (RIP) value (loaded from the
749 * operand).
751 */
752IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
753{
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint64_t uOldPC = pCtx->rip + cbInstr;
756 if (!IEM_IS_CANONICAL(uNewPC))
757 return iemRaiseGeneralProtectionFault0(pIemCpu);
758
759 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 pCtx->rip = uNewPC;
764 return VINF_SUCCESS;
765
766}
767
768
769/**
770 * Implements a 64-bit relative call.
771 *
772 * @param offDisp The displacement offset.
773 */
774IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
775{
776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
777 uint64_t uOldPC = pCtx->rip + cbInstr;
778 uint64_t uNewPC = uOldPC + offDisp;
779 if (!IEM_IS_CANONICAL(uNewPC))
780 return iemRaiseNotCanonical(pIemCpu);
781
782 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
783 if (rcStrict != VINF_SUCCESS)
784 return rcStrict;
785
786 pCtx->rip = uNewPC;
787 return VINF_SUCCESS;
788}
789
790
791/**
792 * Implements far jumps and calls thru task segments (TSS).
793 *
794 * @param uSel The selector.
795 * @param enmBranch The kind of branching we're performing.
796 * @param enmEffOpSize The effective operand size.
797 * @param pDesc The descriptor corresponding to @a uSel. The type is
798 * task segment (TSS).
799 */
800IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
801{
802 /* Call various functions to do the work. */
803 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
804}
805
806
807/**
808 * Implements far jumps and calls thru task gates.
809 *
810 * @param uSel The selector.
811 * @param enmBranch The kind of branching we're performing.
812 * @param enmEffOpSize The effective operand size.
813 * @param pDesc The descriptor corresponding to @a uSel. The type is
814 * task gate.
815 */
816IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
817{
818 /* Call various functions to do the work. */
819 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
820}
821
822
823/**
824 * Implements far jumps and calls thru call gates.
825 *
826 * @param uSel The selector.
827 * @param enmBranch The kind of branching we're performing.
828 * @param enmEffOpSize The effective operand size.
829 * @param pDesc The descriptor corresponding to @a uSel. The type is
830 * call gate.
831 */
832IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
833{
834 /* Call various functions to do the work. */
835 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
836}
837
838
839/**
840 * Implements far jumps and calls thru system selectors.
841 *
842 * @param uSel The selector.
843 * @param enmBranch The kind of branching we're performing.
844 * @param enmEffOpSize The effective operand size.
845 * @param pDesc The descriptor corresponding to @a uSel.
846 */
847IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
848{
849 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
850 Assert((uSel & X86_SEL_MASK_OFF_RPL));
851
852 if (IEM_IS_LONG_MODE(pIemCpu))
853 switch (pDesc->Legacy.Gen.u4Type)
854 {
855 case AMD64_SEL_TYPE_SYS_CALL_GATE:
856 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
857
858 default:
859 case AMD64_SEL_TYPE_SYS_LDT:
860 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
861 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
862 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
863 case AMD64_SEL_TYPE_SYS_INT_GATE:
864 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
865 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
866
867 }
868
869 switch (pDesc->Legacy.Gen.u4Type)
870 {
871 case X86_SEL_TYPE_SYS_286_CALL_GATE:
872 case X86_SEL_TYPE_SYS_386_CALL_GATE:
873 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
874
875 case X86_SEL_TYPE_SYS_TASK_GATE:
876 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
877
878 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
879 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
880 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
881
882 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
883 Log(("branch %04x -> busy 286 TSS\n", uSel));
884 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
885
886 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
887 Log(("branch %04x -> busy 386 TSS\n", uSel));
888 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
889
890 default:
891 case X86_SEL_TYPE_SYS_LDT:
892 case X86_SEL_TYPE_SYS_286_INT_GATE:
893 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
894 case X86_SEL_TYPE_SYS_386_INT_GATE:
895 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
896 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
897 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
898 }
899}
900
901
902/**
903 * Implements far jumps.
904 *
905 * @param uSel The selector.
906 * @param offSeg The segment offset.
907 * @param enmEffOpSize The effective operand size.
908 */
909IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
910{
911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
912 NOREF(cbInstr);
913 Assert(offSeg <= UINT32_MAX);
914
915 /*
916 * Real mode and V8086 mode are easy. The only snag seems to be that
917 * CS.limit doesn't change and the limit check is done against the current
918 * limit.
919 */
920 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
921 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
922 {
923 if (offSeg > pCtx->cs.u32Limit)
924 return iemRaiseGeneralProtectionFault0(pIemCpu);
925
926 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
927 pCtx->rip = offSeg;
928 else
929 pCtx->rip = offSeg & UINT16_MAX;
930 pCtx->cs.Sel = uSel;
931 pCtx->cs.ValidSel = uSel;
932 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
933 pCtx->cs.u64Base = (uint32_t)uSel << 4;
934 return VINF_SUCCESS;
935 }
936
937 /*
938 * Protected mode. Need to parse the specified descriptor...
939 */
940 if (!(uSel & X86_SEL_MASK_OFF_RPL))
941 {
942 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
943 return iemRaiseGeneralProtectionFault0(pIemCpu);
944 }
945
946 /* Fetch the descriptor. */
947 IEMSELDESC Desc;
948 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
949 if (rcStrict != VINF_SUCCESS)
950 return rcStrict;
951
952 /* Is it there? */
953 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
954 {
955 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
956 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
957 }
958
959 /*
960 * Deal with it according to its type. We do the standard code selectors
961 * here and dispatch the system selectors to worker functions.
962 */
963 if (!Desc.Legacy.Gen.u1DescType)
964 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
965
966 /* Only code segments. */
967 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
968 {
969 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
970 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
971 }
972
973 /* L vs D. */
974 if ( Desc.Legacy.Gen.u1Long
975 && Desc.Legacy.Gen.u1DefBig
976 && IEM_IS_LONG_MODE(pIemCpu))
977 {
978 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
979 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
980 }
981
982 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
983 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
984 {
985 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
986 {
987 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
988 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
989 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
990 }
991 }
992 else
993 {
994 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
995 {
996 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
997 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
998 }
999 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1000 {
1001 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1002 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1003 }
1004 }
1005
1006 /* Chop the high bits if 16-bit (Intel says so). */
1007 if (enmEffOpSize == IEMMODE_16BIT)
1008 offSeg &= UINT16_MAX;
1009
1010 /* Limit check. (Should alternatively check for non-canonical addresses
1011 here, but that is ruled out by offSeg being 32-bit, right?) */
1012 uint64_t u64Base;
1013 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1015 u64Base = 0;
1016 else
1017 {
1018 if (offSeg > cbLimit)
1019 {
1020 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1021 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1022 }
1023 u64Base = X86DESC_BASE(&Desc.Legacy);
1024 }
1025
1026 /*
1027 * Ok, everything checked out fine. Now set the accessed bit before
1028 * committing the result into CS, CSHID and RIP.
1029 */
1030 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1031 {
1032 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1033 if (rcStrict != VINF_SUCCESS)
1034 return rcStrict;
1035 /** @todo check what VT-x and AMD-V does. */
1036 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1037 }
1038
1039 /* commit */
1040 pCtx->rip = offSeg;
1041 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1042 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1043 pCtx->cs.ValidSel = pCtx->cs.Sel;
1044 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1045 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1046 pCtx->cs.u32Limit = cbLimit;
1047 pCtx->cs.u64Base = u64Base;
1048 /** @todo check if the hidden bits are loaded correctly for 64-bit
1049 * mode. */
1050 return VINF_SUCCESS;
1051}
1052
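/*
 * Illustrative example for the real/V8086 path of the far jump above: the
 * new CS base is simply the selector shifted left by four, so jmpf
 * 0x2000:0x0100 loads CS.Sel=0x2000, CS.u64Base=0x20000 and RIP=0x0100,
 * making the next fetch happen at linear address 0x20100 while CS.limit is
 * left untouched.
 */
#if 0
uint16_t const uSel    = 0x2000;
uint64_t const uBase   = (uint32_t)uSel << 4;  /* 0x20000 */
uint64_t const uLinear = uBase + 0x0100;       /* 0x20100 */
#endif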
1053
1054/**
1055 * Implements far calls.
1056 *
1057 * This is very similar to iemCImpl_FarJmp.
1058 *
1059 * @param uSel The selector.
1060 * @param offSeg The segment offset.
1061 * @param enmEffOpSize The operand size (in case we need it).
1062 */
1063IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1064{
1065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1066 VBOXSTRICTRC rcStrict;
1067 uint64_t uNewRsp;
1068 RTPTRUNION uPtrRet;
1069
1070 /*
1071 * Real mode and V8086 mode are easy. The only snag seems to be that
1072 * CS.limit doesn't change and the limit check is done against the current
1073 * limit.
1074 */
1075 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1076 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1077 {
1078 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1079
1080 /* Check stack first - may #SS(0). */
1081 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1082 &uPtrRet.pv, &uNewRsp);
1083 if (rcStrict != VINF_SUCCESS)
1084 return rcStrict;
1085
1086 /* Check the target address range. */
1087 if (offSeg > UINT32_MAX)
1088 return iemRaiseGeneralProtectionFault0(pIemCpu);
1089
1090 /* Everything is fine, push the return address. */
1091 if (enmEffOpSize == IEMMODE_16BIT)
1092 {
1093 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1094 uPtrRet.pu16[1] = pCtx->cs.Sel;
1095 }
1096 else
1097 {
1098 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1099 uPtrRet.pu16[3] = pCtx->cs.Sel;
1100 }
1101 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1102 if (rcStrict != VINF_SUCCESS)
1103 return rcStrict;
1104
1105 /* Branch. */
1106 pCtx->rip = offSeg;
1107 pCtx->cs.Sel = uSel;
1108 pCtx->cs.ValidSel = uSel;
1109 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1110 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1111 return VINF_SUCCESS;
1112 }
1113
1114 /*
1115 * Protected mode. Need to parse the specified descriptor...
1116 */
1117 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1118 {
1119 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1120 return iemRaiseGeneralProtectionFault0(pIemCpu);
1121 }
1122
1123 /* Fetch the descriptor. */
1124 IEMSELDESC Desc;
1125 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1126 if (rcStrict != VINF_SUCCESS)
1127 return rcStrict;
1128
1129 /*
1130 * Deal with it according to its type. We do the standard code selectors
1131 * here and dispatch the system selectors to worker functions.
1132 */
1133 if (!Desc.Legacy.Gen.u1DescType)
1134 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1135
1136 /* Only code segments. */
1137 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1138 {
1139 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1140 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1141 }
1142
1143 /* L vs D. */
1144 if ( Desc.Legacy.Gen.u1Long
1145 && Desc.Legacy.Gen.u1DefBig
1146 && IEM_IS_LONG_MODE(pIemCpu))
1147 {
1148 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1149 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1150 }
1151
1152 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1153 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1154 {
1155 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1156 {
1157 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1158 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1159 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1160 }
1161 }
1162 else
1163 {
1164 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1165 {
1166 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1167 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1168 }
1169 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1170 {
1171 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1172 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1173 }
1174 }
1175
1176 /* Is it there? */
1177 if (!Desc.Legacy.Gen.u1Present)
1178 {
1179 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1180 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1181 }
1182
1183 /* Check stack first - may #SS(0). */
1184 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1185 * 16-bit code cause a two or four byte CS to be pushed? */
1186 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1187 enmEffOpSize == IEMMODE_64BIT ? 8+8
1188 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1189 &uPtrRet.pv, &uNewRsp);
1190 if (rcStrict != VINF_SUCCESS)
1191 return rcStrict;
1192
1193 /* Chop the high bits if 16-bit (Intel says so). */
1194 if (enmEffOpSize == IEMMODE_16BIT)
1195 offSeg &= UINT16_MAX;
1196
1197 /* Limit / canonical check. */
1198 uint64_t u64Base;
1199 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1200 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1201 {
1202 if (!IEM_IS_CANONICAL(offSeg))
1203 {
1204 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1205 return iemRaiseNotCanonical(pIemCpu);
1206 }
1207 u64Base = 0;
1208 }
1209 else
1210 {
1211 if (offSeg > cbLimit)
1212 {
1213 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1214 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1215 }
1216 u64Base = X86DESC_BASE(&Desc.Legacy);
1217 }
1218
1219 /*
1220 * Now set the accessed bit before
1221 * writing the return address to the stack and committing the result into
1222 * CS, CSHID and RIP.
1223 */
1224 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1225 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1226 {
1227 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1228 if (rcStrict != VINF_SUCCESS)
1229 return rcStrict;
1230 /** @todo check what VT-x and AMD-V does. */
1231 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1232 }
1233
1234 /* stack */
1235 if (enmEffOpSize == IEMMODE_16BIT)
1236 {
1237 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1238 uPtrRet.pu16[1] = pCtx->cs.Sel;
1239 }
1240 else if (enmEffOpSize == IEMMODE_32BIT)
1241 {
1242 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1243 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1244 }
1245 else
1246 {
1247 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1248 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1249 }
1250 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1251 if (rcStrict != VINF_SUCCESS)
1252 return rcStrict;
1253
1254 /* commit */
1255 pCtx->rip = offSeg;
1256 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1257 pCtx->cs.Sel |= pIemCpu->uCpl;
1258 pCtx->cs.ValidSel = pCtx->cs.Sel;
1259 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1260 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1261 pCtx->cs.u32Limit = cbLimit;
1262 pCtx->cs.u64Base = u64Base;
1263 /** @todo check if the hidden bits are loaded correctly for 64-bit
1264 * mode. */
1265 return VINF_SUCCESS;
1266}
1267
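/*
 * Illustrative note on the far-call return frame written above: it is 2+2,
 * 4+4 or 8+8 bytes depending on the operand size, with the return offset at
 * the lower address and CS in the slot above it; only the low 16 bits of the
 * CS slot are architecturally meaningful, hence the testcase todos.  A rough
 * picture of the 32-bit case (hypothetical struct, lowest address first):
 */
#if 0
struct FARCALLFRAME32
{
    uint32_t offRet;    /* EIP to return to */
    uint32_t uCsSlot;   /* CS in the low word; high word contents uncertain */
};
#endif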
1268
1269/**
1270 * Implements retf.
1271 *
1272 * @param enmEffOpSize The effective operand size.
1273 * @param cbPop The number of argument bytes to pop from the
1274 * stack.
1275 */
1276IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1277{
1278 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1279 VBOXSTRICTRC rcStrict;
1280 RTCPTRUNION uPtrFrame;
1281 uint64_t uNewRsp;
1282 uint64_t uNewRip;
1283 uint16_t uNewCs;
1284 NOREF(cbInstr);
1285
1286 /*
1287 * Read the stack values first.
1288 */
1289 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1290 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1291 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1292 if (rcStrict != VINF_SUCCESS)
1293 return rcStrict;
1294 if (enmEffOpSize == IEMMODE_16BIT)
1295 {
1296 uNewRip = uPtrFrame.pu16[0];
1297 uNewCs = uPtrFrame.pu16[1];
1298 }
1299 else if (enmEffOpSize == IEMMODE_32BIT)
1300 {
1301 uNewRip = uPtrFrame.pu32[0];
1302 uNewCs = uPtrFrame.pu16[2];
1303 }
1304 else
1305 {
1306 uNewRip = uPtrFrame.pu64[0];
1307 uNewCs = uPtrFrame.pu16[4];
1308 }
1309
1310 /*
1311 * Real mode and V8086 mode are easy.
1312 */
1313 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1314 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1315 {
1316 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1317 /** @todo check how this is supposed to work if sp=0xfffe. */
1318
1319 /* Check the limit of the new EIP. */
1320 /** @todo Intel pseudo code only does the limit check for 16-bit
1321 * operands, AMD does not make any distinction. What is right? */
1322 if (uNewRip > pCtx->cs.u32Limit)
1323 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1324
1325 /* commit the operation. */
1326 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329 pCtx->rip = uNewRip;
1330 pCtx->cs.Sel = uNewCs;
1331 pCtx->cs.ValidSel = uNewCs;
1332 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1333 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1334 /** @todo do we load attribs and limit as well? */
1335 if (cbPop)
1336 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
1337 return VINF_SUCCESS;
1338 }
1339
1340 /*
1341 * Protected mode is complicated, of course.
1342 */
1343 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1344 {
1345 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1346 return iemRaiseGeneralProtectionFault0(pIemCpu);
1347 }
1348
1349 /* Fetch the descriptor. */
1350 IEMSELDESC DescCs;
1351 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1352 if (rcStrict != VINF_SUCCESS)
1353 return rcStrict;
1354
1355 /* Can only return to a code selector. */
1356 if ( !DescCs.Legacy.Gen.u1DescType
1357 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1358 {
1359 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1360 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1361 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1362 }
1363
1364 /* L vs D. */
1365 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1366 && DescCs.Legacy.Gen.u1DefBig
1367 && IEM_IS_LONG_MODE(pIemCpu))
1368 {
1369 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1370 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1371 }
1372
1373 /* DPL/RPL/CPL checks. */
1374 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1375 {
1376 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1377 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1378 }
1379
1380 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1381 {
1382 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1383 {
1384 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1385 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1386 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1387 }
1388 }
1389 else
1390 {
1391 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1392 {
1393 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1394 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1395 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1396 }
1397 }
1398
1399 /* Is it there? */
1400 if (!DescCs.Legacy.Gen.u1Present)
1401 {
1402 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1403 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1404 }
1405
1406 /*
1407 * Return to outer privilege? (We'll typically have entered via a call gate.)
1408 */
1409 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1410 {
1411 /* Read the return pointer, it comes before the parameters. */
1412 RTCPTRUNION uPtrStack;
1413 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1414 if (rcStrict != VINF_SUCCESS)
1415 return rcStrict;
1416 uint16_t uNewOuterSs;
1417 uint64_t uNewOuterRsp;
1418 if (enmEffOpSize == IEMMODE_16BIT)
1419 {
1420 uNewOuterRsp = uPtrFrame.pu16[0];
1421 uNewOuterSs = uPtrFrame.pu16[1];
1422 }
1423 else if (enmEffOpSize == IEMMODE_32BIT)
1424 {
1425 uNewOuterRsp = uPtrFrame.pu32[0];
1426 uNewOuterSs = uPtrFrame.pu16[2];
1427 }
1428 else
1429 {
1430 uNewOuterRsp = uPtrFrame.pu64[0];
1431 uNewOuterSs = uPtrFrame.pu16[4];
1432 }
1433
1434 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1435 and read the selector. */
1436 IEMSELDESC DescSs;
1437 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1438 {
1439 if ( !DescCs.Legacy.Gen.u1Long
1440 || (uNewOuterSs & X86_SEL_RPL) == 3)
1441 {
1442 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1443 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1444 return iemRaiseGeneralProtectionFault0(pIemCpu);
1445 }
1446 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1447 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1448 }
1449 else
1450 {
1451 /* Fetch the descriptor for the new stack segment. */
1452 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1453 if (rcStrict != VINF_SUCCESS)
1454 return rcStrict;
1455 }
1456
1457 /* Check that RPL of stack and code selectors match. */
1458 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1459 {
1460 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1461 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1462 }
1463
1464 /* Must be a writable data segment. */
1465 if ( !DescSs.Legacy.Gen.u1DescType
1466 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1467 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1468 {
1469 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1470 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1471 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1472 }
1473
1474 /* L vs D. (Not mentioned by intel.) */
1475 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1476 && DescSs.Legacy.Gen.u1DefBig
1477 && IEM_IS_LONG_MODE(pIemCpu))
1478 {
1479 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1480 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1481 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1482 }
1483
1484 /* DPL/RPL/CPL checks. */
1485 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1486 {
1487 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1488 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1489 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1490 }
1491
1492 /* Is it there? */
1493 if (!DescSs.Legacy.Gen.u1Present)
1494 {
1495 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1496 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1497 }
1498
1499 /* Calc SS limit.*/
1500 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1501
1502 /* Is RIP canonical or within CS.limit? */
1503 uint64_t u64Base;
1504 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1505
1506 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1507 {
1508 if (!IEM_IS_CANONICAL(uNewRip))
1509 {
1510 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1511 return iemRaiseNotCanonical(pIemCpu);
1512 }
1513 u64Base = 0;
1514 }
1515 else
1516 {
1517 if (uNewRip > cbLimitCs)
1518 {
1519 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1520 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1521 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1522 }
1523 u64Base = X86DESC_BASE(&DescCs.Legacy);
1524 }
1525
1526 /*
1527 * Now set the accessed bit before
1528 * writing the return address to the stack and committing the result into
1529 * CS, CSHID and RIP.
1530 */
1531 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1532 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1533 {
1534 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1535 if (rcStrict != VINF_SUCCESS)
1536 return rcStrict;
1537 /** @todo check what VT-x and AMD-V does. */
1538 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1539 }
1540 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1541 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1542 {
1543 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1544 if (rcStrict != VINF_SUCCESS)
1545 return rcStrict;
1546 /** @todo check what VT-x and AMD-V does. */
1547 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1548 }
1549
1550 /* commit */
1551 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1552 if (rcStrict != VINF_SUCCESS)
1553 return rcStrict;
1554 if (enmEffOpSize == IEMMODE_16BIT)
1555 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1556 else
1557 pCtx->rip = uNewRip;
1558 pCtx->cs.Sel = uNewCs;
1559 pCtx->cs.ValidSel = uNewCs;
1560 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1561 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1562 pCtx->cs.u32Limit = cbLimitCs;
1563 pCtx->cs.u64Base = u64Base;
1564 pCtx->rsp = uNewRsp;
1565 pCtx->ss.Sel = uNewOuterSs;
1566 pCtx->ss.ValidSel = uNewOuterSs;
1567 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1568 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1569 pCtx->ss.u32Limit = cbLimitSs;
1570 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1571 pCtx->ss.u64Base = 0;
1572 else
1573 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1574
1575 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1576 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1577 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1578 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1579 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1580
1581 /** @todo check if the hidden bits are loaded correctly for 64-bit
1582 * mode. */
1583
1584 if (cbPop)
1585 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
1586
1587 /* Done! */
1588 }
1589 /*
1590 * Return to the same privilege level
1591 */
1592 else
1593 {
1594 /* Limit / canonical check. */
1595 uint64_t u64Base;
1596 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1597
1598 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1599 {
1600 if (!IEM_IS_CANONICAL(uNewRip))
1601 {
1602 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1603 return iemRaiseNotCanonical(pIemCpu);
1604 }
1605 u64Base = 0;
1606 }
1607 else
1608 {
1609 if (uNewRip > cbLimitCs)
1610 {
1611 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1612 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1613 }
1614 u64Base = X86DESC_BASE(&DescCs.Legacy);
1615 }
1616
1617 /*
1618 * Now set the accessed bit before
1619 * writing the return address to the stack and committing the result into
1620 * CS, CSHID and RIP.
1621 */
1622 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1623 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1624 {
1625 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1626 if (rcStrict != VINF_SUCCESS)
1627 return rcStrict;
1628 /** @todo check what VT-x and AMD-V does. */
1629 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1630 }
1631
1632 /* commit */
1633 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1634 if (rcStrict != VINF_SUCCESS)
1635 return rcStrict;
1636 if (enmEffOpSize == IEMMODE_16BIT)
1637 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1638 else
1639 pCtx->rip = uNewRip;
1640 pCtx->cs.Sel = uNewCs;
1641 pCtx->cs.ValidSel = uNewCs;
1642 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1643 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1644 pCtx->cs.u32Limit = cbLimitCs;
1645 pCtx->cs.u64Base = u64Base;
1646 /** @todo check if the hidden bits are loaded correctly for 64-bit
1647 * mode. */
1648 if (cbPop)
1649 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
1650 }
1651 return VINF_SUCCESS;
1652}
1653
1654
1655/**
1656 * Implements retn.
1657 *
1658 * We're doing this in C because of the \#GP that might be raised if the popped
1659 * program counter is out of bounds.
1660 *
1661 * @param enmEffOpSize The effective operand size.
1662 * @param cbPop The number of argument bytes to pop from the
1663 * stack.
1664 */
1665IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1666{
1667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1668 NOREF(cbInstr);
1669
1670 /* Fetch the RSP from the stack. */
1671 VBOXSTRICTRC rcStrict;
1672 RTUINT64U NewRip;
1673 RTUINT64U NewRsp;
1674 NewRsp.u = pCtx->rsp;
1675 switch (enmEffOpSize)
1676 {
1677 case IEMMODE_16BIT:
1678 NewRip.u = 0;
1679 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1680 break;
1681 case IEMMODE_32BIT:
1682 NewRip.u = 0;
1683 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1684 break;
1685 case IEMMODE_64BIT:
1686 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1687 break;
1688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1689 }
1690 if (rcStrict != VINF_SUCCESS)
1691 return rcStrict;
1692
1693 /* Check the new RSP before loading it. */
1694 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1695 * of it. The canonical test is performed here and for call. */
1696 if (enmEffOpSize != IEMMODE_64BIT)
1697 {
1698 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1699 {
1700 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1701 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1702 }
1703 }
1704 else
1705 {
1706 if (!IEM_IS_CANONICAL(NewRip.u))
1707 {
1708 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1709 return iemRaiseNotCanonical(pIemCpu);
1710 }
1711 }
1712
1713 /* Commit it. */
1714 pCtx->rip = NewRip.u;
1715 pCtx->rsp = NewRsp.u;
1716 if (cbPop)
1717 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
1718
1719 return VINF_SUCCESS;
1720}
1721
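/*
 * Usage note (illustrative): a "ret 8" executed in 32-bit code first pops
 * the 4-byte return EIP and then discards another 8 bytes of arguments,
 * which matches the order above: the cbPop adjustment is only applied once
 * the new RIP has been validated and committed.
 */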
1722
1723/**
1724 * Implements enter.
1725 *
1726 * We're doing this in C because the instruction is insane: even for the
1727 * cParameters=0 case, dealing with the stack is tedious.
1728 *
1729 * @param enmEffOpSize The effective operand size.
1730 */
1731IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
1732{
1733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1734
1735 /* Push RBP, saving the old value in TmpRbp. */
1736 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
1737 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
1738 RTUINT64U NewRbp;
1739 VBOXSTRICTRC rcStrict;
1740 if (enmEffOpSize == IEMMODE_64BIT)
1741 {
1742 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
1743 NewRbp = NewRsp;
1744 }
1745 else if (pCtx->ss.Attr.n.u1DefBig)
1746 {
1747 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
1748 NewRbp = NewRsp;
1749 }
1750 else
1751 {
1752 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
1753 NewRbp = TmpRbp;
1754 NewRbp.Words.w0 = NewRsp.Words.w0;
1755 }
1756 if (rcStrict != VINF_SUCCESS)
1757 return rcStrict;
1758
1759 /* Copy the parameters (aka nesting levels by Intel). */
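 /* Intel specifies that the nesting level operand is used modulo 32, hence only the low 5 bits matter. */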
1760 cParameters &= 0x1f;
1761 if (cParameters > 0)
1762 {
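 /* Nothing is committed to the real RSP/RBP yet; TmpRbp tracks where the outer
    frame pointers are read from and NewRsp tracks where they are re-pushed. */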
1763 switch (enmEffOpSize)
1764 {
1765 case IEMMODE_16BIT:
1766 if (pCtx->ss.Attr.n.u1DefBig)
1767 TmpRbp.DWords.dw0 -= 2;
1768 else
1769 TmpRbp.Words.w0 -= 2;
1770 do
1771 {
1772 uint16_t u16Tmp;
1773 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
1774 if (rcStrict != VINF_SUCCESS)
1775 break;
1776 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
1777 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1778 break;
1779
1780 case IEMMODE_32BIT:
1781 if (pCtx->ss.Attr.n.u1DefBig)
1782 TmpRbp.DWords.dw0 -= 4;
1783 else
1784 TmpRbp.Words.w0 -= 4;
1785 do
1786 {
1787 uint32_t u32Tmp;
1788 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
1789 if (rcStrict != VINF_SUCCESS)
1790 break;
1791 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
1792 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1793 break;
1794
1795 case IEMMODE_64BIT:
1796 TmpRbp.u -= 8;
1797 do
1798 {
1799 uint64_t u64Tmp;
1800 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
1801 if (rcStrict != VINF_SUCCESS)
1802 break;
1803 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
1804 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1805 break;
1806
1807 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1808 }
1809 if (rcStrict != VINF_SUCCESS)
1810 return rcStrict;
1811
1812 /* Push the new RBP */
1813 if (enmEffOpSize == IEMMODE_64BIT)
1814 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
1815 else if (pCtx->ss.Attr.n.u1DefBig)
1816 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
1817 else
1818 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
1819 if (rcStrict != VINF_SUCCESS)
1820 return rcStrict;
1821
1822 }
1823
1824 /* Recalc RSP. */
1825 iemRegSubFromRspEx(pIemCpu, pCtx, &NewRsp, cbFrame);
1826
1827 /** @todo Should probe write access at the new RSP according to AMD. */
1828
1829 /* Commit it. */
1830 pCtx->rbp = NewRbp.u;
1831 pCtx->rsp = NewRsp.u;
1832 iemRegAddToRip(pIemCpu, cbInstr);
1833
1834 return VINF_SUCCESS;
1835}
1836
1837
1838
1839/**
1840 * Implements leave.
1841 *
1842 * We're doing this in C because messing with the stack registers is annoying
1843 * since they depend on the SS attributes.
1844 *
1845 * @param enmEffOpSize The effective operand size.
1846 */
1847IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1848{
1849 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1850
1851 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1852 RTUINT64U NewRsp;
1853 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1854 NewRsp.u = pCtx->rbp;
1855 else if (pCtx->ss.Attr.n.u1DefBig)
1856 NewRsp.u = pCtx->ebp;
1857 else
1858 {
1859 /** @todo Check that LEAVE actually preserve the high EBP bits. */
1860 NewRsp.u = pCtx->rsp;
1861 NewRsp.Words.w0 = pCtx->bp;
1862 }
1863
1864 /* Pop RBP according to the operand size. */
1865 VBOXSTRICTRC rcStrict;
1866 RTUINT64U NewRbp;
1867 switch (enmEffOpSize)
1868 {
1869 case IEMMODE_16BIT:
1870 NewRbp.u = pCtx->rbp;
1871 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1872 break;
1873 case IEMMODE_32BIT:
1874 NewRbp.u = 0;
1875 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1876 break;
1877 case IEMMODE_64BIT:
1878 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1879 break;
1880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1881 }
1882 if (rcStrict != VINF_SUCCESS)
1883 return rcStrict;
1884
1885
1886 /* Commit it. */
1887 pCtx->rbp = NewRbp.u;
1888 pCtx->rsp = NewRsp.u;
1889 iemRegAddToRip(pIemCpu, cbInstr);
1890
1891 return VINF_SUCCESS;
1892}
1893
1894
1895/**
1896 * Implements int3 and int XX.
1897 *
1898 * @param u8Int The interrupt vector number.
1899 * @param fIsBpInstr Is it the breakpoint instruction.
1900 */
1901IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1902{
1903 Assert(pIemCpu->cXcptRecursions == 0);
1904 return iemRaiseXcptOrInt(pIemCpu,
1905 cbInstr,
1906 u8Int,
1907 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1908 0,
1909 0);
1910}
1911
1912
1913/**
1914 * Implements iret for real mode and V8086 mode.
1915 *
1916 * @param enmEffOpSize The effective operand size.
1917 */
1918IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1919{
1920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1921 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1922 X86EFLAGS Efl;
1923 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
1924 NOREF(cbInstr);
1925
1926 /*
1927 * iret throws an exception if VME isn't enabled.
1928 */
1929 if ( pCtx->eflags.Bits.u1VM
1930 && !(pCtx->cr4 & X86_CR4_VME))
1931 return iemRaiseGeneralProtectionFault0(pIemCpu);
1932
1933 /*
1934 * Do the stack bits, but don't commit RSP before everything checks
1935 * out right.
1936 */
1937 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1938 VBOXSTRICTRC rcStrict;
1939 RTCPTRUNION uFrame;
1940 uint16_t uNewCs;
1941 uint32_t uNewEip;
1942 uint32_t uNewFlags;
1943 uint64_t uNewRsp;
1944 if (enmEffOpSize == IEMMODE_32BIT)
1945 {
1946 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1947 if (rcStrict != VINF_SUCCESS)
1948 return rcStrict;
1949 uNewEip = uFrame.pu32[0];
1950 uNewCs = (uint16_t)uFrame.pu32[1];
1951 uNewFlags = uFrame.pu32[2];
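 /* Only the architecturally defined flag bits may be changed here; VM, VIF and
    VIP are taken from the current EFLAGS rather than from the stack image. */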
1952 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1953 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1954 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1955 | X86_EFL_ID;
1956 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1957 }
1958 else
1959 {
1960 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1961 if (rcStrict != VINF_SUCCESS)
1962 return rcStrict;
1963 uNewEip = uFrame.pu16[0];
1964 uNewCs = uFrame.pu16[1];
1965 uNewFlags = uFrame.pu16[2];
1966 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1967 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1968 uNewFlags |= Efl.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1969 /** @todo The intel pseudo code does not indicate what happens to
1970 * reserved flags. We just ignore them. */
1971 }
1972 /** @todo Check how this is supposed to work if sp=0xfffe. */
1973
1974 /*
1975 * Check the limit of the new EIP.
1976 */
1977 /** @todo Only the AMD pseudo code checks the limit here; what's
1978 * right? */
1979 if (uNewEip > pCtx->cs.u32Limit)
1980 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1981
1982 /*
1983 * V8086 checks and flag adjustments
1984 */
1985 if (Efl.Bits.u1VM)
1986 {
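 /* At IOPL=3 the return proceeds normally, just preserving IOPL and clearing RF.
    Below that, CR4.VME lets a 16-bit IRET load the stacked IF into VIF instead,
    provided TF is clear and we're not setting IF while VIP is pending; anything
    else raises #GP(0). */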
1987 if (Efl.Bits.u2IOPL == 3)
1988 {
1989 /* Preserve IOPL and clear RF. */
1990 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1991 uNewFlags |= Efl.u & (X86_EFL_IOPL);
1992 }
1993 else if ( enmEffOpSize == IEMMODE_16BIT
1994 && ( !(uNewFlags & X86_EFL_IF)
1995 || !Efl.Bits.u1VIP )
1996 && !(uNewFlags & X86_EFL_TF) )
1997 {
1998 /* Move IF to VIF, clear RF and preserve IF and IOPL. */
1999 uNewFlags &= ~X86_EFL_VIF;
2000 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2001 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2002 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2003 }
2004 else
2005 return iemRaiseGeneralProtectionFault0(pIemCpu);
2006 }
2007
2008 /*
2009 * Commit the operation.
2010 */
2011 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2012 if (rcStrict != VINF_SUCCESS)
2013 return rcStrict;
2014 pCtx->rip = uNewEip;
2015 pCtx->cs.Sel = uNewCs;
2016 pCtx->cs.ValidSel = uNewCs;
2017 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2018 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2019 /** @todo do we load attribs and limit as well? */
2020 Assert(uNewFlags & X86_EFL_1);
2021 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2022
2023 return VINF_SUCCESS;
2024}
2025
2026
2027/**
2028 * Loads a segment register when entering V8086 mode.
2029 *
2030 * @param pSReg The segment register.
2031 * @param uSeg The segment to load.
2032 */
2033static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2034{
2035 pSReg->Sel = uSeg;
2036 pSReg->ValidSel = uSeg;
2037 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2038 pSReg->u64Base = (uint32_t)uSeg << 4;
2039 pSReg->u32Limit = 0xffff;
2040 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2041 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2042 * IRET'ing to V8086. */
2043}
2044
2045
2046/**
2047 * Implements iret for protected mode returning to V8086 mode.
2048 *
2049 * @param pCtx Pointer to the CPU context.
2050 * @param uNewEip The new EIP.
2051 * @param uNewCs The new CS.
2052 * @param uNewFlags The new EFLAGS.
2053 * @param uNewRsp The RSP after the initial IRET frame.
2054 */
2055IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2056 uint32_t, uNewFlags, uint64_t, uNewRsp)
2057{
2058#if 0
2059 if (!LogIs6Enabled())
2060 {
2061 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
2062 RTLogFlags(NULL, "enabled");
2063 return VERR_IEM_RESTART_INSTRUCTION;
2064 }
2065#endif
2066
2067 /*
2068 * Pop the V8086 specific frame bits off the stack.
2069 */
2070 VBOXSTRICTRC rcStrict;
2071 RTCPTRUNION uFrame;
2072 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2073 if (rcStrict != VINF_SUCCESS)
2074 return rcStrict;
2075 uint32_t uNewEsp = uFrame.pu32[0];
2076 uint16_t uNewSs = uFrame.pu32[1];
2077 uint16_t uNewEs = uFrame.pu32[2];
2078 uint16_t uNewDs = uFrame.pu32[3];
2079 uint16_t uNewFs = uFrame.pu32[4];
2080 uint16_t uNewGs = uFrame.pu32[5];
2081 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084
2085 /*
2086 * Commit the operation.
2087 */
2088 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2089 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2090 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2091 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2092 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2093 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2094 pCtx->rip = uNewEip;
2095 pCtx->rsp = uNewEsp;
2096 pCtx->rflags.u = uNewFlags;
2097 pIemCpu->uCpl = 3;
2098
2099 return VINF_SUCCESS;
2100}
2101
2102
2103/**
2104 * Implements iret for protected mode returning via a nested task.
2105 *
2106 * @param enmEffOpSize The effective operand size.
2107 */
2108IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2109{
2110 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2111}
2112
2113
2114/**
2115 * Implements iret for protected mode.
2116 *
2117 * @param enmEffOpSize The effective operand size.
2118 */
2119IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2120{
2121 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2122 NOREF(cbInstr);
2123
2124 /*
2125 * Nested task return.
2126 */
2127 if (pCtx->eflags.Bits.u1NT)
2128 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2129
2130 /*
2131 * Normal return.
2132 *
2133 * Do the stack bits, but don't commit RSP before everything checks
2134 * out right.
2135 */
2136 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2137 VBOXSTRICTRC rcStrict;
2138 RTCPTRUNION uFrame;
2139 uint16_t uNewCs;
2140 uint32_t uNewEip;
2141 uint32_t uNewFlags;
2142 uint64_t uNewRsp;
2143 if (enmEffOpSize == IEMMODE_32BIT)
2144 {
2145 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2146 if (rcStrict != VINF_SUCCESS)
2147 return rcStrict;
2148 uNewEip = uFrame.pu32[0];
2149 uNewCs = (uint16_t)uFrame.pu32[1];
2150 uNewFlags = uFrame.pu32[2];
2151 }
2152 else
2153 {
2154 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2155 if (rcStrict != VINF_SUCCESS)
2156 return rcStrict;
2157 uNewEip = uFrame.pu16[0];
2158 uNewCs = uFrame.pu16[1];
2159 uNewFlags = uFrame.pu16[2];
2160 }
2161 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2162 if (rcStrict != VINF_SUCCESS)
2163 return rcStrict;
2164
2165 /*
2166 * We're hopefully not returning to V8086 mode...
2167 */
2168 if ( (uNewFlags & X86_EFL_VM)
2169 && pIemCpu->uCpl == 0)
2170 {
2171 Assert(enmEffOpSize == IEMMODE_32BIT);
2172 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
2173 }
2174
2175 /*
2176 * Protected mode.
2177 */
2178 /* Read the CS descriptor. */
2179 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2180 {
2181 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
2182 return iemRaiseGeneralProtectionFault0(pIemCpu);
2183 }
2184
2185 IEMSELDESC DescCS;
2186 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2187 if (rcStrict != VINF_SUCCESS)
2188 {
2189 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2190 return rcStrict;
2191 }
2192
2193 /* Must be a code descriptor. */
2194 if (!DescCS.Legacy.Gen.u1DescType)
2195 {
2196 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2197 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2198 }
2199 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2200 {
2201 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2202 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2203 }
2204
2205 /* Privilege checks. */
2206 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2207 {
2208 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2209 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2210 }
2211 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2212 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2213 {
2214 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2215 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2216 }
2217
2218 /* Present? */
2219 if (!DescCS.Legacy.Gen.u1Present)
2220 {
2221 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2222 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2223 }
2224
2225 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2226
2227 /*
2228 * Return to outer level?
2229 */
2230 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2231 {
2232 uint16_t uNewSS;
2233 uint32_t uNewESP;
2234 if (enmEffOpSize == IEMMODE_32BIT)
2235 {
2236 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2237 if (rcStrict != VINF_SUCCESS)
2238 return rcStrict;
2239 uNewESP = uFrame.pu32[0];
2240 uNewSS = (uint16_t)uFrame.pu32[1];
2241 }
2242 else
2243 {
2244 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2245 if (rcStrict != VINF_SUCCESS)
2246 return rcStrict;
2247 uNewESP = uFrame.pu16[0];
2248 uNewSS = uFrame.pu16[1];
2249 }
2250 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253
2254 /* Read the SS descriptor. */
2255 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2256 {
2257 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2258 return iemRaiseGeneralProtectionFault0(pIemCpu);
2259 }
2260
2261 IEMSELDESC DescSS;
2262 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2263 if (rcStrict != VINF_SUCCESS)
2264 {
2265 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2266 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2267 return rcStrict;
2268 }
2269
2270 /* Privilege checks. */
2271 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2272 {
2273 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2274 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2275 }
2276 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2277 {
2278 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2279 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2280 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2281 }
2282
2283 /* Must be a writeable data segment descriptor. */
2284 if (!DescSS.Legacy.Gen.u1DescType)
2285 {
2286 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2287 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2288 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2289 }
2290 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2291 {
2292 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2293 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2294 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2295 }
2296
2297 /* Present? */
2298 if (!DescSS.Legacy.Gen.u1Present)
2299 {
2300 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2301 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2302 }
2303
2304 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2305
2306 /* Check EIP. */
2307 if (uNewEip > cbLimitCS)
2308 {
2309 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2310 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2311 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2312 }
2313
2314 /*
2315 * Commit the changes, marking CS and SS accessed first since
2316 * that may fail.
2317 */
2318 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2319 {
2320 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2321 if (rcStrict != VINF_SUCCESS)
2322 return rcStrict;
2323 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2324 }
2325 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2326 {
2327 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2328 if (rcStrict != VINF_SUCCESS)
2329 return rcStrict;
2330 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2331 }
2332
2333 pCtx->rip = uNewEip;
2334 pCtx->cs.Sel = uNewCs;
2335 pCtx->cs.ValidSel = uNewCs;
2336 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2337 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2338 pCtx->cs.u32Limit = cbLimitCS;
2339 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2340 pCtx->rsp = uNewESP;
2341 pCtx->ss.Sel = uNewSS;
2342 pCtx->ss.ValidSel = uNewSS;
2343 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2344 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2345 pCtx->ss.u32Limit = cbLimitSs;
2346 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2347
2348 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2349 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2350 if (enmEffOpSize != IEMMODE_16BIT)
2351 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2352 if (pIemCpu->uCpl == 0)
2353 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2354 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2355 fEFlagsMask |= X86_EFL_IF;
2356 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
2357 fEFlagsNew &= ~fEFlagsMask;
2358 fEFlagsNew |= uNewFlags & fEFlagsMask;
2359 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
2360
2361 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
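 /* On a return to an outer ring, DS/ES/FS/GS selectors that are no longer valid at
    the new CPL are invalidated (loaded with the null selector), which the
    adjustment helper below takes care of. */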
2362 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2363 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2364 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2365 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2366
2367 /* Done! */
2368
2369 }
2370 /*
2371 * Return to the same level.
2372 */
2373 else
2374 {
2375 /* Check EIP. */
2376 if (uNewEip > cbLimitCS)
2377 {
2378 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2379 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2380 }
2381
2382 /*
2383 * Commit the changes, marking CS first since it may fail.
2384 */
2385 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2386 {
2387 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2388 if (rcStrict != VINF_SUCCESS)
2389 return rcStrict;
2390 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2391 }
2392
2393 pCtx->rip = uNewEip;
2394 pCtx->cs.Sel = uNewCs;
2395 pCtx->cs.ValidSel = uNewCs;
2396 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2397 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2398 pCtx->cs.u32Limit = cbLimitCS;
2399 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2400 pCtx->rsp = uNewRsp;
2401
2402 X86EFLAGS NewEfl;
2403 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2404 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2405 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2406 if (enmEffOpSize != IEMMODE_16BIT)
2407 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2408 if (pIemCpu->uCpl == 0)
2409 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2410 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
2411 fEFlagsMask |= X86_EFL_IF;
2412 NewEfl.u &= ~fEFlagsMask;
2413 NewEfl.u |= fEFlagsMask & uNewFlags;
2414 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
2415 /* Done! */
2416 }
2417 return VINF_SUCCESS;
2418}
2419
2420
2421/**
2422 * Implements iret for long mode.
2423 *
2424 * @param enmEffOpSize The effective operand size.
2425 */
2426IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2427{
2428 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2429 NOREF(cbInstr);
2430
2431 /*
2432 * Nested task return is not supported in long mode.
2433 */
2434 if (pCtx->eflags.Bits.u1NT)
2435 {
2436 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
2437 return iemRaiseGeneralProtectionFault0(pIemCpu);
2438 }
2439
2440 /*
2441 * Normal return.
2442 *
2443 * Do the stack bits, but don't commit RSP before everything checks
2444 * out right.
2445 */
2446 VBOXSTRICTRC rcStrict;
2447 RTCPTRUNION uFrame;
2448 uint64_t uNewRip;
2449 uint16_t uNewCs;
2450 uint16_t uNewSs;
2451 uint32_t uNewFlags;
2452 uint64_t uNewRsp;
2453 if (enmEffOpSize == IEMMODE_64BIT)
2454 {
2455 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp);
2456 if (rcStrict != VINF_SUCCESS)
2457 return rcStrict;
2458 uNewRip = uFrame.pu64[0];
2459 uNewCs = (uint16_t)uFrame.pu64[1];
2460 uNewFlags = (uint32_t)uFrame.pu64[2];
2461 uNewRsp = uFrame.pu64[3];
2462 uNewSs = (uint16_t)uFrame.pu64[4];
2463 }
2464 else if (enmEffOpSize == IEMMODE_32BIT)
2465 {
2466 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp);
2467 if (rcStrict != VINF_SUCCESS)
2468 return rcStrict;
2469 uNewRip = uFrame.pu32[0];
2470 uNewCs = (uint16_t)uFrame.pu32[1];
2471 uNewFlags = uFrame.pu32[2];
2472 uNewRsp = uFrame.pu32[3];
2473 uNewSs = (uint16_t)uFrame.pu32[4];
2474 }
2475 else
2476 {
2477 Assert(enmEffOpSize == IEMMODE_16BIT);
2478 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp);
2479 if (rcStrict != VINF_SUCCESS)
2480 return rcStrict;
2481 uNewRip = uFrame.pu16[0];
2482 uNewCs = uFrame.pu16[1];
2483 uNewFlags = uFrame.pu16[2];
2484 uNewRsp = uFrame.pu16[3];
2485 uNewSs = uFrame.pu16[4];
2486 }
2487 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2488 if (rcStrict != VINF_SUCCESS)
2489 return rcStrict;
2490 Log2(("iretq stack: cs:rip=%04x:%016RX64 rflags=%08RX32 ss:rsp=%04x:%016RX64\n",
2491 uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
2492
2493 /*
2494 * Check stuff.
2495 */
2496 /* Read the CS descriptor. */
2497 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2498 {
2499 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2500 return iemRaiseGeneralProtectionFault0(pIemCpu);
2501 }
2502
2503 IEMSELDESC DescCS;
2504 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2505 if (rcStrict != VINF_SUCCESS)
2506 {
2507 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
2508 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
2509 return rcStrict;
2510 }
2511
2512 /* Must be a code descriptor. */
2513 if ( !DescCS.Legacy.Gen.u1DescType
2514 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2515 {
2516 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n",
2517 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2518 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2519 }
2520
2521 /* Privilege checks. */
2522 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
2523 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2524 {
2525 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp, pIemCpu->uCpl));
2526 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2527 }
2528 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2529 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2530 {
2531 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < DPL (%d) -> #GP\n",
2532 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u2Dpl));
2533 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2534 }
2535
2536 /* Present? */
2537 if (!DescCS.Legacy.Gen.u1Present)
2538 {
2539 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2540 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2541 }
2542
2543 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2544
2545 /* Read the SS descriptor. */
2546 IEMSELDESC DescSS;
2547 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2548 {
2549 if ( !DescCS.Legacy.Gen.u1Long
2550 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
2551 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
2552 {
2553 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2554 return iemRaiseGeneralProtectionFault0(pIemCpu);
2555 }
2556 DescSS.Legacy.u = 0;
2557 }
2558 else
2559 {
2560 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs);
2561 if (rcStrict != VINF_SUCCESS)
2562 {
2563 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
2564 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
2565 return rcStrict;
2566 }
2567 }
2568
2569 /* Privilege checks. */
2570 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2571 {
2572 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2573 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2574 }
2575
2576 uint32_t cbLimitSs;
2577 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2578 cbLimitSs = UINT32_MAX;
2579 else
2580 {
2581 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2582 {
2583 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
2584 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
2585 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2586 }
2587
2588 /* Must be a writeable data segment descriptor. */
2589 if (!DescSS.Legacy.Gen.u1DescType)
2590 {
2591 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
2592 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
2593 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2594 }
2595 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2596 {
2597 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
2598 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
2599 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2600 }
2601
2602 /* Present? */
2603 if (!DescSS.Legacy.Gen.u1Present)
2604 {
2605 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2606 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs);
2607 }
2608 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2609 }
2610
2611 /* Check EIP. */
2612 if (DescCS.Legacy.Gen.u1Long)
2613 {
2614 if (!IEM_IS_CANONICAL(uNewRip))
2615 {
2616 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
2617 uNewCs, uNewRip, uNewSs, uNewRsp));
2618 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2619 }
2620 }
2621 else
2622 {
2623 if (uNewRip > cbLimitCS)
2624 {
2625 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
2626 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
2627 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2628 }
2629 }
2630
2631 /*
2632 * Commit the changes, marking CS and SS accessed first since
2633 * that may fail.
2634 */
2635 /** @todo where exactly are these actually marked accessed by a real CPU? */
2636 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2637 {
2638 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2639 if (rcStrict != VINF_SUCCESS)
2640 return rcStrict;
2641 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2642 }
2643 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2644 {
2645 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs);
2646 if (rcStrict != VINF_SUCCESS)
2647 return rcStrict;
2648 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2649 }
2650
2651 pCtx->rip = uNewRip;
2652 pCtx->cs.Sel = uNewCs;
2653 pCtx->cs.ValidSel = uNewCs;
2654 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2655 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2656 pCtx->cs.u32Limit = cbLimitCS;
2657 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2658 pCtx->rsp = uNewRsp;
2659 pCtx->ss.Sel = uNewSs;
2660 pCtx->ss.ValidSel = uNewSs;
2661 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2662 {
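 /* A null SS is only permitted when returning to 64-bit code at CPL 0 thru 2 (checked
    above); load an unusable SS that still carries the new DPL in its attributes. */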
2663 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2664 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
2665 pCtx->ss.u32Limit = UINT32_MAX;
2666 pCtx->ss.u64Base = 0;
2667 Log2(("iretq new SS: NULL\n"));
2668 }
2669 else
2670 {
2671 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2672 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2673 pCtx->ss.u32Limit = cbLimitSs;
2674 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2675 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
2676 }
2677
2678 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2679 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2680 if (enmEffOpSize != IEMMODE_16BIT)
2681 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2682 if (pIemCpu->uCpl == 0)
2683 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
2684 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2685 fEFlagsMask |= X86_EFL_IF;
2686 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
2687 fEFlagsNew &= ~fEFlagsMask;
2688 fEFlagsNew |= uNewFlags & fEFlagsMask;
2689 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
2690
2691 if (pIemCpu->uCpl != uNewCpl)
2692 {
2693 pIemCpu->uCpl = uNewCpl;
2694 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds);
2695 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es);
2696 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs);
2697 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs);
2698 }
2699
2700 return VINF_SUCCESS;
2701}
2702
2703
2704/**
2705 * Implements iret.
2706 *
2707 * @param enmEffOpSize The effective operand size.
2708 */
2709IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2710{
2711 /*
2712 * Call a mode specific worker.
2713 */
2714 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2715 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2716 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2717 if (IEM_IS_LONG_MODE(pIemCpu))
2718 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2719
2720 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2721}
2722
2723
2724/**
2725 * Implements SYSCALL (AMD and Intel64).
2728 */
2729IEM_CIMPL_DEF_0(iemCImpl_syscall)
2730{
2731 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2732
2733 /*
2734 * Check preconditions.
2735 *
2736 * Note that CPUs described in the documentation may load slightly different
2737 * values into CS and SS than we allow here. This has yet to be checked on real
2738 * hardware.
2739 */
2740 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
2741 {
2742 Log(("syscall: Not enabled in EFER -> #UD\n"));
2743 return iemRaiseUndefinedOpcode(pIemCpu);
2744 }
2745 if (!(pCtx->cr0 & X86_CR0_PE))
2746 {
2747 Log(("syscall: Protected mode is required -> #GP(0)\n"));
2748 return iemRaiseGeneralProtectionFault0(pIemCpu);
2749 }
2750 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
2751 {
2752 Log(("syscall: Only available in long mode on intel -> #UD\n"));
2753 return iemRaiseUndefinedOpcode(pIemCpu);
2754 }
2755
2756 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
2757 /** @todo what about LDT selectors? Shouldn't matter, really. */
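 /* SYSCALL takes CS from STAR[47:32] with the RPL bits forced to zero; SS is
    implicitly that selector + 8. */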
2758 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
2759 uint16_t uNewSs = uNewCs + 8;
2760 if (uNewCs == 0 || uNewSs == 0)
2761 {
2762 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
2763 return iemRaiseGeneralProtectionFault0(pIemCpu);
2764 }
2765
2766 /* Long mode and legacy mode differ. */
2767 if (CPUMIsGuestInLongModeEx(pCtx))
2768 {
2769 uint64_t uNewRip = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
2770
2771 /* This test isn't in the docs, but I'm not trusting the guys writing
2772 the MSRs to have validated the values as canonical like they should. */
2773 if (!IEM_IS_CANONICAL(uNewRip))
2774 {
2775 Log(("syscall: New RIP %#RX64 is not canonical -> #UD\n", uNewRip));
2776 return iemRaiseUndefinedOpcode(pIemCpu);
2777 }
2778
2779 /*
2780 * Commit it.
2781 */
2782 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
2783 pCtx->rcx = pCtx->rip + cbInstr;
2784 pCtx->rip = uNewRip;
2785
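 /* RF is cleared, the resulting RFLAGS value is preserved in R11, and the live
    flags are then masked by MSR_SFMASK; the reserved bit 1 stays set as always. */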
2786 pCtx->rflags.u &= ~X86_EFL_RF;
2787 pCtx->r11 = pCtx->rflags.u;
2788 pCtx->rflags.u &= ~pCtx->msrSFMASK;
2789 pCtx->rflags.u |= X86_EFL_1;
2790
2791 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
2792 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
2793 }
2794 else
2795 {
2796 /*
2797 * Commit it.
2798 */
2799 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
2800 pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
2801 pCtx->rcx = pCtx->eip + cbInstr;
2802 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
2803 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
2804
2805 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
2806 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
2807 }
2808 pCtx->cs.Sel = uNewCs;
2809 pCtx->cs.ValidSel = uNewCs;
2810 pCtx->cs.u64Base = 0;
2811 pCtx->cs.u32Limit = UINT32_MAX;
2812 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2813
2814 pCtx->ss.Sel = uNewSs;
2815 pCtx->ss.ValidSel = uNewSs;
2816 pCtx->ss.u64Base = 0;
2817 pCtx->ss.u32Limit = UINT32_MAX;
2818 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2819
2820 return VINF_SUCCESS;
2821}
2822
2823
2824/**
2825 * Implements SYSRET (AMD and Intel64).
2826 */
2827IEM_CIMPL_DEF_0(iemCImpl_sysret)
2829{
2830 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2831
2832 /*
2833 * Check preconditions.
2834 *
2835 * Note that CPUs described in the documentation may load slightly different
2836 * values into CS and SS than we allow here. This has yet to be checked on real
2837 * hardware.
2838 */
2839 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
2840 {
2841 Log(("sysret: Not enabled in EFER -> #UD\n"));
2842 return iemRaiseUndefinedOpcode(pIemCpu);
2843 }
2844 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
2845 {
2846 Log(("sysret: Only available in long mode on intel -> #UD\n"));
2847 return iemRaiseUndefinedOpcode(pIemCpu);
2848 }
2849 if (!(pCtx->cr0 & X86_CR0_PE))
2850 {
2851 Log(("sysret: Protected mode is required -> #GP(0)\n"));
2852 return iemRaiseGeneralProtectionFault0(pIemCpu);
2853 }
2854 if (pIemCpu->uCpl != 0)
2855 {
2856 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pIemCpu->uCpl));
2857 return iemRaiseGeneralProtectionFault0(pIemCpu);
2858 }
2859
2860 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
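 /* SYSRET takes CS from STAR[63:48]: +0 for a 32-bit return, +16 for a 64-bit
    return (the RPL is OR'ed in as 3 below); SS is always STAR[63:48] + 8. */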
2861 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
2862 uint16_t uNewSs = uNewCs + 8;
2863 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
2864 uNewCs += 16;
2865 if (uNewCs == 0 || uNewSs == 0)
2866 {
2867 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
2868 return iemRaiseGeneralProtectionFault0(pIemCpu);
2869 }
2870
2871 /*
2872 * Commit it.
2873 */
2874 if (CPUMIsGuestInLongModeEx(pCtx))
2875 {
2876 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
2877 {
2878 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
2879 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
2880 /* Note! We disregard the Intel manual regarding the RCX canonical
2881 check, ask intel+xen why AMD doesn't do it. */
2882 pCtx->rip = pCtx->rcx;
2883 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
2884 | (3 << X86DESCATTR_DPL_SHIFT);
2885 }
2886 else
2887 {
2888 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
2889 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
2890 pCtx->rip = pCtx->ecx;
2891 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
2892 | (3 << X86DESCATTR_DPL_SHIFT);
2893 }
2894 /** @todo testcase: See what kind of flags we can make SYSRET restore and
2895 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
2896 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
2897 pCtx->rflags.u |= X86_EFL_1;
2898 }
2899 else
2900 {
2901 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
2902 pCtx->rip = pCtx->rcx;
2903 pCtx->rflags.u |= X86_EFL_IF;
2904 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
2905 | (3 << X86DESCATTR_DPL_SHIFT);
2906 }
2907 pCtx->cs.Sel = uNewCs | 3;
2908 pCtx->cs.ValidSel = uNewCs | 3;
2909 pCtx->cs.u64Base = 0;
2910 pCtx->cs.u32Limit = UINT32_MAX;
2911 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2912
2913 pCtx->ss.Sel = uNewSs | 3;
2914 pCtx->ss.ValidSel = uNewSs | 3;
2915 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2916 /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
2917 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
2918 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
2919 * on sysret. */
2920
2921 return VINF_SUCCESS;
2922}
2923
2924
2925/**
2926 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2927 *
2928 * @param iSegReg The segment register number (valid).
2929 * @param uSel The new selector value.
2930 */
2931IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2932{
2933 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2934 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2935 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2936
2937 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2938
2939 /*
2940 * Real mode and V8086 mode are easy.
2941 */
2942 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2943 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2944 {
2945 *pSel = uSel;
2946 pHid->u64Base = (uint32_t)uSel << 4;
2947 pHid->ValidSel = uSel;
2948 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2949#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2950 /** @todo Does the CPU actually load limits and attributes in the
2951 * real/V8086 mode segment load case? It doesn't for CS in far
2952 * jumps... Affects unreal mode. */
2953 pHid->u32Limit = 0xffff;
2954 pHid->Attr.u = 0;
2955 pHid->Attr.n.u1Present = 1;
2956 pHid->Attr.n.u1DescType = 1;
2957 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2958 ? X86_SEL_TYPE_RW
2959 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2960#endif
2961 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2962 iemRegAddToRip(pIemCpu, cbInstr);
2963 return VINF_SUCCESS;
2964 }
2965
2966 /*
2967 * Protected mode.
2968 *
2969 * Check if it's a null segment selector value first, that's OK for DS, ES,
2970 * FS and GS. If not null, then we have to load and parse the descriptor.
2971 */
2972 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2973 {
2974 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
2975 if (iSegReg == X86_SREG_SS)
2976 {
2977 /* In 64-bit kernel mode, the stack can be 0 because of the way
2978 interrupts are dispatched. AMD seems to have a slightly more
2979 relaxed relationship to SS.RPL than intel does. */
2980 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
2981 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2982 || pIemCpu->uCpl > 2
2983 || ( uSel != pIemCpu->uCpl
2984 && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
2985 {
2986 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2987 return iemRaiseGeneralProtectionFault0(pIemCpu);
2988 }
2989 }
2990
2991 *pSel = uSel; /* Not RPL, remember :-) */
2992 iemHlpLoadNullDataSelectorProt(pHid, uSel);
2993 if (iSegReg == X86_SREG_SS)
2994 pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT;
2995
2996 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2997 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2998
2999 iemRegAddToRip(pIemCpu, cbInstr);
3000 return VINF_SUCCESS;
3001 }
3002
3003 /* Fetch the descriptor. */
3004 IEMSELDESC Desc;
3005 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
3006 if (rcStrict != VINF_SUCCESS)
3007 return rcStrict;
3008
3009 /* Check GPs first. */
3010 if (!Desc.Legacy.Gen.u1DescType)
3011 {
3012 Log(("load sreg %d, %#x - system selector (type %#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
3013 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3014 }
3015 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
3016 {
3017 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3018 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3019 {
3020 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
3021 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3022 }
3023 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
3024 {
3025 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
3026 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3027 }
3028 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
3029 {
3030 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3031 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3032 }
3033 }
3034 else
3035 {
3036 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3037 {
3038 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
3039 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3040 }
3041 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3042 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3043 {
3044#if 0 /* this is what intel says. */
3045 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3046 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
3047 {
3048 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
3049 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
3050 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3051 }
3052#else /* this is what makes more sense. */
3053 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
3054 {
3055 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
3056 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
3057 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3058 }
3059 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
3060 {
3061 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
3062 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
3063 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3064 }
3065#endif
3066 }
3067 }
3068
3069 /* Is it there? */
3070 if (!Desc.Legacy.Gen.u1Present)
3071 {
3072 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
3073 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
3074 }
3075
3076 /* The base and limit. */
3077 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3078 uint64_t u64Base;
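 /* In 64-bit mode the ES, SS and DS bases are treated as zero; only FS and GS
    keep the base from the descriptor. */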
3079 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
3080 && iSegReg < X86_SREG_FS)
3081 u64Base = 0;
3082 else
3083 u64Base = X86DESC_BASE(&Desc.Legacy);
3084
3085 /*
3086 * Ok, everything checked out fine. Now set the accessed bit before
3087 * committing the result into the registers.
3088 */
3089 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3090 {
3091 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
3092 if (rcStrict != VINF_SUCCESS)
3093 return rcStrict;
3094 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3095 }
3096
3097 /* commit */
3098 *pSel = uSel;
3099 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3100 pHid->u32Limit = cbLimit;
3101 pHid->u64Base = u64Base;
3102 pHid->ValidSel = uSel;
3103 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3104
3105 /** @todo check if the hidden bits are loaded correctly for 64-bit
3106 * mode. */
3107 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
3108
3109 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3110 iemRegAddToRip(pIemCpu, cbInstr);
3111 return VINF_SUCCESS;
3112}
3113
3114
3115/**
3116 * Implements 'mov SReg, r/m'.
3117 *
3118 * @param iSegReg The segment register number (valid).
3119 * @param uSel The new selector value.
3120 */
3121IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
3122{
3123 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
3124 if (rcStrict == VINF_SUCCESS)
3125 {
3126 if (iSegReg == X86_SREG_SS)
3127 {
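 /* Loading SS inhibits interrupts and debug exceptions until the next instruction
    has completed, so the guest can update the stack pointer right afterwards
    without being interrupted in between. */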
3128 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3129 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3130 }
3131 }
3132 return rcStrict;
3133}
3134
3135
3136/**
3137 * Implements 'pop SReg'.
3138 *
3139 * @param iSegReg The segment register number (valid).
3140 * @param iSegReg The segment register number (valid).
3141 * @param enmEffOpSize The effective operand size (valid).
3141 */
3142IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
3143{
3144 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3145 VBOXSTRICTRC rcStrict;
3146
3147 /*
3148 * Read the selector off the stack and join paths with mov ss, reg.
3149 */
3150 RTUINT64U TmpRsp;
3151 TmpRsp.u = pCtx->rsp;
3152 switch (enmEffOpSize)
3153 {
3154 case IEMMODE_16BIT:
3155 {
3156 uint16_t uSel;
3157 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
3158 if (rcStrict == VINF_SUCCESS)
3159 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
3160 break;
3161 }
3162
3163 case IEMMODE_32BIT:
3164 {
3165 uint32_t u32Value;
3166 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
3167 if (rcStrict == VINF_SUCCESS)
3168 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
3169 break;
3170 }
3171
3172 case IEMMODE_64BIT:
3173 {
3174 uint64_t u64Value;
3175 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
3176 if (rcStrict == VINF_SUCCESS)
3177 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
3178 break;
3179 }
3180 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3181 }
3182
3183 /*
3184 * Commit the stack on success.
3185 */
3186 if (rcStrict == VINF_SUCCESS)
3187 {
3188 pCtx->rsp = TmpRsp.u;
3189 if (iSegReg == X86_SREG_SS)
3190 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3191 }
3192 return rcStrict;
3193}
3194
3195
3196/**
3197 * Implements lgs, lfs, les, lds & lss.
3198 */
3199IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
3200 uint16_t, uSel,
3201 uint64_t, offSeg,
3202 uint8_t, iSegReg,
3203 uint8_t, iGReg,
3204 IEMMODE, enmEffOpSize)
3205{
3206 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
3207 VBOXSTRICTRC rcStrict;
3208
3209 /*
3210 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
3211 */
3212 /** @todo verify and test that mov, pop and lXs works the segment
3213 * register loading in the exact same way. */
3214 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
3215 if (rcStrict == VINF_SUCCESS)
3216 {
3217 switch (enmEffOpSize)
3218 {
3219 case IEMMODE_16BIT:
3220 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
3221 break;
3222 case IEMMODE_32BIT:
3223 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
3224 break;
3225 case IEMMODE_64BIT:
3226 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
3227 break;
3228 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3229 }
3230 }
3231
3232 return rcStrict;
3233}
3234
3235
3236/**
3237 * Implements lgdt.
3238 *
3239 * @param iEffSeg The segment of the new gdtr contents
3240 * @param GCPtrEffSrc The address of the new gdtr contents.
3241 * @param enmEffOpSize The effective operand size.
3242 */
3243IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
3244{
3245 if (pIemCpu->uCpl != 0)
3246 return iemRaiseGeneralProtectionFault0(pIemCpu);
3247 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3248
3249 /*
3250 * Fetch the limit and base address.
3251 */
3252 uint16_t cbLimit;
3253 RTGCPTR GCPtrBase;
3254 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
3255 if (rcStrict == VINF_SUCCESS)
3256 {
3257 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3258 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
3259 else
3260 {
3261 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3262 pCtx->gdtr.cbGdt = cbLimit;
3263 pCtx->gdtr.pGdt = GCPtrBase;
3264 }
3265 if (rcStrict == VINF_SUCCESS)
3266 iemRegAddToRip(pIemCpu, cbInstr);
3267 }
3268 return rcStrict;
3269}
3270
3271
3272/**
3273 * Implements sgdt.
3274 *
3275 * @param iEffSeg The segment where to store the gdtr content.
3276 * @param GCPtrEffDst The address where to store the gdtr content.
3277 * @param enmEffOpSize The effective operand size.
3278 */
3279IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
3280{
3281 /*
3282 * Join paths with sidt.
3283 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
3284 * you really must know.
3285 */
3286 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3287 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
3288 if (rcStrict == VINF_SUCCESS)
3289 iemRegAddToRip(pIemCpu, cbInstr);
3290 return rcStrict;
3291}
3292
3293
3294/**
3295 * Implements lidt.
3296 *
3297 * @param iEffSeg The segment of the new idtr contents
3298 * @param GCPtrEffSrc The address of the new idtr contents.
3299 * @param enmEffOpSize The effective operand size.
3300 */
3301IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
3302{
3303 if (pIemCpu->uCpl != 0)
3304 return iemRaiseGeneralProtectionFault0(pIemCpu);
3305 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3306
3307 /*
3308 * Fetch the limit and base address.
3309 */
3310 uint16_t cbLimit;
3311 RTGCPTR GCPtrBase;
3312 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
3313 if (rcStrict == VINF_SUCCESS)
3314 {
3315 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3316 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
3317 else
3318 {
3319 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3320 pCtx->idtr.cbIdt = cbLimit;
3321 pCtx->idtr.pIdt = GCPtrBase;
3322 }
3323 iemRegAddToRip(pIemCpu, cbInstr);
3324 }
3325 return rcStrict;
3326}
3327
3328
3329/**
3330 * Implements sidt.
3331 *
3332 * @param iEffSeg The segment where to store the idtr content.
3333 * @param GCPtrEffDst The address where to store the idtr content.
3334 * @param enmEffOpSize The effective operand size.
3335 */
3336IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
3337{
3338 /*
3339 * Join paths with sgdt.
3340 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
3341 * you really must know.
3342 */
3343 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3344 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
3345 if (rcStrict == VINF_SUCCESS)
3346 iemRegAddToRip(pIemCpu, cbInstr);
3347 return rcStrict;
3348}
3349
3350
3351/**
3352 * Implements lldt.
3353 *
3354 * @param uNewLdt The new LDT selector value.
3355 */
3356IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
3357{
3358 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3359
3360 /*
3361 * Check preconditions.
3362 */
3363 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3364 {
3365 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
3366 return iemRaiseUndefinedOpcode(pIemCpu);
3367 }
3368 if (pIemCpu->uCpl != 0)
3369 {
3370 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
3371 return iemRaiseGeneralProtectionFault0(pIemCpu);
3372 }
3373 if (uNewLdt & X86_SEL_LDT)
3374 {
3375 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
3376 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
3377 }
3378
3379 /*
3380 * Now, loading a NULL selector is easy.
3381 */
3382 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3383 {
3384 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
3385 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3386 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
3387 else
3388 pCtx->ldtr.Sel = uNewLdt;
3389 pCtx->ldtr.ValidSel = uNewLdt;
3390 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3391 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
3392 if (!IEM_IS_GUEST_CPU_AMD(pIemCpu) || !IEM_VERIFICATION_ENABLED(pIemCpu)) /* See bs-cpu-hidden-regs-1 on AMD. */
3393 {
3394 pCtx->ldtr.u64Base = 0;
3395 pCtx->ldtr.u32Limit = 0;
3396 }
3397
3398 iemRegAddToRip(pIemCpu, cbInstr);
3399 return VINF_SUCCESS;
3400 }
3401
3402 /*
3403 * Read the descriptor.
3404 */
3405 IEMSELDESC Desc;
3406 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
3407 if (rcStrict != VINF_SUCCESS)
3408 return rcStrict;
3409
3410 /* Check GPs first. */
3411 if (Desc.Legacy.Gen.u1DescType)
3412 {
3413 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
3414 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3415 }
3416 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3417 {
3418 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
3419 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3420 }
3421 uint64_t u64Base;
3422 if (!IEM_IS_LONG_MODE(pIemCpu))
3423 u64Base = X86DESC_BASE(&Desc.Legacy);
3424 else
3425 {
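 /* In long mode the LDT descriptor is 16 bytes wide; the reserved bits (u5Zeros)
    must be clear and the 64-bit base must be canonical. */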
3426 if (Desc.Long.Gen.u5Zeros)
3427 {
3428 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
3429 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3430 }
3431
3432 u64Base = X86DESC64_BASE(&Desc.Long);
3433 if (!IEM_IS_CANONICAL(u64Base))
3434 {
3435 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
3436 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3437 }
3438 }
3439
3440 /* NP */
3441 if (!Desc.Legacy.Gen.u1Present)
3442 {
3443 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
3444 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
3445 }
3446
3447 /*
3448 * It checks out alright, update the registers.
3449 */
3450/** @todo check if the actual value is loaded or if the RPL is dropped */
3451 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3452 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
3453 else
3454 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3455 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3456 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3457 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3458 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3459 pCtx->ldtr.u64Base = u64Base;
3460
3461 iemRegAddToRip(pIemCpu, cbInstr);
3462 return VINF_SUCCESS;
3463}
3464
3465
3466/**
3467 * Implements ltr.
3468 *
3469 * @param uNewTr The new task register selector value.
3470 */
3471IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
3472{
3473 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3474
3475 /*
3476 * Check preconditions.
3477 */
3478 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3479 {
3480 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
3481 return iemRaiseUndefinedOpcode(pIemCpu);
3482 }
3483 if (pIemCpu->uCpl != 0)
3484 {
3485 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
3486 return iemRaiseGeneralProtectionFault0(pIemCpu);
3487 }
3488 if (uNewTr & X86_SEL_LDT)
3489 {
3490 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
3491 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
3492 }
3493 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
3494 {
3495 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
3496 return iemRaiseGeneralProtectionFault0(pIemCpu);
3497 }
3498
3499 /*
3500 * Read the descriptor.
3501 */
3502 IEMSELDESC Desc;
3503 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
3504 if (rcStrict != VINF_SUCCESS)
3505 return rcStrict;
3506
3507 /* Check GPs first. */
3508 if (Desc.Legacy.Gen.u1DescType)
3509 {
3510 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3511 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3512 }
3513 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
3514 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3515 || IEM_IS_LONG_MODE(pIemCpu)) )
3516 {
3517 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3518 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3519 }
3520 uint64_t u64Base;
3521 if (!IEM_IS_LONG_MODE(pIemCpu))
3522 u64Base = X86DESC_BASE(&Desc.Legacy);
3523 else
3524 {
3525 if (Desc.Long.Gen.u5Zeros)
3526 {
3527 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
3528 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3529 }
3530
3531 u64Base = X86DESC64_BASE(&Desc.Long);
3532 if (!IEM_IS_CANONICAL(u64Base))
3533 {
3534 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
3535 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3536 }
3537 }
3538
3539 /* NP */
3540 if (!Desc.Legacy.Gen.u1Present)
3541 {
3542 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
3543 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
3544 }
3545
3546 /*
3547 * Set it busy.
3548 * Note! Intel says this should lock down the whole descriptor, but we'll
3549 * restrict ourselves to 32-bit for now due to lack of inline
3550 * assembly and such.
3551 */
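 /* Annotation: the TSS busy flag is bit 1 of the descriptor type field, i.e.
    bit 41 of the 8-byte descriptor (byte 5, bit 1). The switch below shifts
    the base pointer to a 4-byte aligned address and compensates the bit
    offset accordingly, so the atomic bit-set always hits that same
    architectural bit regardless of how the mapping happens to be aligned. */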
3552 void *pvDesc;
3553 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
3554 if (rcStrict != VINF_SUCCESS)
3555 return rcStrict;
3556 switch ((uintptr_t)pvDesc & 3)
3557 {
3558 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
3559 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
3560 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
3561 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
3562 }
3563 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
3564 if (rcStrict != VINF_SUCCESS)
3565 return rcStrict;
3566 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3567
3568 /*
3569 * It checks out alright, update the registers.
3570 */
3571/** @todo check if the actual value is loaded or if the RPL is dropped */
3572 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3573 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
3574 else
3575 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
3576 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
3577 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3578 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3579 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3580 pCtx->tr.u64Base = u64Base;
3581
3582 iemRegAddToRip(pIemCpu, cbInstr);
3583 return VINF_SUCCESS;
3584}
3585
3586
3587/**
3588 * Implements mov GReg,CRx.
3589 *
3590 * @param iGReg The general register to store the CRx value in.
3591 * @param iCrReg The CRx register to read (valid).
3592 */
3593IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
3594{
3595 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3596 if (pIemCpu->uCpl != 0)
3597 return iemRaiseGeneralProtectionFault0(pIemCpu);
3598 Assert(!pCtx->eflags.Bits.u1VM);
3599
3600 /* read it */
3601 uint64_t crX;
3602 switch (iCrReg)
3603 {
3604 case 0: crX = pCtx->cr0; break;
3605 case 2: crX = pCtx->cr2; break;
3606 case 3: crX = pCtx->cr3; break;
3607 case 4: crX = pCtx->cr4; break;
3608 case 8:
3609 {
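 /* CR8 is an architectural alias for the local APIC TPR; only bits 7:4 of
    the TPR are visible through it, hence the shift below. If the APIC
    cannot be queried we simply return 0. */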
3610 uint8_t uTpr;
3611 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
3612 if (RT_SUCCESS(rc))
3613 crX = uTpr >> 4;
3614 else
3615 crX = 0;
3616 break;
3617 }
3618 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3619 }
3620
3621 /* store it */
3622 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3623 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
3624 else
3625 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
3626
3627 iemRegAddToRip(pIemCpu, cbInstr);
3628 return VINF_SUCCESS;
3629}
3630
3631
3632/**
3633 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
3634 *
3635 * @param iCrReg The CRx register to write (valid).
3636 * @param uNewCrX The new value.
3637 */
3638IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
3639{
3640 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3641 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3642 VBOXSTRICTRC rcStrict;
3643 int rc;
3644
3645 /*
3646 * Try store it.
3647 * Unfortunately, CPUM only does a tiny bit of the work.
3648 */
3649 switch (iCrReg)
3650 {
3651 case 0:
3652 {
3653 /*
3654 * Perform checks.
3655 */
3656 uint64_t const uOldCrX = pCtx->cr0;
3657 uNewCrX |= X86_CR0_ET; /* hardcoded */
3658
3659 /* Check for reserved bits. */
3660 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
3661 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
3662 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
3663 if (uNewCrX & ~(uint64_t)fValid)
3664 {
3665 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3666 return iemRaiseGeneralProtectionFault0(pIemCpu);
3667 }
3668
3669 /* Check for invalid combinations. */
3670 if ( (uNewCrX & X86_CR0_PG)
3671 && !(uNewCrX & X86_CR0_PE) )
3672 {
3673 Log(("Trying to set CR0.PG without CR0.PE\n"));
3674 return iemRaiseGeneralProtectionFault0(pIemCpu);
3675 }
3676
3677 if ( !(uNewCrX & X86_CR0_CD)
3678 && (uNewCrX & X86_CR0_NW) )
3679 {
3680 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
3681 return iemRaiseGeneralProtectionFault0(pIemCpu);
3682 }
3683
3684 /* Long mode consistency checks. */
3685 if ( (uNewCrX & X86_CR0_PG)
3686 && !(uOldCrX & X86_CR0_PG)
3687 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3688 {
3689 if (!(pCtx->cr4 & X86_CR4_PAE))
3690 {
3691 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
3692 return iemRaiseGeneralProtectionFault0(pIemCpu);
3693 }
3694 if (pCtx->cs.Attr.n.u1Long)
3695 {
3696 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
3697 return iemRaiseGeneralProtectionFault0(pIemCpu);
3698 }
3699 }
3700
3701 /** @todo check reserved PDPTR bits as AMD states. */
3702
3703 /*
3704 * Change CR0.
3705 */
3706 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3707 CPUMSetGuestCR0(pVCpu, uNewCrX);
3708 else
3709 pCtx->cr0 = uNewCrX;
3710 Assert(pCtx->cr0 == uNewCrX);
3711
3712 /*
3713 * Change EFER.LMA if entering or leaving long mode.
3714 */
3715 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3716 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3717 {
3718 uint64_t NewEFER = pCtx->msrEFER;
3719 if (uNewCrX & X86_CR0_PG)
3720 NewEFER |= MSR_K6_EFER_LMA;
3721 else
3722 NewEFER &= ~MSR_K6_EFER_LMA;
3723
3724 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3725 CPUMSetGuestEFER(pVCpu, NewEFER);
3726 else
3727 pCtx->msrEFER = NewEFER;
3728 Assert(pCtx->msrEFER == NewEFER);
3729 }
3730
3731 /*
3732 * Inform PGM.
3733 */
3734 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3735 {
3736 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3737 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3738 {
3739 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3740 AssertRCReturn(rc, rc);
3741 /* ignore informational status codes */
3742 }
3743 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3744 }
3745 else
3746 rcStrict = VINF_SUCCESS;
3747
3748#ifdef IN_RC
3749 /* Return to ring-3 for rescheduling if WP or AM changes. */
3750 if ( rcStrict == VINF_SUCCESS
3751 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
3752 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
3753 rcStrict = VINF_EM_RESCHEDULE;
3754#endif
3755 break;
3756 }
3757
3758 /*
3759 * CR2 can be changed without any restrictions.
3760 */
3761 case 2:
3762 pCtx->cr2 = uNewCrX;
3763 rcStrict = VINF_SUCCESS;
3764 break;
3765
3766 /*
3767 * CR3 is relatively simple, although AMD and Intel have different
3768 * accounts of how setting reserved bits is handled. We take Intel's
3769 * word for the lower bits and AMD's for the high bits (63:52).
3770 */
3771 /** @todo Testcase: Setting reserved bits in CR3, especially before
3772 * enabling paging. */
3773 case 3:
3774 {
3775 /* check / mask the value. */
3776 if (uNewCrX & UINT64_C(0xfff0000000000000))
3777 {
3778 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3779 return iemRaiseGeneralProtectionFault0(pIemCpu);
3780 }
3781
3782 uint64_t fValid;
3783 if ( (pCtx->cr4 & X86_CR4_PAE)
3784 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3785 fValid = UINT64_C(0x000ffffffffff014);
3786 else if (pCtx->cr4 & X86_CR4_PAE)
3787 fValid = UINT64_C(0xfffffff4);
3788 else
3789 fValid = UINT64_C(0xfffff014);
3790 if (uNewCrX & ~fValid)
3791 {
3792 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3793 uNewCrX, uNewCrX & ~fValid));
3794 uNewCrX &= fValid;
3795 }
3796
3797 /** @todo If we're in PAE mode we should check the PDPTRs for
3798 * invalid bits. */
3799
3800 /* Make the change. */
3801 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3802 {
3803 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3804 AssertRCSuccessReturn(rc, rc);
3805 }
3806 else
3807 pCtx->cr3 = uNewCrX;
3808
3809 /* Inform PGM. */
3810 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3811 {
3812 if (pCtx->cr0 & X86_CR0_PG)
3813 {
3814 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3815 AssertRCReturn(rc, rc);
3816 /* ignore informational status codes */
3817 }
3818 }
3819 rcStrict = VINF_SUCCESS;
3820 break;
3821 }
3822
3823 /*
3824 * CR4 is a bit more tedious as there are bits which cannot be cleared
3825 * under some circumstances and such.
3826 */
3827 case 4:
3828 {
3829 uint64_t const uOldCrX = pCtx->cr4;
3830
3831 /* reserved bits */
3832 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3833 | X86_CR4_TSD | X86_CR4_DE
3834 | X86_CR4_PSE | X86_CR4_PAE
3835 | X86_CR4_MCE | X86_CR4_PGE
3836 | X86_CR4_PCE | X86_CR4_OSFSXR
3837 | X86_CR4_OSXMMEEXCPT;
3838 //if (xxx)
3839 // fValid |= X86_CR4_VMXE;
3840 //if (xxx)
3841 // fValid |= X86_CR4_OSXSAVE;
3842 if (uNewCrX & ~(uint64_t)fValid)
3843 {
3844 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3845 return iemRaiseGeneralProtectionFault0(pIemCpu);
3846 }
3847
3848 /* long mode checks. */
3849 if ( (uOldCrX & X86_CR4_PAE)
3850 && !(uNewCrX & X86_CR4_PAE)
3851 && CPUMIsGuestInLongModeEx(pCtx) )
3852 {
3853 Log(("Trying to clear CR4.PAE while long mode is active\n"));
3854 return iemRaiseGeneralProtectionFault0(pIemCpu);
3855 }
3856
3857
3858 /*
3859 * Change it.
3860 */
3861 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3862 {
3863 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3864 AssertRCSuccessReturn(rc, rc);
3865 }
3866 else
3867 pCtx->cr4 = uNewCrX;
3868 Assert(pCtx->cr4 == uNewCrX);
3869
3870 /*
3871 * Notify SELM and PGM.
3872 */
3873 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3874 {
3875 /* SELM - VME may change things wrt to the TSS shadowing. */
3876 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3877 {
3878 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3879 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3880#ifdef VBOX_WITH_RAW_MODE
3881 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
3882 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3883#endif
3884 }
3885
3886 /* PGM - flushing and mode. */
3887 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3888 {
3889 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3890 AssertRCReturn(rc, rc);
3891 /* ignore informational status codes */
3892 }
3893 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3894 }
3895 else
3896 rcStrict = VINF_SUCCESS;
3897 break;
3898 }
3899
3900 /*
3901 * CR8 maps to the APIC TPR.
3902 */
3903 case 8:
3904 if (uNewCrX & ~(uint64_t)0xf)
3905 {
3906 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
3907 return iemRaiseGeneralProtectionFault0(pIemCpu);
3908 }
3909
3910 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3911 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
3912 rcStrict = VINF_SUCCESS;
3913 break;
3914
3915 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3916 }
3917
3918 /*
3919 * Advance the RIP on success.
3920 */
3921 if (RT_SUCCESS(rcStrict))
3922 {
3923 if (rcStrict != VINF_SUCCESS)
3924 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3925 iemRegAddToRip(pIemCpu, cbInstr);
3926 }
3927
3928 return rcStrict;
3929}
3930
3931
3932/**
3933 * Implements mov CRx,GReg.
3934 *
3935 * @param iCrReg The CRx register to write (valid).
3936 * @param iGReg The general register to load the CRx value from.
3937 */
3938IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3939{
3940 if (pIemCpu->uCpl != 0)
3941 return iemRaiseGeneralProtectionFault0(pIemCpu);
3942 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3943
3944 /*
3945 * Read the new value from the source register and call common worker.
3946 */
3947 uint64_t uNewCrX;
3948 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3949 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3950 else
3951 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3952 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3953}
3954
3955
3956/**
3957 * Implements 'LMSW r/m16'
3958 *
3959 * @param u16NewMsw The new value.
3960 */
3961IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3962{
3963 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3964
3965 if (pIemCpu->uCpl != 0)
3966 return iemRaiseGeneralProtectionFault0(pIemCpu);
3967 Assert(!pCtx->eflags.Bits.u1VM);
3968
3969 /*
3970 * Compose the new CR0 value and call common worker.
3971 */
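 /* LMSW only operates on the low four MSW bits: MP, EM and TS are replaced
    outright, and PE can be set but never cleared (note that PE is deliberately
    left out of the mask of bits cleared below). */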
3972 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3973 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3974 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3975}
3976
3977
3978/**
3979 * Implements 'CLTS'.
3980 */
3981IEM_CIMPL_DEF_0(iemCImpl_clts)
3982{
3983 if (pIemCpu->uCpl != 0)
3984 return iemRaiseGeneralProtectionFault0(pIemCpu);
3985
3986 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3987 uint64_t uNewCr0 = pCtx->cr0;
3988 uNewCr0 &= ~X86_CR0_TS;
3989 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3990}
3991
3992
3993/**
3994 * Implements mov GReg,DRx.
3995 *
3996 * @param iGReg The general register to store the DRx value in.
3997 * @param iDrReg The DRx register to read (0-7).
3998 */
3999IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
4000{
4001 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4002
4003 /*
4004 * Check preconditions.
4005 */
4006
4007 /* Raise GPs. */
4008 if (pIemCpu->uCpl != 0)
4009 return iemRaiseGeneralProtectionFault0(pIemCpu);
4010 Assert(!pCtx->eflags.Bits.u1VM);
4011
4012 if ( (iDrReg == 4 || iDrReg == 5)
4013 && (pCtx->cr4 & X86_CR4_DE) )
4014 {
4015 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
4016 return iemRaiseGeneralProtectionFault0(pIemCpu);
4017 }
4018
4019 /* Raise #DB if general access detect is enabled. */
4020 if (pCtx->dr[7] & X86_DR7_GD)
4021 {
4022 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
4023 return iemRaiseDebugException(pIemCpu);
4024 }
4025
4026 /*
4027 * Read the debug register and store it in the specified general register.
4028 */
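 /* When CR4.DE is clear, DR4 and DR5 are legacy aliases for DR6 and DR7; the
    fall-through cases below handle that. Reads also force the architecturally
    read-as-one bits and clear the read-as-zero bits of DR6/DR7. */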
4029 uint64_t drX;
4030 switch (iDrReg)
4031 {
4032 case 0: drX = pCtx->dr[0]; break;
4033 case 1: drX = pCtx->dr[1]; break;
4034 case 2: drX = pCtx->dr[2]; break;
4035 case 3: drX = pCtx->dr[3]; break;
4036 case 6:
4037 case 4:
4038 drX = pCtx->dr[6];
4039 drX |= X86_DR6_RA1_MASK;
4040 drX &= ~X86_DR6_RAZ_MASK;
4041 break;
4042 case 7:
4043 case 5:
4044 drX = pCtx->dr[7];
4045 drX |= X86_DR7_RA1_MASK;
4046 drX &= ~X86_DR7_RAZ_MASK;
4047 break;
4048 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
4049 }
4050
4051 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4052 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
4053 else
4054 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
4055
4056 iemRegAddToRip(pIemCpu, cbInstr);
4057 return VINF_SUCCESS;
4058}
4059
4060
4061/**
4062 * Implements mov DRx,GReg.
4063 *
4064 * @param iDrReg The DRx register to write (valid).
4065 * @param iGReg The general register to load the DRx value from.
4066 */
4067IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
4068{
4069 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4070
4071 /*
4072 * Check preconditions.
4073 */
4074 if (pIemCpu->uCpl != 0)
4075 return iemRaiseGeneralProtectionFault0(pIemCpu);
4076 Assert(!pCtx->eflags.Bits.u1VM);
4077
4078 if (iDrReg == 4 || iDrReg == 5)
4079 {
4080 if (pCtx->cr4 & X86_CR4_DE)
4081 {
4082 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
4083 return iemRaiseGeneralProtectionFault0(pIemCpu);
4084 }
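 /* CR4.DE is clear, so DR4/DR5 are aliases for DR6/DR7; redirect the write. */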
4085 iDrReg += 2;
4086 }
4087
4088 /* Raise #DB if general access detect is enabled. */
4089 /** @todo Is the \#DB for DR7.GD raised before the \#GP for reserved high
4090 * bits in DR7/DR6? */
4091 if (pCtx->dr[7] & X86_DR7_GD)
4092 {
4093 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
4094 return iemRaiseDebugException(pIemCpu);
4095 }
4096
4097 /*
4098 * Read the new value from the source register.
4099 */
4100 uint64_t uNewDrX;
4101 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4102 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
4103 else
4104 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
4105
4106 /*
4107 * Adjust it.
4108 */
4109 switch (iDrReg)
4110 {
4111 case 0:
4112 case 1:
4113 case 2:
4114 case 3:
4115 /* nothing to adjust */
4116 break;
4117
4118 case 6:
4119 if (uNewDrX & X86_DR6_MBZ_MASK)
4120 {
4121 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
4122 return iemRaiseGeneralProtectionFault0(pIemCpu);
4123 }
4124 uNewDrX |= X86_DR6_RA1_MASK;
4125 uNewDrX &= ~X86_DR6_RAZ_MASK;
4126 break;
4127
4128 case 7:
4129 if (uNewDrX & X86_DR7_MBZ_MASK)
4130 {
4131 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
4132 return iemRaiseGeneralProtectionFault0(pIemCpu);
4133 }
4134 uNewDrX |= X86_DR7_RA1_MASK;
4135 uNewDrX &= ~X86_DR7_RAZ_MASK;
4136 break;
4137
4138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4139 }
4140
4141 /*
4142 * Do the actual setting.
4143 */
4144 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4145 {
4146 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
4147 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
4148 }
4149 else
4150 pCtx->dr[iDrReg] = uNewDrX;
4151
4152 iemRegAddToRip(pIemCpu, cbInstr);
4153 return VINF_SUCCESS;
4154}
4155
4156
4157/**
4158 * Implements 'INVLPG m'.
4159 *
4160 * @param GCPtrPage The effective address of the page to invalidate.
4161 * @remarks Updates the RIP.
4162 */
4163IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
4164{
4165 /* ring-0 only. */
4166 if (pIemCpu->uCpl != 0)
4167 return iemRaiseGeneralProtectionFault0(pIemCpu);
4168 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4169
4170 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
4171 iemRegAddToRip(pIemCpu, cbInstr);
4172
4173 if (rc == VINF_SUCCESS)
4174 return VINF_SUCCESS;
4175 if (rc == VINF_PGM_SYNC_CR3)
4176 return iemSetPassUpStatus(pIemCpu, rc);
4177
4178 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
4179 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
4180 return rc;
4181}
4182
4183
4184/**
4185 * Implements RDTSC.
4186 */
4187IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
4188{
4189 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4190
4191 /*
4192 * Check preconditions.
4193 */
4194 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
4195 return iemRaiseUndefinedOpcode(pIemCpu);
4196
4197 if ( (pCtx->cr4 & X86_CR4_TSD)
4198 && pIemCpu->uCpl != 0)
4199 {
4200 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
4201 return iemRaiseGeneralProtectionFault0(pIemCpu);
4202 }
4203
4204 /*
4205 * Do the job.
4206 */
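 /* The 64-bit TSC value is returned split across EDX:EAX (high:low); the
    32-bit writes below implicitly clear the upper halves of RAX and RDX. */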
4207 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
4208 pCtx->rax = (uint32_t)uTicks;
4209 pCtx->rdx = uTicks >> 32;
4210#ifdef IEM_VERIFICATION_MODE_FULL
4211 pIemCpu->fIgnoreRaxRdx = true;
4212#endif
4213
4214 iemRegAddToRip(pIemCpu, cbInstr);
4215 return VINF_SUCCESS;
4216}
4217
4218
4219/**
4220 * Implements RDMSR.
4221 */
4222IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
4223{
4224 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4225
4226 /*
4227 * Check preconditions.
4228 */
4229 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
4230 return iemRaiseUndefinedOpcode(pIemCpu);
4231 if (pIemCpu->uCpl != 0)
4232 return iemRaiseGeneralProtectionFault0(pIemCpu);
4233
4234 /*
4235 * Do the job.
4236 */
4237 RTUINT64U uValue;
4238 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
4239 if (rc != VINF_SUCCESS)
4240 {
4241 Log(("IEM: rdmsr(%#x) -> GP(0)\n", pCtx->ecx));
4242 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
4243 return iemRaiseGeneralProtectionFault0(pIemCpu);
4244 }
4245
4246 pCtx->rax = uValue.s.Lo;
4247 pCtx->rdx = uValue.s.Hi;
4248
4249 iemRegAddToRip(pIemCpu, cbInstr);
4250 return VINF_SUCCESS;
4251}
4252
4253
4254/**
4255 * Implements WRMSR.
4256 */
4257IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
4258{
4259 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4260
4261 /*
4262 * Check preconditions.
4263 */
4264 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
4265 return iemRaiseUndefinedOpcode(pIemCpu);
4266 if (pIemCpu->uCpl != 0)
4267 return iemRaiseGeneralProtectionFault0(pIemCpu);
4268
4269 /*
4270 * Do the job.
4271 */
4272 RTUINT64U uValue;
4273 uValue.s.Lo = pCtx->eax;
4274 uValue.s.Hi = pCtx->edx;
4275
4276 int rc;
4277 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4278 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
4279 else
4280 {
4281 CPUMCTX CtxTmp = *pCtx;
4282 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
4283 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
4284 *pCtx = *pCtx2;
4285 *pCtx2 = CtxTmp;
4286 }
4287 if (rc != VINF_SUCCESS)
4288 {
4289 Log(("IEM: wrmsr(%#x,%#x`%08x) -> GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
4290 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
4291 return iemRaiseGeneralProtectionFault0(pIemCpu);
4292 }
4293
4294 iemRegAddToRip(pIemCpu, cbInstr);
4295 return VINF_SUCCESS;
4296}
4297
4298
4299/**
4300 * Implements 'IN eAX, port'.
4301 *
4302 * @param u16Port The source port.
4303 * @param cbReg The register size.
4304 */
4305IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
4306{
4307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4308
4309 /*
4310 * CPL check
4311 */
4312 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
4313 if (rcStrict != VINF_SUCCESS)
4314 return rcStrict;
4315
4316 /*
4317 * Perform the I/O.
4318 */
4319 uint32_t u32Value;
4320 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4321 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
4322 else
4323 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
4324 if (IOM_SUCCESS(rcStrict))
4325 {
4326 switch (cbReg)
4327 {
4328 case 1: pCtx->al = (uint8_t)u32Value; break;
4329 case 2: pCtx->ax = (uint16_t)u32Value; break;
4330 case 4: pCtx->rax = u32Value; break;
4331 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
4332 }
4333 iemRegAddToRip(pIemCpu, cbInstr);
4334 pIemCpu->cPotentialExits++;
4335 if (rcStrict != VINF_SUCCESS)
4336 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
4337 }
4338
4339 return rcStrict;
4340}
4341
4342
4343/**
4344 * Implements 'IN eAX, DX'.
4345 *
4346 * @param cbReg The register size.
4347 */
4348IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
4349{
4350 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
4351}
4352
4353
4354/**
4355 * Implements 'OUT port, eAX'.
4356 *
4357 * @param u16Port The destination port.
4358 * @param cbReg The register size.
4359 */
4360IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
4361{
4362 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4363
4364 /*
4365 * CPL check
4366 */
4367 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
4368 if (rcStrict != VINF_SUCCESS)
4369 return rcStrict;
4370
4371 /*
4372 * Perform the I/O.
4373 */
4374 uint32_t u32Value;
4375 switch (cbReg)
4376 {
4377 case 1: u32Value = pCtx->al; break;
4378 case 2: u32Value = pCtx->ax; break;
4379 case 4: u32Value = pCtx->eax; break;
4380 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
4381 }
4382 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4383 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
4384 else
4385 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
4386 if (IOM_SUCCESS(rcStrict))
4387 {
4388 iemRegAddToRip(pIemCpu, cbInstr);
4389 pIemCpu->cPotentialExits++;
4390 if (rcStrict != VINF_SUCCESS)
4391 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
4392 }
4393 return rcStrict;
4394}
4395
4396
4397/**
4398 * Implements 'OUT DX, eAX'.
4399 *
4400 * @param cbReg The register size.
4401 */
4402IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
4403{
4404 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
4405}
4406
4407
4408/**
4409 * Implements 'CLI'.
4410 */
4411IEM_CIMPL_DEF_0(iemCImpl_cli)
4412{
4413 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4414 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4415 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
4416 uint32_t const fEflOld = fEfl;
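 /* Summary of the rules implemented below: in protected mode, CPL <= IOPL
    clears IF; otherwise CPL 3 with CR4.PVI clears VIF instead. In V8086 mode,
    IOPL 3 clears IF and CR4.VME clears VIF. Any other combination raises
    #GP(0). Real mode always clears IF. */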
4417 if (pCtx->cr0 & X86_CR0_PE)
4418 {
4419 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
4420 if (!(fEfl & X86_EFL_VM))
4421 {
4422 if (pIemCpu->uCpl <= uIopl)
4423 fEfl &= ~X86_EFL_IF;
4424 else if ( pIemCpu->uCpl == 3
4425 && (pCtx->cr4 & X86_CR4_PVI) )
4426 fEfl &= ~X86_EFL_VIF;
4427 else
4428 return iemRaiseGeneralProtectionFault0(pIemCpu);
4429 }
4430 /* V8086 */
4431 else if (uIopl == 3)
4432 fEfl &= ~X86_EFL_IF;
4433 else if ( uIopl < 3
4434 && (pCtx->cr4 & X86_CR4_VME) )
4435 fEfl &= ~X86_EFL_VIF;
4436 else
4437 return iemRaiseGeneralProtectionFault0(pIemCpu);
4438 }
4439 /* real mode */
4440 else
4441 fEfl &= ~X86_EFL_IF;
4442
4443 /* Commit. */
4444 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
4445 iemRegAddToRip(pIemCpu, cbInstr);
4446 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
4447 return VINF_SUCCESS;
4448}
4449
4450
4451/**
4452 * Implements 'STI'.
4453 */
4454IEM_CIMPL_DEF_0(iemCImpl_sti)
4455{
4456 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4457 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4458 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
4459 uint32_t const fEflOld = fEfl;
4460
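 /* Mirrors CLI: in protected mode CPL <= IOPL sets IF; CPL 3 with CR4.PVI and
    VIP clear sets VIF. In V8086 mode IOPL 3 sets IF, and CR4.VME with VIP
    clear sets VIF. Anything else raises #GP(0); real mode always sets IF.
    When IF goes from 0 to 1 an interrupt shadow is armed further down so the
    following instruction completes before interrupts are delivered. */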
4461 if (pCtx->cr0 & X86_CR0_PE)
4462 {
4463 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
4464 if (!(fEfl & X86_EFL_VM))
4465 {
4466 if (pIemCpu->uCpl <= uIopl)
4467 fEfl |= X86_EFL_IF;
4468 else if ( pIemCpu->uCpl == 3
4469 && (pCtx->cr4 & X86_CR4_PVI)
4470 && !(fEfl & X86_EFL_VIP) )
4471 fEfl |= X86_EFL_VIF;
4472 else
4473 return iemRaiseGeneralProtectionFault0(pIemCpu);
4474 }
4475 /* V8086 */
4476 else if (uIopl == 3)
4477 fEfl |= X86_EFL_IF;
4478 else if ( uIopl < 3
4479 && (pCtx->cr4 & X86_CR4_VME)
4480 && !(fEfl & X86_EFL_VIP) )
4481 fEfl |= X86_EFL_VIF;
4482 else
4483 return iemRaiseGeneralProtectionFault0(pIemCpu);
4484 }
4485 /* real mode */
4486 else
4487 fEfl |= X86_EFL_IF;
4488
4489 /* Commit. */
4490 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
4491 iemRegAddToRip(pIemCpu, cbInstr);
4492 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_VERIFICATION_ENABLED(pIemCpu))
4493 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4494 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
4495 return VINF_SUCCESS;
4496}
4497
4498
4499/**
4500 * Implements 'HLT'.
4501 */
4502IEM_CIMPL_DEF_0(iemCImpl_hlt)
4503{
4504 if (pIemCpu->uCpl != 0)
4505 return iemRaiseGeneralProtectionFault0(pIemCpu);
4506 iemRegAddToRip(pIemCpu, cbInstr);
4507 return VINF_EM_HALT;
4508}
4509
4510
4511/**
4512 * Implements 'MONITOR'.
4513 */
4514IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
4515{
4516 /*
4517 * Permission checks.
4518 */
4519 if (pIemCpu->uCpl != 0)
4520 {
4521 Log2(("monitor: CPL != 0\n"));
4522 return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
4523 }
4524 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_FEATURE_ECX_MONITOR))
4525 {
4526 Log2(("monitor: Not in CPUID\n"));
4527 return iemRaiseUndefinedOpcode(pIemCpu);
4528 }
4529
4530 /*
4531 * Gather the operands and validate them.
4532 */
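 /* rAX supplies the effective address to arm the monitor on (subject to the
    segmentation and paging checks below), ECX carries extension flags (must
    be zero here) and EDX carries hints that are passed through to EM. */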
4533 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4534 RTGCPTR GCPtrMem = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
4535 uint32_t uEcx = pCtx->ecx;
4536 uint32_t uEdx = pCtx->edx;
4537/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
4538 * \#GP first. */
4539 if (uEcx != 0)
4540 {
4541 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx));
4542 return iemRaiseGeneralProtectionFault0(pIemCpu);
4543 }
4544
4545 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
4546 if (rcStrict != VINF_SUCCESS)
4547 return rcStrict;
4548
4549 RTGCPHYS GCPhysMem;
4550 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
4551 if (rcStrict != VINF_SUCCESS)
4552 return rcStrict;
4553
4554 /*
4555 * Call EM to prepare the monitor/wait.
4556 */
4557 rcStrict = EMMonitorWaitPrepare(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
4558 Assert(rcStrict == VINF_SUCCESS);
4559
4560 iemRegAddToRip(pIemCpu, cbInstr);
4561 return rcStrict;
4562}
4563
4564
4565/**
4566 * Implements 'MWAIT'.
4567 */
4568IEM_CIMPL_DEF_0(iemCImpl_mwait)
4569{
4570 /*
4571 * Permission checks.
4572 */
4573 if (pIemCpu->uCpl != 0)
4574 {
4575 Log2(("mwait: CPL != 0\n"));
4576 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
4577 * EFLAGS.VM then.) */
4578 return iemRaiseUndefinedOpcode(pIemCpu);
4579 }
4580 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_FEATURE_ECX_MONITOR))
4581 {
4582 Log2(("mwait: Not in CPUID\n"));
4583 return iemRaiseUndefinedOpcode(pIemCpu);
4584 }
4585
4586 /*
4587 * Gather the operands and validate them.
4588 */
4589 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4590 uint32_t uEax = pCtx->eax;
4591 uint32_t uEcx = pCtx->ecx;
4592 if (uEcx != 0)
4593 {
4594 /* Only supported extension is break on IRQ when IF=0. */
4595 if (uEcx > 1)
4596 {
4597 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
4598 return iemRaiseGeneralProtectionFault0(pIemCpu);
4599 }
4600 uint32_t fMWaitFeatures = 0;
4601 uint32_t uIgnore = 0;
4602 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 5, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
4603 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
4604 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
4605 {
4606 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
4607 return iemRaiseGeneralProtectionFault0(pIemCpu);
4608 }
4609 }
4610
4611 /*
4612 * Call EM to prepare the monitor/wait.
4613 */
4614 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(IEMCPU_TO_VMCPU(pIemCpu), uEax, uEcx);
4615
4616 iemRegAddToRip(pIemCpu, cbInstr);
4617 return rcStrict;
4618}
4619
4620
4621/**
4622 * Implements 'SWAPGS'.
4623 */
4624IEM_CIMPL_DEF_0(iemCImpl_swapgs)
4625{
4626 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
4627
4628 /*
4629 * Permission checks.
4630 */
4631 if (pIemCpu->uCpl != 0)
4632 {
4633 Log2(("swapgs: CPL != 0\n"));
4634 return iemRaiseUndefinedOpcode(pIemCpu);
4635 }
4636
4637 /*
4638 * Do the job.
4639 */
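 /* SWAPGS exchanges the hidden GS base with the IA32_KERNEL_GS_BASE MSR; the
    GS selector and attributes are left untouched. */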
4640 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4641 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
4642 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
4643 pCtx->gs.u64Base = uOtherGsBase;
4644
4645 iemRegAddToRip(pIemCpu, cbInstr);
4646 return VINF_SUCCESS;
4647}
4648
4649
4650/**
4651 * Implements 'CPUID'.
4652 */
4653IEM_CIMPL_DEF_0(iemCImpl_cpuid)
4654{
4655 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4656
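 /* CPUID produces 32-bit results; the masking below clears the upper halves
    of RAX, RBX, RCX and RDX after the leaf has been fetched. */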
4657 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
4658 pCtx->rax &= UINT32_C(0xffffffff);
4659 pCtx->rbx &= UINT32_C(0xffffffff);
4660 pCtx->rcx &= UINT32_C(0xffffffff);
4661 pCtx->rdx &= UINT32_C(0xffffffff);
4662
4663 iemRegAddToRip(pIemCpu, cbInstr);
4664 return VINF_SUCCESS;
4665}
4666
4667
4668/**
4669 * Implements 'AAD'.
4670 *
4671 * @param bImm The immediate operand (the base, normally 10).
4672 */
4673IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
4674{
4675 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4676
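 /* AAD computes AL = (AL + AH * imm8) & 0xff and clears AH; storing the 8-bit
    result into AX below zero-extends it, which does exactly that. */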
4677 uint16_t const ax = pCtx->ax;
4678 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
4679 pCtx->ax = al;
4680 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4681 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4682 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4683
4684 iemRegAddToRip(pIemCpu, cbInstr);
4685 return VINF_SUCCESS;
4686}
4687
4688
4689/**
4690 * Implements 'AAM'.
4691 *
4692 * @param bImm The immediate operand. Cannot be 0.
4693 */
4694IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
4695{
4696 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4697 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
4698
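 /* AAM computes AH = AL / imm8 and AL = AL % imm8; imm8 is normally 10, and a
    zero divisor raises #DE (handled by the decoder, see the assert above). */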
4699 uint16_t const ax = pCtx->ax;
4700 uint8_t const al = (uint8_t)ax % bImm;
4701 uint8_t const ah = (uint8_t)ax / bImm;
4702 pCtx->ax = (ah << 8) + al;
4703 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4704 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4705 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4706
4707 iemRegAddToRip(pIemCpu, cbInstr);
4708 return VINF_SUCCESS;
4709}
4710
4711
4712
4713
4714/*
4715 * Instantiate the various string operation combinations.
4716 */
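/* Each inclusion of IEMAllCImplStrInstr.cpp.h below stamps out one set of
   string instruction workers for the given OP_SIZE/ADDR_SIZE combination; the
   included header is expected to #undef the two macros when done, which is why
   they are simply redefined before every include. The 64-bit operand size is
   only paired with 32/64-bit address sizes since 16-bit addressing is not
   available in long mode. */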
4717#define OP_SIZE 8
4718#define ADDR_SIZE 16
4719#include "IEMAllCImplStrInstr.cpp.h"
4720#define OP_SIZE 8
4721#define ADDR_SIZE 32
4722#include "IEMAllCImplStrInstr.cpp.h"
4723#define OP_SIZE 8
4724#define ADDR_SIZE 64
4725#include "IEMAllCImplStrInstr.cpp.h"
4726
4727#define OP_SIZE 16
4728#define ADDR_SIZE 16
4729#include "IEMAllCImplStrInstr.cpp.h"
4730#define OP_SIZE 16
4731#define ADDR_SIZE 32
4732#include "IEMAllCImplStrInstr.cpp.h"
4733#define OP_SIZE 16
4734#define ADDR_SIZE 64
4735#include "IEMAllCImplStrInstr.cpp.h"
4736
4737#define OP_SIZE 32
4738#define ADDR_SIZE 16
4739#include "IEMAllCImplStrInstr.cpp.h"
4740#define OP_SIZE 32
4741#define ADDR_SIZE 32
4742#include "IEMAllCImplStrInstr.cpp.h"
4743#define OP_SIZE 32
4744#define ADDR_SIZE 64
4745#include "IEMAllCImplStrInstr.cpp.h"
4746
4747#define OP_SIZE 64
4748#define ADDR_SIZE 32
4749#include "IEMAllCImplStrInstr.cpp.h"
4750#define OP_SIZE 64
4751#define ADDR_SIZE 64
4752#include "IEMAllCImplStrInstr.cpp.h"
4753
4754
4755/**
4756 * Implements 'FINIT' and 'FNINIT'.
4757 *
4758 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
4759 * not.
4760 */
4761IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
4762{
4763 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4764
4765 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4766 return iemRaiseDeviceNotAvailable(pIemCpu);
4767
4768 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
4769 if (fCheckXcpts && TODO )
4770 return iemRaiseMathFault(pIemCpu);
4771 */
4772
4773 if (iemFRegIsFxSaveFormat(pIemCpu))
4774 {
4775 pCtx->fpu.FCW = 0x37f;
4776 pCtx->fpu.FSW = 0;
4777 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
4778 pCtx->fpu.FPUDP = 0;
4779 pCtx->fpu.DS = 0; //??
4780 pCtx->fpu.Rsrvd2= 0;
4781 pCtx->fpu.FPUIP = 0;
4782 pCtx->fpu.CS = 0; //??
4783 pCtx->fpu.Rsrvd1= 0;
4784 pCtx->fpu.FOP = 0;
4785 }
4786 else
4787 {
4788 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
4789 pFpu->FCW = 0x37f;
4790 pFpu->FSW = 0;
4791 pFpu->FTW = 0xffff; /* 11 - empty */
4792 pFpu->FPUOO = 0; //??
4793 pFpu->FPUOS = 0; //??
4794 pFpu->FPUIP = 0;
4795 pFpu->CS = 0; //??
4796 pFpu->FOP = 0;
4797 }
4798
4799 iemHlpUsedFpu(pIemCpu);
4800 iemRegAddToRip(pIemCpu, cbInstr);
4801 return VINF_SUCCESS;
4802}
4803
4804
4805/**
4806 * Implements 'FXSAVE'.
4807 *
4808 * @param iEffSeg The effective segment.
4809 * @param GCPtrEff The address of the image.
4810 * @param enmEffOpSize The operand size (only REX.W really matters).
4811 */
4812IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4813{
4814 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4815
4816 /*
4817 * Raise exceptions.
4818 */
4819 if (pCtx->cr0 & X86_CR0_EM)
4820 return iemRaiseUndefinedOpcode(pIemCpu);
4821 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4822 return iemRaiseDeviceNotAvailable(pIemCpu);
4823 if (GCPtrEff & 15)
4824 {
4825 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4826 * all/any misalignment sizes; Intel says it's an implementation detail. */
4827 if ( (pCtx->cr0 & X86_CR0_AM)
4828 && pCtx->eflags.Bits.u1AC
4829 && pIemCpu->uCpl == 3)
4830 return iemRaiseAlignmentCheckException(pIemCpu);
4831 return iemRaiseGeneralProtectionFault0(pIemCpu);
4832 }
4833 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4834
4835 /*
4836 * Access the memory.
4837 */
4838 void *pvMem512;
4839 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4840 if (rcStrict != VINF_SUCCESS)
4841 return rcStrict;
4842 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
4843
4844 /*
4845 * Store the registers.
4846 */
4847 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
4848 * specific whether MXCSR and XMM0-XMM7 are saved. */
4849
4850 /* common for all formats */
4851 pDst->FCW = pCtx->fpu.FCW;
4852 pDst->FSW = pCtx->fpu.FSW;
4853 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
4854 pDst->FOP = pCtx->fpu.FOP;
4855 pDst->MXCSR = pCtx->fpu.MXCSR;
4856 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
4857 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
4858 {
4859 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
4860 * them for now... */
4861 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4862 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4863 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
4864 pDst->aRegs[i].au32[3] = 0;
4865 }
4866
4867 /* FPU IP, CS, DP and DS. */
4868 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
4869 * state information. :-/
4870 * Storing zeros now to prevent any potential leakage of host info. */
4871 pDst->FPUIP = 0;
4872 pDst->CS = 0;
4873 pDst->Rsrvd1 = 0;
4874 pDst->FPUDP = 0;
4875 pDst->DS = 0;
4876 pDst->Rsrvd2 = 0;
4877
4878 /* XMM registers. */
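 /* EFER.FFXSR is AMD's 'fast FXSAVE/FXRSTOR' feature: when it is set and the
    instruction executes at CPL 0 in 64-bit mode, the XMM register array is
    skipped (MXCSR was already stored unconditionally above). */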
4879 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4880 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4881 || pIemCpu->uCpl != 0)
4882 {
4883 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4884 for (uint32_t i = 0; i < cXmmRegs; i++)
4885 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
4886 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
4887 * right? */
4888 }
4889
4890 /*
4891 * Commit the memory.
4892 */
4893 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4894 if (rcStrict != VINF_SUCCESS)
4895 return rcStrict;
4896
4897 iemRegAddToRip(pIemCpu, cbInstr);
4898 return VINF_SUCCESS;
4899}
4900
4901
4902/**
4903 * Implements 'FXRSTOR'.
4904 *
4905 * @param iEffSeg The effective segment.
 * @param GCPtrEff The address of the image.
4906 * @param enmEffOpSize The operand size (only REX.W really matters).
4907 */
4908IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4909{
4910 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4911
4912 /*
4913 * Raise exceptions.
4914 */
4915 if (pCtx->cr0 & X86_CR0_EM)
4916 return iemRaiseUndefinedOpcode(pIemCpu);
4917 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4918 return iemRaiseDeviceNotAvailable(pIemCpu);
4919 if (GCPtrEff & 15)
4920 {
4921 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4922 * all/any misalignment sizes; Intel says it's an implementation detail. */
4923 if ( (pCtx->cr0 & X86_CR0_AM)
4924 && pCtx->eflags.Bits.u1AC
4925 && pIemCpu->uCpl == 3)
4926 return iemRaiseAlignmentCheckException(pIemCpu);
4927 return iemRaiseGeneralProtectionFault0(pIemCpu);
4928 }
4929 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4930
4931 /*
4932 * Access the memory.
4933 */
4934 void *pvMem512;
4935 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4936 if (rcStrict != VINF_SUCCESS)
4937 return rcStrict;
4938 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4939
4940 /*
4941 * Check the state for stuff which will GP(0).
4942 */
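 /* Attempting to load MXCSR bits outside the supported mask raises #GP(0).
    When the saved MXCSR_MASK field is zero, the architectural default mask
    0xffbf is assumed (all bits valid except DAZ). */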
4943 uint32_t const fMXCSR = pSrc->MXCSR;
4944 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4945 if (fMXCSR & ~fMXCSR_MASK)
4946 {
4947 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4948 return iemRaiseGeneralProtectionFault0(pIemCpu);
4949 }
4950
4951 /*
4952 * Load the registers.
4953 */
4954 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
4955 * specific whether MXCSR and XMM0-XMM7 are restored. */
4956
4957 /* common for all formats */
4958 pCtx->fpu.FCW = pSrc->FCW;
4959 pCtx->fpu.FSW = pSrc->FSW;
4960 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4961 pCtx->fpu.FOP = pSrc->FOP;
4962 pCtx->fpu.MXCSR = fMXCSR;
4963 /* (MXCSR_MASK is read-only) */
4964 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4965 {
4966 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4967 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4968 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4969 pCtx->fpu.aRegs[i].au32[3] = 0;
4970 }
4971
4972 /* FPU IP, CS, DP and DS. */
4973 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4974 {
4975 pCtx->fpu.FPUIP = pSrc->FPUIP;
4976 pCtx->fpu.CS = pSrc->CS;
4977 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4978 pCtx->fpu.FPUDP = pSrc->FPUDP;
4979 pCtx->fpu.DS = pSrc->DS;
4980 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4981 }
4982 else
4983 {
4984 pCtx->fpu.FPUIP = pSrc->FPUIP;
4985 pCtx->fpu.CS = pSrc->CS;
4986 pCtx->fpu.Rsrvd1 = 0;
4987 pCtx->fpu.FPUDP = pSrc->FPUDP;
4988 pCtx->fpu.DS = pSrc->DS;
4989 pCtx->fpu.Rsrvd2 = 0;
4990 }
4991
4992 /* XMM registers. */
4993 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4994 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4995 || pIemCpu->uCpl != 0)
4996 {
4997 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4998 for (uint32_t i = 0; i < cXmmRegs; i++)
4999 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
5000 }
5001
5002 /*
5003 * Commit the memory.
5004 */
5005 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
5006 if (rcStrict != VINF_SUCCESS)
5007 return rcStrict;
5008
5009 iemHlpUsedFpu(pIemCpu);
5010 iemRegAddToRip(pIemCpu, cbInstr);
5011 return VINF_SUCCESS;
5012}
5013
5014
5015/**
5016 * Common routine for fnstenv and fnsave.
5017 *
5018 * @param uPtr Where to store the state.
5019 * @param pCtx The CPU context.
5020 */
5021static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
5022{
5023 if (enmEffOpSize == IEMMODE_16BIT)
5024 {
5025 uPtr.pu16[0] = pCtx->fpu.FCW;
5026 uPtr.pu16[1] = pCtx->fpu.FSW;
5027 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
5028 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5029 {
5030 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
5031 * protected mode or long mode and we save it in real mode? And vice
5032 * versa? And with 32-bit operand size? I think the CPU stores the
5033 * effective address ((CS << 4) + IP) in the offset register and not
5034 * doing any address calculations here. */
5035 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
5036 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
5037 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
5038 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
5039 }
5040 else
5041 {
5042 uPtr.pu16[3] = pCtx->fpu.FPUIP;
5043 uPtr.pu16[4] = pCtx->fpu.CS;
5044 uPtr.pu16[5] = pCtx->fpu.FPUDP;
5045 uPtr.pu16[6] = pCtx->fpu.DS;
5046 }
5047 }
5048 else
5049 {
5050 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
5051 uPtr.pu16[0*2] = pCtx->fpu.FCW;
5052 uPtr.pu16[1*2] = pCtx->fpu.FSW;
5053 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
5054 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5055 {
5056 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
5057 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
5058 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
5059 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
5060 }
5061 else
5062 {
5063 uPtr.pu32[3] = pCtx->fpu.FPUIP;
5064 uPtr.pu16[4*2] = pCtx->fpu.CS;
5065 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
5066 uPtr.pu32[5] = pCtx->fpu.FPUDP;
5067 uPtr.pu16[6*2] = pCtx->fpu.DS;
5068 }
5069 }
5070}
5071
5072
5073/**
5074 * Common routine for fldenv and frstor.
5075 *
5076 * @param uPtr Where to load the state from.
5077 * @param pCtx The CPU context.
5078 */
5079static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
5080{
5081 if (enmEffOpSize == IEMMODE_16BIT)
5082 {
5083 pCtx->fpu.FCW = uPtr.pu16[0];
5084 pCtx->fpu.FSW = uPtr.pu16[1];
5085 pCtx->fpu.FTW = uPtr.pu16[2];
5086 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5087 {
5088 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
5089 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
5090 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
5091 pCtx->fpu.CS = 0;
5092 pCtx->fpu.Rsrvd1= 0;
5093 pCtx->fpu.DS = 0;
5094 pCtx->fpu.Rsrvd2= 0;
5095 }
5096 else
5097 {
5098 pCtx->fpu.FPUIP = uPtr.pu16[3];
5099 pCtx->fpu.CS = uPtr.pu16[4];
5100 pCtx->fpu.Rsrvd1= 0;
5101 pCtx->fpu.FPUDP = uPtr.pu16[5];
5102 pCtx->fpu.DS = uPtr.pu16[6];
5103 pCtx->fpu.Rsrvd2= 0;
5104 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
5105 }
5106 }
5107 else
5108 {
5109 pCtx->fpu.FCW = uPtr.pu16[0*2];
5110 pCtx->fpu.FSW = uPtr.pu16[1*2];
5111 pCtx->fpu.FTW = uPtr.pu16[2*2];
5112 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5113 {
5114 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
5115 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
5116 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
5117 pCtx->fpu.CS = 0;
5118 pCtx->fpu.Rsrvd1= 0;
5119 pCtx->fpu.DS = 0;
5120 pCtx->fpu.Rsrvd2= 0;
5121 }
5122 else
5123 {
5124 pCtx->fpu.FPUIP = uPtr.pu32[3];
5125 pCtx->fpu.CS = uPtr.pu16[4*2];
5126 pCtx->fpu.Rsrvd1= 0;
5127 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
5128 pCtx->fpu.FPUDP = uPtr.pu32[5];
5129 pCtx->fpu.DS = uPtr.pu16[6*2];
5130 pCtx->fpu.Rsrvd2= 0;
5131 }
5132 }
5133
5134 /* Make adjustments. */
5135 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
5136 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
5137 iemFpuRecalcExceptionStatus(pCtx);
5138 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
5139 * exceptions are pending after loading the saved state? */
5140}
5141
5142
5143/**
5144 * Implements 'FNSTENV'.
5145 *
5146 * @param enmEffOpSize The operand size (only REX.W really matters).
5147 * @param iEffSeg The effective segment register for @a GCPtrEff.
5148 * @param GCPtrEffDst The address of the image.
5149 */
5150IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5151{
5152 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5153 RTPTRUNION uPtr;
5154 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
5155 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
5156 if (rcStrict != VINF_SUCCESS)
5157 return rcStrict;
5158
5159 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
5160
5161 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
5162 if (rcStrict != VINF_SUCCESS)
5163 return rcStrict;
5164
5165 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
5166 iemRegAddToRip(pIemCpu, cbInstr);
5167 return VINF_SUCCESS;
5168}
5169
5170
5171/**
5172 * Implements 'FNSAVE'.
5173 *
5174 * @param enmEffOpSize The operand size.
 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
 * @param GCPtrEffDst The address of the image.
5176 */
5177IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5178{
5179 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5180 RTPTRUNION uPtr;
5181 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
5182 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
5183 if (rcStrict != VINF_SUCCESS)
5184 return rcStrict;
5185
5186 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
5187 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
5188 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
5189 {
5190 paRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
5191 paRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
5192 paRegs[i].au16[4] = pCtx->fpu.aRegs[i].au16[4];
5193 }
5194
5195 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
5196 if (rcStrict != VINF_SUCCESS)
5197 return rcStrict;
5198
5199 /*
5200 * Re-initialize the FPU.
5201 */
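 /* After storing the image, FNSAVE leaves the FPU in its FNINIT state:
    FCW = 0x37f, status cleared and all registers tagged empty. */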
5202 pCtx->fpu.FCW = 0x37f;
5203 pCtx->fpu.FSW = 0;
5204 pCtx->fpu.FTW = 0x00; /* 0 - empty */
5205 pCtx->fpu.FPUDP = 0;
5206 pCtx->fpu.DS = 0;
5207 pCtx->fpu.Rsrvd2= 0;
5208 pCtx->fpu.FPUIP = 0;
5209 pCtx->fpu.CS = 0;
5210 pCtx->fpu.Rsrvd1= 0;
5211 pCtx->fpu.FOP = 0;
5212
5213 iemHlpUsedFpu(pIemCpu);
5214 iemRegAddToRip(pIemCpu, cbInstr);
5215 return VINF_SUCCESS;
5216}
5217
5218
5219
5220/**
5221 * Implements 'FLDENV'.
5222 *
5223 * @param enmEffOpSize The operand size (only REX.W really matters).
5224 * @param iEffSeg The effective segment register for @a GCPtrEff.
5225 * @param GCPtrEffSrc The address of the image.
5226 */
5227IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
5228{
5229 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5230 RTCPTRUNION uPtr;
5231 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
5232 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
5233 if (rcStrict != VINF_SUCCESS)
5234 return rcStrict;
5235
5236 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
5237
5238 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
5239 if (rcStrict != VINF_SUCCESS)
5240 return rcStrict;
5241
5242 iemHlpUsedFpu(pIemCpu);
5243 iemRegAddToRip(pIemCpu, cbInstr);
5244 return VINF_SUCCESS;
5245}
5246
5247
5248/**
5249 * Implements 'FRSTOR'.
5250 *
5251 * @param enmEffOpSize The operand size.
 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
 * @param GCPtrEffSrc The address of the image.
5253 */
5254IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
5255{
5256 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5257 RTCPTRUNION uPtr;
5258 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
5259 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
5260 if (rcStrict != VINF_SUCCESS)
5261 return rcStrict;
5262
5263 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
5264 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
5265 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
5266 {
5267 pCtx->fpu.aRegs[i].au32[0] = paRegs[i].au32[0];
5268 pCtx->fpu.aRegs[i].au32[1] = paRegs[i].au32[1];
5269 pCtx->fpu.aRegs[i].au32[2] = paRegs[i].au16[4];
5270 pCtx->fpu.aRegs[i].au32[3] = 0;
5271 }
5272
5273 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
5274 if (rcStrict != VINF_SUCCESS)
5275 return rcStrict;
5276
5277 iemHlpUsedFpu(pIemCpu);
5278 iemRegAddToRip(pIemCpu, cbInstr);
5279 return VINF_SUCCESS;
5280}
5281
5282
5283/**
5284 * Implements 'FLDCW'.
5285 *
5286 * @param u16Fcw The new FCW.
5287 */
5288IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
5289{
5290 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5291
5292 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
5293 /** @todo Testcase: See what happens when trying to set undefined bits
5294 * (other than 6 and 7). Currently ignoring them. */
5295 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
5296 * according to FSW. (This is what is currently implemented.) */
5297 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
5298 iemFpuRecalcExceptionStatus(pCtx);
5299
5300 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
5301 iemHlpUsedFpu(pIemCpu);
5302 iemRegAddToRip(pIemCpu, cbInstr);
5303 return VINF_SUCCESS;
5304}
5305
5306
5307
5308/**
5309 * Implements the underflow case of fxch.
5310 *
5311 * @param iStReg The other stack register.
5312 */
5313IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
5314{
5315 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5316
5317 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5318 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
5319 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
5320
5321 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
5322 * registers are read as QNaN and then exchanged. This could be
5323 * wrong... */
5324 if (pCtx->fpu.FCW & X86_FCW_IM)
5325 {
5326 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
5327 {
5328 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
5329 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
5330 else
5331 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
5332 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
5333 }
5334 else
5335 {
5336 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
5337 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
5338 }
5339 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5340 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5341 }
5342 else
5343 {
5344 /* raise underflow exception, don't change anything. */
5345 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
5346 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5347 }
5348
5349 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5350 iemHlpUsedFpu(pIemCpu);
5351 iemRegAddToRip(pIemCpu, cbInstr);
5352 return VINF_SUCCESS;
5353}
5354
5355
5356/**
5357 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
5358 *
5359 * @param iStReg The other stack register to compare ST(0) with.
 * @param pfnAImpl The comparison worker to call.
 * @param fPop Whether to pop ST(0) afterwards.
5360 */
5361IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
5362{
5363 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5364 Assert(iStReg < 8);
5365
5366 /*
5367 * Raise exceptions.
5368 */
5369 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
5370 return iemRaiseDeviceNotAvailable(pIemCpu);
5371 uint16_t u16Fsw = pCtx->fpu.FSW;
5372 if (u16Fsw & X86_FSW_ES)
5373 return iemRaiseMathFault(pIemCpu);
5374
5375 /*
5376 * Check if any of the register accesses causes #SF + #IA.
5377 */
5378 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
5379 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
5380 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
5381 {
5382 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
5383 pCtx->fpu.FSW &= ~X86_FSW_C1;
5384 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
5385 if ( !(u16Fsw & X86_FSW_IE)
5386 || (pCtx->fpu.FCW & X86_FCW_IM) )
5387 {
5388 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5389 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5390 }
5391 }
5392 else if (pCtx->fpu.FCW & X86_FCW_IM)
5393 {
5394 /* Masked underflow. */
5395 pCtx->fpu.FSW &= ~X86_FSW_C1;
5396 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5397 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5398 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
5399 }
5400 else
5401 {
5402 /* Raise underflow - don't touch EFLAGS or TOP. */
5403 pCtx->fpu.FSW &= ~X86_FSW_C1;
5404 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5405 fPop = false;
5406 }
5407
5408 /*
5409 * Pop if necessary.
5410 */
5411 if (fPop)
5412 {
5413 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
5414 pCtx->fpu.FSW &= X86_FSW_TOP_MASK;
5415 pCtx->fpu.FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
5416 }
5417
5418 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5419 iemHlpUsedFpu(pIemCpu);
5420 iemRegAddToRip(pIemCpu, cbInstr);
5421 return VINF_SUCCESS;
5422}
5423
5424/** @} */
5425