
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 46385

Last change on this file since 46385 was 46384, checked in by vboxsync, 12 years ago

IEM: Long mode debugging.

1/* $Id: IEMAllCImpl.cpp.h 46384 2013-06-04 14:07:49Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 X86EFLAGS Efl;
38 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
39 if ( (pCtx->cr0 & X86_CR0_PE)
40 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
41 || Efl.Bits.u1VM) )
42 {
43 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
44 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
45 }
46 return VINF_SUCCESS;
47}
48
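/*
 * The @todo above concerns the TSS I/O permission bitmap.  The sketch below is
 * illustrative only (hypothetical name and signature, not part of the build):
 * each port maps to one bit, a set bit or a bit beyond the end of the bitmap
 * denies the access, and every port covered by the operand must be allowed.
 */
#if 0 /* illustrative sketch, not built */
static bool iemSketchIsIoPortAccessAllowed(const uint8_t *pabBitmap, size_t cbBitmap,
                                           uint16_t u16Port, uint8_t cbOperand)
{
    for (uint8_t i = 0; i < cbOperand; i++)
    {
        uint32_t const uPort   = u16Port + i;
        size_t   const offByte = uPort / 8;
        if (offByte >= cbBitmap)
            return false;                           /* bits outside the map count as set */
        if (pabBitmap[offByte] & (1 << (uPort & 7)))
            return false;                           /* access to this port is denied */
    }
    return true;                                    /* all covered ports are permitted */
}
#endif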
49
50#if 0
51/**
52 * Calculates the parity bit.
53 *
54 * @returns true if the bit is set, false if not.
55 * @param u8Result The least significant byte of the result.
56 */
57static bool iemHlpCalcParityFlag(uint8_t u8Result)
58{
59 /*
60 * Parity is set if the number of bits in the least significant byte of
61 * the result is even.
62 */
63 uint8_t cBits;
64 cBits = u8Result & 1; /* 0 */
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1;
71 u8Result >>= 1;
72 cBits += u8Result & 1; /* 4 */
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 u8Result >>= 1;
78 cBits += u8Result & 1;
79 return !(cBits & 1);
80}
81#endif /* not used */
82
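/*
 * Illustrative alternative to the bit-counting helper above (hypothetical
 * name, not built): XOR-folding leaves the parity of the byte in bit 0
 * (1 = odd population count), and PF is set when the count is even.
 */
#if 0 /* illustrative sketch, not built */
static bool iemSketchCalcParityFlag(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;
    return !(u8Result & 1);
}
#endif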
83
84/**
85 * Updates the specified flags according to an 8-bit result.
86 *
87 * @param pIemCpu The IEM state of the calling EMT.
88 * @param u8Result The result to set the flags according to.
89 * @param fToUpdate The flags to update.
90 * @param fUndefined The flags that are specified as undefined.
91 */
92static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
93{
94 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
95
96 uint32_t fEFlags = pCtx->eflags.u;
97 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
98 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
99 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
100}
101
102
103/**
104 * Loads a NULL data selector into a selector register, both the hidden and
105 * visible parts, in protected mode.
106 *
107 * @param pSReg Pointer to the segment register.
108 * @param uRpl The RPL.
109 */
110static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl)
111{
112 /** @todo Testcase: write a testcase checking what happens when loading a NULL
113 * data selector in protected mode. */
114 pSReg->Sel = uRpl;
115 pSReg->ValidSel = uRpl;
116 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
117 pSReg->u64Base = 0;
118 pSReg->u32Limit = 0;
119 pSReg->Attr.u = 0;
120}
121
122
123/**
124 * Helper used by iret and retf for adjusting data selectors at the new CPL.
125 *
126 * @param uCpl The new CPL.
127 * @param pSReg Pointer to the segment register.
128 */
129static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
130{
131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
132 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
133 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
134#else
135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
136#endif
137
138 if ( uCpl > pSReg->Attr.n.u2Dpl
139 && pSReg->Attr.n.u1DescType /* code or data, not system */
140 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
141 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
142 iemHlpLoadNullDataSelectorProt(pSReg, 0);
143}
144
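/*
 * Restating the condition used by the helper above, for clarity (hypothetical
 * name, not built): a segment is unloaded when the new CPL is numerically
 * greater (less privileged) than its DPL, it is a code/data segment rather
 * than a system one, and it is not conforming code.
 */
#if 0 /* illustrative sketch, not built */
static bool iemSketchMustUnloadSelOnCplChange(uint8_t uNewCpl, uint8_t uDpl, bool fCodeOrData, uint8_t u4Type)
{
    return uNewCpl > uDpl
        && fCodeOrData
        && (u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF);
}
#endif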
145
146/**
147 * Indicates that we have modified the FPU state.
148 *
149 * @param pIemCpu The IEM state of the calling EMT.
150 */
151DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
152{
153 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
154}
155
156/** @} */
157
158/** @name C Implementations
159 * @{
160 */
161
162/**
163 * Implements a 16-bit popa.
164 */
165IEM_CIMPL_DEF_0(iemCImpl_popa_16)
166{
167 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
168 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
169 RTGCPTR GCPtrLast = GCPtrStart + 15;
170 VBOXSTRICTRC rcStrict;
171
172 /*
173 * The docs are a bit hard to comprehend here, but it looks like we wrap
174 * around in real mode as long as none of the individual "popa" crosses the
175 * end of the stack segment. In protected mode we check the whole access
176 * in one go. For efficiency, only do the word-by-word thing if we're in
177 * danger of wrapping around.
178 */
179 /** @todo do popa boundary / wrap-around checks. */
180 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
181 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
182 {
183 /* word-by-word */
184 RTUINT64U TmpRsp;
185 TmpRsp.u = pCtx->rsp;
186 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
187 if (rcStrict == VINF_SUCCESS)
188 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
189 if (rcStrict == VINF_SUCCESS)
190 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
191 if (rcStrict == VINF_SUCCESS)
192 {
193 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
194 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
195 }
196 if (rcStrict == VINF_SUCCESS)
197 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
198 if (rcStrict == VINF_SUCCESS)
199 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
200 if (rcStrict == VINF_SUCCESS)
201 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 pCtx->rsp = TmpRsp.u;
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 else
209 {
210 uint16_t const *pa16Mem = NULL;
211 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
212 if (rcStrict == VINF_SUCCESS)
213 {
214 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
215 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
216 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
217 /* skip sp */
218 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
219 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
220 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
221 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
222 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
223 if (rcStrict == VINF_SUCCESS)
224 {
225 iemRegAddToRsp(pCtx, 16);
226 iemRegAddToRip(pIemCpu, cbInstr);
227 }
228 }
229 }
230 return rcStrict;
231}
232
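/*
 * Why the mapped frame above is indexed with 7 - X86_GREG_xXX: PUSHA stores
 * AX, CX, DX, BX, SP, BP, SI, DI from high towards low addresses, so read as
 * an array (lowest address first) the POPA frame is DI, SI, BP, SP, BX, DX,
 * CX, AX.  With the usual register numbering (AX=0 ... DI=7) the word for
 * register r therefore lives at index 7 - r.  Illustrative sketch only,
 * hypothetical name, not built.
 */
#if 0 /* illustrative sketch, not built */
static uint16_t iemSketchPopaFrameWord(uint16_t const *pa16Frame, unsigned iGReg /* 0=AX .. 7=DI */)
{
    return pa16Frame[7 - iGReg];
}
#endif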
233
234/**
235 * Implements a 32-bit popa.
236 */
237IEM_CIMPL_DEF_0(iemCImpl_popa_32)
238{
239 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
240 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
241 RTGCPTR GCPtrLast = GCPtrStart + 31;
242 VBOXSTRICTRC rcStrict;
243
244 /*
245 * The docs are a bit hard to comprehend here, but it looks like we wrap
246 * around in real mode as long as none of the individual "popa" crosses the
247 * end of the stack segment. In protected mode we check the whole access
248 * in one go. For efficiency, only do the word-by-word thing if we're in
249 * danger of wrapping around.
250 */
251 /** @todo do popa boundary / wrap-around checks. */
252 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
253 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
254 {
255 /* dword by dword */
256 RTUINT64U TmpRsp;
257 TmpRsp.u = pCtx->rsp;
258 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
261 if (rcStrict == VINF_SUCCESS)
262 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
263 if (rcStrict == VINF_SUCCESS)
264 {
265 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
266 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
267 }
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
272 if (rcStrict == VINF_SUCCESS)
273 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
274 if (rcStrict == VINF_SUCCESS)
275 {
276#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
277 pCtx->rdi &= UINT32_MAX;
278 pCtx->rsi &= UINT32_MAX;
279 pCtx->rbp &= UINT32_MAX;
280 pCtx->rbx &= UINT32_MAX;
281 pCtx->rdx &= UINT32_MAX;
282 pCtx->rcx &= UINT32_MAX;
283 pCtx->rax &= UINT32_MAX;
284#endif
285 pCtx->rsp = TmpRsp.u;
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 else
290 {
291 uint32_t const *pa32Mem;
292 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
293 if (rcStrict == VINF_SUCCESS)
294 {
295 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
296 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
297 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
298 /* skip esp */
299 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
300 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
301 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
302 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
303 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
304 if (rcStrict == VINF_SUCCESS)
305 {
306 iemRegAddToRsp(pCtx, 32);
307 iemRegAddToRip(pIemCpu, cbInstr);
308 }
309 }
310 }
311 return rcStrict;
312}
313
314
315/**
316 * Implements a 16-bit pusha.
317 */
318IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
319{
320 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
321 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
322 RTGCPTR GCPtrBottom = GCPtrTop - 15;
323 VBOXSTRICTRC rcStrict;
324
325 /*
326 * The docs are a bit hard to comprehend here, but it looks like we wrap
327 * around in real mode as long as none of the individual pushes crosses the
328 * end of the stack segment. In protected mode we check the whole access
329 * in one go. For efficiency, only do the word-by-word thing if we're in
330 * danger of wrapping around.
331 */
332 /** @todo do pusha boundary / wrap-around checks. */
333 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
334 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
335 {
336 /* word-by-word */
337 RTUINT64U TmpRsp;
338 TmpRsp.u = pCtx->rsp;
339 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
344 if (rcStrict == VINF_SUCCESS)
345 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
346 if (rcStrict == VINF_SUCCESS)
347 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
348 if (rcStrict == VINF_SUCCESS)
349 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
350 if (rcStrict == VINF_SUCCESS)
351 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
352 if (rcStrict == VINF_SUCCESS)
353 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
354 if (rcStrict == VINF_SUCCESS)
355 {
356 pCtx->rsp = TmpRsp.u;
357 iemRegAddToRip(pIemCpu, cbInstr);
358 }
359 }
360 else
361 {
362 GCPtrBottom--;
363 uint16_t *pa16Mem = NULL;
364 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
365 if (rcStrict == VINF_SUCCESS)
366 {
367 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
368 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
369 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
370 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
371 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
372 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
373 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
374 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
375 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
376 if (rcStrict == VINF_SUCCESS)
377 {
378 iemRegSubFromRsp(pCtx, 16);
379 iemRegAddToRip(pIemCpu, cbInstr);
380 }
381 }
382 }
383 return rcStrict;
384}
385
386
387/**
388 * Implements a 32-bit pusha.
389 */
390IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
391{
392 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
393 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
394 RTGCPTR GCPtrBottom = GCPtrTop - 31;
395 VBOXSTRICTRC rcStrict;
396
397 /*
398 * The docs are a bit hard to comprehend here, but it looks like we wrap
399 * around in real mode as long as none of the individual "pusha" crosses the
400 * end of the stack segment. In protected mode we check the whole access
401 * in one go. For efficiency, only do the word-by-word thing if we're in
402 * danger of wrapping around.
403 */
404 /** @todo do pusha boundary / wrap-around checks. */
405 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
406 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
407 {
409 /* dword by dword */
409 RTUINT64U TmpRsp;
410 TmpRsp.u = pCtx->rsp;
411 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
424 if (rcStrict == VINF_SUCCESS)
425 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
426 if (rcStrict == VINF_SUCCESS)
427 {
428 pCtx->rsp = TmpRsp.u;
429 iemRegAddToRip(pIemCpu, cbInstr);
430 }
431 }
432 else
433 {
434 GCPtrBottom--;
435 uint32_t *pa32Mem;
436 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
437 if (rcStrict == VINF_SUCCESS)
438 {
439 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
440 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
441 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
442 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
443 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
444 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
445 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
446 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
447 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
448 if (rcStrict == VINF_SUCCESS)
449 {
450 iemRegSubFromRsp(pCtx, 32);
451 iemRegAddToRip(pIemCpu, cbInstr);
452 }
453 }
454 }
455 return rcStrict;
456}
457
458
459/**
460 * Implements pushf.
461 *
462 *
464 */
465IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
466{
467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
468
469 /*
470 * If we're in V8086 mode some care is required (which is why we're
471 * doing this in a C implementation).
472 */
473 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
474 if ( (fEfl & X86_EFL_VM)
475 && X86_EFL_GET_IOPL(fEfl) != 3 )
476 {
477 Assert(pCtx->cr0 & X86_CR0_PE);
478 if ( enmEffOpSize != IEMMODE_16BIT
479 || !(pCtx->cr4 & X86_CR4_VME))
480 return iemRaiseGeneralProtectionFault0(pIemCpu);
481 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
482 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
483 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
484 }
485
486 /*
487 * Ok, clear RF and VM and push the flags.
488 */
489 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
490
491 VBOXSTRICTRC rcStrict;
492 switch (enmEffOpSize)
493 {
494 case IEMMODE_16BIT:
495 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
496 break;
497 case IEMMODE_32BIT:
498 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
499 break;
500 case IEMMODE_64BIT:
501 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
502 break;
503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
504 }
505 if (rcStrict != VINF_SUCCESS)
506 return rcStrict;
507
508 iemRegAddToRip(pIemCpu, cbInstr);
509 return VINF_SUCCESS;
510}
511
512
513/**
514 * Implements popf.
515 *
516 * @param enmEffOpSize The effective operand size.
517 */
518IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
519{
520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
521 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
522 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
523 VBOXSTRICTRC rcStrict;
524 uint32_t fEflNew;
525
526 /*
527 * V8086 is special as usual.
528 */
529 if (fEflOld & X86_EFL_VM)
530 {
531 /*
532 * Almost anything goes if IOPL is 3.
533 */
534 if (X86_EFL_GET_IOPL(fEflOld) == 3)
535 {
536 switch (enmEffOpSize)
537 {
538 case IEMMODE_16BIT:
539 {
540 uint16_t u16Value;
541 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
542 if (rcStrict != VINF_SUCCESS)
543 return rcStrict;
544 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
545 break;
546 }
547 case IEMMODE_32BIT:
548 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
549 if (rcStrict != VINF_SUCCESS)
550 return rcStrict;
551 break;
552 IEM_NOT_REACHED_DEFAULT_CASE_RET();
553 }
554
555 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
556 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
557 }
558 /*
559 * Interrupt flag virtualization with CR4.VME=1.
560 */
561 else if ( enmEffOpSize == IEMMODE_16BIT
562 && (pCtx->cr4 & X86_CR4_VME) )
563 {
564 uint16_t u16Value;
565 RTUINT64U TmpRsp;
566 TmpRsp.u = pCtx->rsp;
567 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
568 if (rcStrict != VINF_SUCCESS)
569 return rcStrict;
570
571 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
572 * or before? */
573 if ( ( (u16Value & X86_EFL_IF)
574 && (fEflOld & X86_EFL_VIP))
575 || (u16Value & X86_EFL_TF) )
576 return iemRaiseGeneralProtectionFault0(pIemCpu);
577
578 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
579 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
580 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
581 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
582
583 pCtx->rsp = TmpRsp.u;
584 }
585 else
586 return iemRaiseGeneralProtectionFault0(pIemCpu);
587
588 }
589 /*
590 * Not in V8086 mode.
591 */
592 else
593 {
594 /* Pop the flags. */
595 switch (enmEffOpSize)
596 {
597 case IEMMODE_16BIT:
598 {
599 uint16_t u16Value;
600 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
601 if (rcStrict != VINF_SUCCESS)
602 return rcStrict;
603 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
604 break;
605 }
606 case IEMMODE_32BIT:
607 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
608 if (rcStrict != VINF_SUCCESS)
609 return rcStrict;
610 break;
611 case IEMMODE_64BIT:
612 {
613 uint64_t u64Value;
614 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
615 if (rcStrict != VINF_SUCCESS)
616 return rcStrict;
617 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
618 break;
619 }
620 IEM_NOT_REACHED_DEFAULT_CASE_RET();
621 }
622
623 /* Merge them with the current flags. */
624 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
625 || pIemCpu->uCpl == 0)
626 {
627 fEflNew &= X86_EFL_POPF_BITS;
628 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
629 }
630 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
631 {
632 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
633 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
634 }
635 else
636 {
637 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
638 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
639 }
640 }
641
642 /*
643 * Commit the flags.
644 */
645 Assert(fEflNew & RT_BIT_32(1));
646 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
647 iemRegAddToRip(pIemCpu, cbInstr);
648
649 return VINF_SUCCESS;
650}
651
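/*
 * The (19 - 9) shifts in pushf/popf above move a flag between the IF position
 * (bit 9) and the VIF position (bit 19) for CR4.VME interrupt virtualization.
 * The sketch below is illustrative only (hypothetical names, not built) and
 * omits the X86_EFL_POPF_BITS masking the real popf code applies afterwards.
 */
#if 0 /* illustrative sketch, not built */
static uint16_t iemSketchPushfImageVme(uint32_t fEfl)
{
    fEfl &= ~X86_EFL_IF;                            /* hide the real IF... */
    fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);       /* ...and present VIF in its place */
    return (uint16_t)fEfl;
}

static uint32_t iemSketchPopfVifFromIf(uint32_t fEflOld, uint16_t u16Popped)
{
    uint32_t fEflNew = u16Popped | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
    fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);  /* the popped IF becomes VIF */
    return fEflNew;
}
#endif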
652
653/**
654 * Implements a 16-bit indirect call.
655 *
656 * @param uNewPC The new program counter (RIP) value (loaded from the
657 * operand).
659 */
660IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
661{
662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
663 uint16_t uOldPC = pCtx->ip + cbInstr;
664 if (uNewPC > pCtx->cs.u32Limit)
665 return iemRaiseGeneralProtectionFault0(pIemCpu);
666
667 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
668 if (rcStrict != VINF_SUCCESS)
669 return rcStrict;
670
671 pCtx->rip = uNewPC;
672 return VINF_SUCCESS;
673
674}
675
676
677/**
678 * Implements a 16-bit relative call.
679 *
680 * @param offDisp The displacement offset.
681 */
682IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
683{
684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
685 uint16_t uOldPC = pCtx->ip + cbInstr;
686 uint16_t uNewPC = uOldPC + offDisp;
687 if (uNewPC > pCtx->cs.u32Limit)
688 return iemRaiseGeneralProtectionFault0(pIemCpu);
689
690 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
691 if (rcStrict != VINF_SUCCESS)
692 return rcStrict;
693
694 pCtx->rip = uNewPC;
695 return VINF_SUCCESS;
696}
697
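/*
 * The 16-bit target above is computed in uint16_t arithmetic, so displacements
 * wrap within the segment before the CS.limit check.  Minimal illustration
 * (hypothetical name, not built).
 */
#if 0 /* illustrative sketch, not built */
static uint16_t iemSketchRel16Target(uint16_t uOldPC, int16_t offDisp)
{
    return (uint16_t)(uOldPC + offDisp);    /* e.g. 0xfff0 + 0x0020 wraps to 0x0010 */
}
#endif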
698
699/**
700 * Implements a 32-bit indirect call.
701 *
702 * @param uNewPC The new program counter (RIP) value (loaded from the
703 * operand).
705 */
706IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
707{
708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
709 uint32_t uOldPC = pCtx->eip + cbInstr;
710 if (uNewPC > pCtx->cs.u32Limit)
711 return iemRaiseGeneralProtectionFault0(pIemCpu);
712
713 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
714 if (rcStrict != VINF_SUCCESS)
715 return rcStrict;
716
717 pCtx->rip = uNewPC;
718 return VINF_SUCCESS;
719
720}
721
722
723/**
724 * Implements a 32-bit relative call.
725 *
726 * @param offDisp The displacement offset.
727 */
728IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
729{
730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
731 uint32_t uOldPC = pCtx->eip + cbInstr;
732 uint32_t uNewPC = uOldPC + offDisp;
733 if (uNewPC > pCtx->cs.u32Limit)
734 return iemRaiseGeneralProtectionFault0(pIemCpu);
735
736 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
737 if (rcStrict != VINF_SUCCESS)
738 return rcStrict;
739
740 pCtx->rip = uNewPC;
741 return VINF_SUCCESS;
742}
743
744
745/**
746 * Implements a 64-bit indirect call.
747 *
748 * @param uNewPC The new program counter (RIP) value (loaded from the
749 * operand).
751 */
752IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
753{
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint64_t uOldPC = pCtx->rip + cbInstr;
756 if (!IEM_IS_CANONICAL(uNewPC))
757 return iemRaiseGeneralProtectionFault0(pIemCpu);
758
759 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 pCtx->rip = uNewPC;
764 return VINF_SUCCESS;
765
766}
767
768
769/**
770 * Implements a 64-bit relative call.
771 *
772 * @param offDisp The displacement offset.
773 */
774IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
775{
776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
777 uint64_t uOldPC = pCtx->rip + cbInstr;
778 uint64_t uNewPC = uOldPC + offDisp;
779 if (!IEM_IS_CANONICAL(uNewPC))
780 return iemRaiseNotCanonical(pIemCpu);
781
782 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
783 if (rcStrict != VINF_SUCCESS)
784 return rcStrict;
785
786 pCtx->rip = uNewPC;
787 return VINF_SUCCESS;
788}
789
790
791/**
792 * Implements far jumps and calls thru task segments (TSS).
793 *
794 * @param uSel The selector.
795 * @param enmBranch The kind of branching we're performing.
796 * @param enmEffOpSize The effective operand size.
797 * @param pDesc The descriptor corresponding to @a uSel. The type is
798 * task segment (TSS).
799 */
800IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
801{
802 /* Call various functions to do the work. */
803 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
804}
805
806
807/**
808 * Implements far jumps and calls thru task gates.
809 *
810 * @param uSel The selector.
811 * @param enmBranch The kind of branching we're performing.
812 * @param enmEffOpSize The effective operand size.
813 * @param pDesc The descriptor corresponding to @a uSel. The type is
814 * task gate.
815 */
816IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
817{
818 /* Call various functions to do the work. */
819 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
820}
821
822
823/**
824 * Implements far jumps and calls thru call gates.
825 *
826 * @param uSel The selector.
827 * @param enmBranch The kind of branching we're performing.
828 * @param enmEffOpSize The effective operand size.
829 * @param pDesc The descriptor corresponding to @a uSel. The type is
830 * call gate.
831 */
832IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
833{
834 /* Call various functions to do the work. */
835 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
836}
837
838
839/**
840 * Implements far jumps and calls thru system selectors.
841 *
842 * @param uSel The selector.
843 * @param enmBranch The kind of branching we're performing.
844 * @param enmEffOpSize The effective operand size.
845 * @param pDesc The descriptor corresponding to @a uSel.
846 */
847IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
848{
849 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
850 Assert((uSel & X86_SEL_MASK_OFF_RPL));
851
852 if (IEM_IS_LONG_MODE(pIemCpu))
853 switch (pDesc->Legacy.Gen.u4Type)
854 {
855 case AMD64_SEL_TYPE_SYS_CALL_GATE:
856 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
857
858 default:
859 case AMD64_SEL_TYPE_SYS_LDT:
860 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
861 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
862 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
863 case AMD64_SEL_TYPE_SYS_INT_GATE:
864 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
865 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
866
867 }
868
869 switch (pDesc->Legacy.Gen.u4Type)
870 {
871 case X86_SEL_TYPE_SYS_286_CALL_GATE:
872 case X86_SEL_TYPE_SYS_386_CALL_GATE:
873 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
874
875 case X86_SEL_TYPE_SYS_TASK_GATE:
876 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
877
878 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
879 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
880 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
881
882 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
883 Log(("branch %04x -> busy 286 TSS\n", uSel));
884 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
885
886 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
887 Log(("branch %04x -> busy 386 TSS\n", uSel));
888 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
889
890 default:
891 case X86_SEL_TYPE_SYS_LDT:
892 case X86_SEL_TYPE_SYS_286_INT_GATE:
893 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
894 case X86_SEL_TYPE_SYS_386_INT_GATE:
895 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
896 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
897 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
898 }
899}
900
901
902/**
903 * Implements far jumps.
904 *
905 * @param uSel The selector.
906 * @param offSeg The segment offset.
907 * @param enmEffOpSize The effective operand size.
908 */
909IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
910{
911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
912 NOREF(cbInstr);
913 Assert(offSeg <= UINT32_MAX);
914
915 /*
916 * Real mode and V8086 mode are easy. The only snag seems to be that
917 * CS.limit doesn't change and the limit check is done against the current
918 * limit.
919 */
920 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
921 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
922 {
923 if (offSeg > pCtx->cs.u32Limit)
924 return iemRaiseGeneralProtectionFault0(pIemCpu);
925
926 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
927 pCtx->rip = offSeg;
928 else
929 pCtx->rip = offSeg & UINT16_MAX;
930 pCtx->cs.Sel = uSel;
931 pCtx->cs.ValidSel = uSel;
932 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
933 pCtx->cs.u64Base = (uint32_t)uSel << 4;
934 return VINF_SUCCESS;
935 }
936
937 /*
938 * Protected mode. Need to parse the specified descriptor...
939 */
940 if (!(uSel & X86_SEL_MASK_OFF_RPL))
941 {
942 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
943 return iemRaiseGeneralProtectionFault0(pIemCpu);
944 }
945
946 /* Fetch the descriptor. */
947 IEMSELDESC Desc;
948 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
949 if (rcStrict != VINF_SUCCESS)
950 return rcStrict;
951
952 /* Is it there? */
953 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
954 {
955 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
956 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
957 }
958
959 /*
960 * Deal with it according to its type. We do the standard code selectors
961 * here and dispatch the system selectors to worker functions.
962 */
963 if (!Desc.Legacy.Gen.u1DescType)
964 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
965
966 /* Only code segments. */
967 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
968 {
969 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
970 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
971 }
972
973 /* L vs D. */
974 if ( Desc.Legacy.Gen.u1Long
975 && Desc.Legacy.Gen.u1DefBig
976 && IEM_IS_LONG_MODE(pIemCpu))
977 {
978 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
979 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
980 }
981
982 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
983 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
984 {
985 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
986 {
987 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
988 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
989 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
990 }
991 }
992 else
993 {
994 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
995 {
996 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
997 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
998 }
999 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1000 {
1001 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1002 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1003 }
1004 }
1005
1006 /* Chop the high bits if 16-bit (Intel says so). */
1007 if (enmEffOpSize == IEMMODE_16BIT)
1008 offSeg &= UINT16_MAX;
1009
1010 /* Limit check. (Should alternatively check for non-canonical addresses
1011 here, but that is ruled out by offSeg being 32-bit, right?) */
1012 uint64_t u64Base;
1013 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1015 u64Base = 0;
1016 else
1017 {
1018 if (offSeg > cbLimit)
1019 {
1020 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1021 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1022 }
1023 u64Base = X86DESC_BASE(&Desc.Legacy);
1024 }
1025
1026 /*
1027 * Ok, everything checked out fine. Now set the accessed bit before
1028 * committing the result into CS, CSHID and RIP.
1029 */
1030 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1031 {
1032 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1033 if (rcStrict != VINF_SUCCESS)
1034 return rcStrict;
1035 /** @todo check what VT-x and AMD-V does. */
1036 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1037 }
1038
1039 /* commit */
1040 pCtx->rip = offSeg;
1041 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1042 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1043 pCtx->cs.ValidSel = pCtx->cs.Sel;
1044 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1045 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1046 pCtx->cs.u32Limit = cbLimit;
1047 pCtx->cs.u64Base = u64Base;
1048 /** @todo check if the hidden bits are loaded correctly for 64-bit
1049 * mode. */
1050 return VINF_SUCCESS;
1051}
1052
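/*
 * In the real/V86 path above the new CS base is simply the selector shifted
 * left by four, and the offset is checked against the *current* CS limit.
 * Illustrative computation of the resulting linear address (hypothetical
 * name, not built).
 */
#if 0 /* illustrative sketch, not built */
static uint32_t iemSketchRealModeBranchLinear(uint16_t uSel, uint32_t offSeg)
{
    return ((uint32_t)uSel << 4) + offSeg;  /* e.g. f000:fff0 -> 000ffff0 */
}
#endif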
1053
1054/**
1055 * Implements far calls.
1056 *
1057 * This is very similar to iemCImpl_FarJmp.
1058 *
1059 * @param uSel The selector.
1060 * @param offSeg The segment offset.
1061 * @param enmEffOpSize The operand size (in case we need it).
1062 */
1063IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1064{
1065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1066 VBOXSTRICTRC rcStrict;
1067 uint64_t uNewRsp;
1068 RTPTRUNION uPtrRet;
1069
1070 /*
1071 * Real mode and V8086 mode are easy. The only snag seems to be that
1072 * CS.limit doesn't change and the limit check is done against the current
1073 * limit.
1074 */
1075 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1076 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1077 {
1078 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1079
1080 /* Check stack first - may #SS(0). */
1081 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1082 &uPtrRet.pv, &uNewRsp);
1083 if (rcStrict != VINF_SUCCESS)
1084 return rcStrict;
1085
1086 /* Check the target address range. */
1087 if (offSeg > UINT32_MAX)
1088 return iemRaiseGeneralProtectionFault0(pIemCpu);
1089
1090 /* Everything is fine, push the return address. */
1091 if (enmEffOpSize == IEMMODE_16BIT)
1092 {
1093 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1094 uPtrRet.pu16[1] = pCtx->cs.Sel;
1095 }
1096 else
1097 {
1098 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1099 uPtrRet.pu16[3] = pCtx->cs.Sel;
1100 }
1101 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1102 if (rcStrict != VINF_SUCCESS)
1103 return rcStrict;
1104
1105 /* Branch. */
1106 pCtx->rip = offSeg;
1107 pCtx->cs.Sel = uSel;
1108 pCtx->cs.ValidSel = uSel;
1109 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1110 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1111 return VINF_SUCCESS;
1112 }
1113
1114 /*
1115 * Protected mode. Need to parse the specified descriptor...
1116 */
1117 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1118 {
1119 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1120 return iemRaiseGeneralProtectionFault0(pIemCpu);
1121 }
1122
1123 /* Fetch the descriptor. */
1124 IEMSELDESC Desc;
1125 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1126 if (rcStrict != VINF_SUCCESS)
1127 return rcStrict;
1128
1129 /*
1130 * Deal with it according to its type. We do the standard code selectors
1131 * here and dispatch the system selectors to worker functions.
1132 */
1133 if (!Desc.Legacy.Gen.u1DescType)
1134 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1135
1136 /* Only code segments. */
1137 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1138 {
1139 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1140 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1141 }
1142
1143 /* L vs D. */
1144 if ( Desc.Legacy.Gen.u1Long
1145 && Desc.Legacy.Gen.u1DefBig
1146 && IEM_IS_LONG_MODE(pIemCpu))
1147 {
1148 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1149 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1150 }
1151
1152 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1153 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1154 {
1155 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1156 {
1157 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1158 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1159 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1160 }
1161 }
1162 else
1163 {
1164 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1165 {
1166 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1167 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1168 }
1169 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1170 {
1171 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1172 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1173 }
1174 }
1175
1176 /* Is it there? */
1177 if (!Desc.Legacy.Gen.u1Present)
1178 {
1179 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1180 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1181 }
1182
1183 /* Check stack first - may #SS(0). */
1184 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1185 * 16-bit code cause a two or four byte CS to be pushed? */
1186 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1187 enmEffOpSize == IEMMODE_64BIT ? 8+8
1188 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1189 &uPtrRet.pv, &uNewRsp);
1190 if (rcStrict != VINF_SUCCESS)
1191 return rcStrict;
1192
1193 /* Chop the high bits if 16-bit (Intel says so). */
1194 if (enmEffOpSize == IEMMODE_16BIT)
1195 offSeg &= UINT16_MAX;
1196
1197 /* Limit / canonical check. */
1198 uint64_t u64Base;
1199 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1200 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1201 {
1202 if (!IEM_IS_CANONICAL(offSeg))
1203 {
1204 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1205 return iemRaiseNotCanonical(pIemCpu);
1206 }
1207 u64Base = 0;
1208 }
1209 else
1210 {
1211 if (offSeg > cbLimit)
1212 {
1213 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1214 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1215 }
1216 u64Base = X86DESC_BASE(&Desc.Legacy);
1217 }
1218
1219 /*
1220 * Now set the accessed bit before
1221 * writing the return address to the stack and committing the result into
1222 * CS, CSHID and RIP.
1223 */
1224 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1225 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1226 {
1227 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1228 if (rcStrict != VINF_SUCCESS)
1229 return rcStrict;
1230 /** @todo check what VT-x and AMD-V does. */
1231 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1232 }
1233
1234 /* stack */
1235 if (enmEffOpSize == IEMMODE_16BIT)
1236 {
1237 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1238 uPtrRet.pu16[1] = pCtx->cs.Sel;
1239 }
1240 else if (enmEffOpSize == IEMMODE_32BIT)
1241 {
1242 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1243 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1244 }
1245 else
1246 {
1247 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1248 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1249 }
1250 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1251 if (rcStrict != VINF_SUCCESS)
1252 return rcStrict;
1253
1254 /* commit */
1255 pCtx->rip = offSeg;
1256 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1257 pCtx->cs.Sel |= pIemCpu->uCpl;
1258 pCtx->cs.ValidSel = pCtx->cs.Sel;
1259 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1260 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1261 pCtx->cs.u32Limit = cbLimit;
1262 pCtx->cs.u64Base = u64Base;
1263 /** @todo check if the hidden bits are loaded correctly for 64-bit
1264 * mode. */
1265 return VINF_SUCCESS;
1266}
1267
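/*
 * The 2+2 / 4+4 / 8+8 byte counts used when callf above pushes the return
 * address, and again when retf below pops it, come straight from the operand
 * size.  Illustrative helper (hypothetical name, not built).
 */
#if 0 /* illustrative sketch, not built */
static uint32_t iemSketchFarRetPtrSize(IEMMODE enmEffOpSize)
{
    return enmEffOpSize == IEMMODE_16BIT ? 2 + 2    /* IP  + CS */
         : enmEffOpSize == IEMMODE_32BIT ? 4 + 4    /* EIP + CS */
         :                                 8 + 8;   /* RIP + CS (padded) */
}
#endif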
1268
1269/**
1270 * Implements retf.
1271 *
1272 * @param enmEffOpSize The effective operand size.
1273 * @param cbPop The number of bytes of arguments to pop from the
1274 * stack.
1275 */
1276IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1277{
1278 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1279 VBOXSTRICTRC rcStrict;
1280 RTCPTRUNION uPtrFrame;
1281 uint64_t uNewRsp;
1282 uint64_t uNewRip;
1283 uint16_t uNewCs;
1284 NOREF(cbInstr);
1285
1286 /*
1287 * Read the stack values first.
1288 */
1289 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1290 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1291 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1292 if (rcStrict != VINF_SUCCESS)
1293 return rcStrict;
1294 if (enmEffOpSize == IEMMODE_16BIT)
1295 {
1296 uNewRip = uPtrFrame.pu16[0];
1297 uNewCs = uPtrFrame.pu16[1];
1298 }
1299 else if (enmEffOpSize == IEMMODE_32BIT)
1300 {
1301 uNewRip = uPtrFrame.pu32[0];
1302 uNewCs = uPtrFrame.pu16[2];
1303 }
1304 else
1305 {
1306 uNewRip = uPtrFrame.pu64[0];
1307 uNewCs = uPtrFrame.pu16[4];
1308 }
1309
1310 /*
1311 * Real mode and V8086 mode are easy.
1312 */
1313 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1314 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1315 {
1316 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1317 /** @todo check how this is supposed to work if sp=0xfffe. */
1318
1319 /* Check the limit of the new EIP. */
1320 /** @todo Intel pseudo code only does the limit check for 16-bit
1321 * operands, AMD does not make any distinction. What is right? */
1322 if (uNewRip > pCtx->cs.u32Limit)
1323 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1324
1325 /* commit the operation. */
1326 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329 pCtx->rip = uNewRip;
1330 pCtx->cs.Sel = uNewCs;
1331 pCtx->cs.ValidSel = uNewCs;
1332 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1333 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1334 /** @todo do we load attribs and limit as well? */
1335 if (cbPop)
1336 iemRegAddToRsp(pCtx, cbPop);
1337 return VINF_SUCCESS;
1338 }
1339
1340 /*
1341 * Protected mode is complicated, of course.
1342 */
1343 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1344 {
1345 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1346 return iemRaiseGeneralProtectionFault0(pIemCpu);
1347 }
1348
1349 /* Fetch the descriptor. */
1350 IEMSELDESC DescCs;
1351 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1352 if (rcStrict != VINF_SUCCESS)
1353 return rcStrict;
1354
1355 /* Can only return to a code selector. */
1356 if ( !DescCs.Legacy.Gen.u1DescType
1357 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1358 {
1359 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1360 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1361 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1362 }
1363
1364 /* L vs D. */
1365 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1366 && DescCs.Legacy.Gen.u1DefBig
1367 && IEM_IS_LONG_MODE(pIemCpu))
1368 {
1369 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1370 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1371 }
1372
1373 /* DPL/RPL/CPL checks. */
1374 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1375 {
1376 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1377 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1378 }
1379
1380 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1381 {
1382 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1383 {
1384 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1385 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1386 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1387 }
1388 }
1389 else
1390 {
1391 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1392 {
1393 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1394 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1395 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1396 }
1397 }
1398
1399 /* Is it there? */
1400 if (!DescCs.Legacy.Gen.u1Present)
1401 {
1402 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1403 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1404 }
1405
1406 /*
1407 * Return to outer privilege? (We'll typically have entered via a call gate.)
1408 */
1409 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1410 {
1411 /* Read the return pointer, it comes before the parameters. */
1412 RTCPTRUNION uPtrStack;
1413 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1414 if (rcStrict != VINF_SUCCESS)
1415 return rcStrict;
1416 uint16_t uNewOuterSs;
1417 uint64_t uNewOuterRsp;
1418 if (enmEffOpSize == IEMMODE_16BIT)
1419 {
1420 uNewOuterRsp = uPtrFrame.pu16[0];
1421 uNewOuterSs = uPtrFrame.pu16[1];
1422 }
1423 else if (enmEffOpSize == IEMMODE_32BIT)
1424 {
1425 uNewOuterRsp = uPtrFrame.pu32[0];
1426 uNewOuterSs = uPtrFrame.pu16[2];
1427 }
1428 else
1429 {
1430 uNewOuterRsp = uPtrFrame.pu64[0];
1431 uNewOuterSs = uPtrFrame.pu16[4];
1432 }
1433
1434 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1435 and read the selector. */
1436 IEMSELDESC DescSs;
1437 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1438 {
1439 if ( !DescCs.Legacy.Gen.u1Long
1440 || (uNewOuterSs & X86_SEL_RPL) == 3)
1441 {
1442 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1443 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1444 return iemRaiseGeneralProtectionFault0(pIemCpu);
1445 }
1446 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1447 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1448 }
1449 else
1450 {
1451 /* Fetch the descriptor for the new stack segment. */
1452 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1453 if (rcStrict != VINF_SUCCESS)
1454 return rcStrict;
1455 }
1456
1457 /* Check that RPL of stack and code selectors match. */
1458 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1459 {
1460 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1461 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1462 }
1463
1464 /* Must be a writable data segment. */
1465 if ( !DescSs.Legacy.Gen.u1DescType
1466 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1467 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1468 {
1469 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1470 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1471 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1472 }
1473
1474 /* L vs D. (Not mentioned by Intel.) */
1475 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1476 && DescSs.Legacy.Gen.u1DefBig
1477 && IEM_IS_LONG_MODE(pIemCpu))
1478 {
1479 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1480 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1481 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1482 }
1483
1484 /* DPL/RPL/CPL checks. */
1485 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1486 {
1487 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1488 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1489 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1490 }
1491
1492 /* Is it there? */
1493 if (!DescSs.Legacy.Gen.u1Present)
1494 {
1495 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1496 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewOuterSs);
1497 }
1498
1499 /* Calc SS limit.*/
1500 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1501
1502 /* Is RIP canonical or within CS.limit? */
1503 uint64_t u64Base;
1504 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1505
1506 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1507 {
1508 if (!IEM_IS_CANONICAL(uNewRip))
1509 {
1510 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1511 return iemRaiseNotCanonical(pIemCpu);
1512 }
1513 u64Base = 0;
1514 }
1515 else
1516 {
1517 if (uNewRip > cbLimitCs)
1518 {
1519 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1520 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1521 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1522 }
1523 u64Base = X86DESC_BASE(&DescCs.Legacy);
1524 }
1525
1526 /*
1527 * Now set the accessed bit before
1528 * writing the return address to the stack and committing the result into
1529 * CS, CSHID and RIP.
1530 */
1531 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1532 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1533 {
1534 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1535 if (rcStrict != VINF_SUCCESS)
1536 return rcStrict;
1537 /** @todo check what VT-x and AMD-V does. */
1538 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1539 }
1540 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1541 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1542 {
1543 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1544 if (rcStrict != VINF_SUCCESS)
1545 return rcStrict;
1546 /** @todo check what VT-x and AMD-V does. */
1547 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1548 }
1549
1550 /* commit */
1551 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1552 if (rcStrict != VINF_SUCCESS)
1553 return rcStrict;
1554 if (enmEffOpSize == IEMMODE_16BIT)
1555 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1556 else
1557 pCtx->rip = uNewRip;
1558 pCtx->cs.Sel = uNewCs;
1559 pCtx->cs.ValidSel = uNewCs;
1560 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1561 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1562 pCtx->cs.u32Limit = cbLimitCs;
1563 pCtx->cs.u64Base = u64Base;
1564 pCtx->rsp = uNewRsp;
1565 pCtx->ss.Sel = uNewOuterSs;
1566 pCtx->ss.ValidSel = uNewOuterSs;
1567 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1568 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1569 pCtx->ss.u32Limit = cbLimitSs;
1570 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1571 pCtx->ss.u64Base = 0;
1572 else
1573 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1574
1575 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1576 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1577 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1578 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1579 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1580
1581 /** @todo check if the hidden bits are loaded correctly for 64-bit
1582 * mode. */
1583
1584 if (cbPop)
1585 iemRegAddToRsp(pCtx, cbPop);
1586
1587 /* Done! */
1588 }
1589 /*
1590 * Return to the same privilege level
1591 */
1592 else
1593 {
1594 /* Limit / canonical check. */
1595 uint64_t u64Base;
1596 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1597
1598 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1599 {
1600 if (!IEM_IS_CANONICAL(uNewRip))
1601 {
1602 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1603 return iemRaiseNotCanonical(pIemCpu);
1604 }
1605 u64Base = 0;
1606 }
1607 else
1608 {
1609 if (uNewRip > cbLimitCs)
1610 {
1611 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1612 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1613 }
1614 u64Base = X86DESC_BASE(&DescCs.Legacy);
1615 }
1616
1617 /*
1618 * Now set the accessed bit before
1619 * writing the return address to the stack and committing the result into
1620 * CS, CSHID and RIP.
1621 */
1622 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1623 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1624 {
1625 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1626 if (rcStrict != VINF_SUCCESS)
1627 return rcStrict;
1628 /** @todo check what VT-x and AMD-V does. */
1629 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1630 }
1631
1632 /* commit */
1633 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1634 if (rcStrict != VINF_SUCCESS)
1635 return rcStrict;
1636 if (enmEffOpSize == IEMMODE_16BIT)
1637 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1638 else
1639 pCtx->rip = uNewRip;
1640 pCtx->cs.Sel = uNewCs;
1641 pCtx->cs.ValidSel = uNewCs;
1642 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1643 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1644 pCtx->cs.u32Limit = cbLimitCs;
1645 pCtx->cs.u64Base = u64Base;
1646 /** @todo check if the hidden bits are loaded correctly for 64-bit
1647 * mode. */
1648 if (cbPop)
1649 iemRegAddToRsp(pCtx, cbPop);
1650 }
1651 return VINF_SUCCESS;
1652}
1653
1654
1655/**
1656 * Implements retn.
1657 *
1658 * We're doing this in C because of the \#GP that might be raised if the popped
1659 * program counter is out of bounds.
1660 *
1661 * @param enmEffOpSize The effective operand size.
1662 * @param cbPop The number of bytes of arguments to pop from the
1663 * stack.
1664 */
1665IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1666{
1667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1668 NOREF(cbInstr);
1669
1670 /* Fetch the RSP from the stack. */
1671 VBOXSTRICTRC rcStrict;
1672 RTUINT64U NewRip;
1673 RTUINT64U NewRsp;
1674 NewRsp.u = pCtx->rsp;
1675 switch (enmEffOpSize)
1676 {
1677 case IEMMODE_16BIT:
1678 NewRip.u = 0;
1679 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1680 break;
1681 case IEMMODE_32BIT:
1682 NewRip.u = 0;
1683 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1684 break;
1685 case IEMMODE_64BIT:
1686 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1687 break;
1688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1689 }
1690 if (rcStrict != VINF_SUCCESS)
1691 return rcStrict;
1692
1693 /* Check the new RSP before loading it. */
1694 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1695 * of it. The canonical test is performed here and for call. */
1696 if (enmEffOpSize != IEMMODE_64BIT)
1697 {
1698 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1699 {
1700 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1701 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1702 }
1703 }
1704 else
1705 {
1706 if (!IEM_IS_CANONICAL(NewRip.u))
1707 {
1708 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1709 return iemRaiseNotCanonical(pIemCpu);
1710 }
1711 }
1712
1713 /* Commit it. */
1714 pCtx->rip = NewRip.u;
1715 pCtx->rsp = NewRsp.u;
1716 if (cbPop)
1717 iemRegAddToRsp(pCtx, cbPop);
1718
1719 return VINF_SUCCESS;
1720}
1721
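/*
 * The canonical test applied to the popped 64-bit RIP above (and by the far
 * branches earlier): bits 63:47 must all equal bit 47, i.e. the address must
 * sign-extend from 48 bits.  Illustrative stand-in for IEM_IS_CANONICAL
 * (hypothetical name, not built).
 */
#if 0 /* illustrative sketch, not built */
static bool iemSketchIsCanonical(uint64_t uAddr)
{
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
}
#endif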
1722
1723/**
1724 * Implements enter.
1725 *
1726 * We're doing this in C because the instruction is insane, even for the
1727 * u8NestingLevel=0 case, dealing with the stack is tedious.
1728 *
1729 * @param enmEffOpSize The effective operand size.
1730 * @param cbFrame The size of the stack frame to allocate.
1731 * @param cParameters The number of nesting levels, aka parameters (0..31).
1732 */
1731IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
1732{
1733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1734
1735 /* Push RBP, saving the old value in TmpRbp. */
1736 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
1737 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
1738 RTUINT64U NewRbp;
1739 VBOXSTRICTRC rcStrict;
1740 if (enmEffOpSize == IEMMODE_64BIT)
1741 {
1742 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
1743 NewRbp = NewRsp;
1744 }
1745 else if (pCtx->ss.Attr.n.u1DefBig)
1746 {
1747 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
1748 NewRbp = NewRsp;
1749 }
1750 else
1751 {
1752 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
1753 NewRbp = TmpRbp;
1754 NewRbp.Words.w0 = NewRsp.Words.w0;
1755 }
1756 if (rcStrict != VINF_SUCCESS)
1757 return rcStrict;
1758
1759 /* Copy the parameters (aka nesting levels by Intel). */
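 /* The nesting level is taken modulo 32, i.e. only bits 4:0 are used. */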
1760 cParameters &= 0x1f;
1761 if (cParameters > 0)
1762 {
1763 switch (enmEffOpSize)
1764 {
1765 case IEMMODE_16BIT:
1766 if (pCtx->ss.Attr.n.u1DefBig)
1767 TmpRbp.DWords.dw0 -= 2;
1768 else
1769 TmpRbp.Words.w0 -= 2;
1770 do
1771 {
1772 uint16_t u16Tmp;
1773 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
1774 if (rcStrict != VINF_SUCCESS)
1775 break;
1776 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
1777 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1778 break;
1779
1780 case IEMMODE_32BIT:
1781 if (pCtx->ss.Attr.n.u1DefBig)
1782 TmpRbp.DWords.dw0 -= 4;
1783 else
1784 TmpRbp.Words.w0 -= 4;
1785 do
1786 {
1787 uint32_t u32Tmp;
1788 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
1789 if (rcStrict != VINF_SUCCESS)
1790 break;
1791 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
1792 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1793 break;
1794
1795 case IEMMODE_64BIT:
1796 TmpRbp.u -= 8;
1797 do
1798 {
1799 uint64_t u64Tmp;
1800 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
1801 if (rcStrict != VINF_SUCCESS)
1802 break;
1803 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
1804 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1805 break;
1806
1807 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1808 }
1809 if (rcStrict != VINF_SUCCESS)
1810 return rcStrict;
1811
1812 /* Push the new RBP */
1813 if (enmEffOpSize == IEMMODE_64BIT)
1814 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
1815 else if (pCtx->ss.Attr.n.u1DefBig)
1816 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
1817 else
1818 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
1819 if (rcStrict != VINF_SUCCESS)
1820 return rcStrict;
1821
1822 }
1823
1824 /* Recalc RSP. */
1825 iemRegSubFromRspEx(&NewRsp, cbFrame, pCtx);
1826
1827 /** @todo Should probe write access at the new RSP according to AMD. */
1828
1829 /* Commit it. */
1830 pCtx->rbp = NewRbp.u;
1831 pCtx->rsp = NewRsp.u;
1832 iemRegAddToRip(pIemCpu, cbInstr);
1833
1834 return VINF_SUCCESS;
1835}
1836
1837
1838
1839/**
1840 * Implements leave.
1841 *
1842 * We're doing this in C because messing with the stack registers is annoying
1843 * since they depend on SS attributes.
1844 *
1845 * @param enmEffOpSize The effective operand size.
1846 */
1847IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1848{
1849 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1850
1851 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1852 RTUINT64U NewRsp;
1853 if (pCtx->ss.Attr.n.u1Long)
1854 NewRsp.u = pCtx->rbp;
1855 else if (pCtx->ss.Attr.n.u1DefBig)
1856 NewRsp.u = pCtx->ebp;
1857 else
1858 {
1859 /** @todo Check that LEAVE actually preserve the high EBP bits. */
1860 NewRsp.u = pCtx->rsp;
1861 NewRsp.Words.w0 = pCtx->bp;
1862 }
1863
1864 /* Pop RBP according to the operand size. */
1865 VBOXSTRICTRC rcStrict;
1866 RTUINT64U NewRbp;
1867 switch (enmEffOpSize)
1868 {
1869 case IEMMODE_16BIT:
1870 NewRbp.u = pCtx->rbp;
1871 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1872 break;
1873 case IEMMODE_32BIT:
1874 NewRbp.u = 0;
1875 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1876 break;
1877 case IEMMODE_64BIT:
1878 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1879 break;
1880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1881 }
1882 if (rcStrict != VINF_SUCCESS)
1883 return rcStrict;
1884
1885
1886 /* Commit it. */
1887 pCtx->rbp = NewRbp.u;
1888 pCtx->rsp = NewRsp.u;
1889 iemRegAddToRip(pIemCpu, cbInstr);
1890
1891 return VINF_SUCCESS;
1892}
1893
1894
1895/**
1896 * Implements int3 and int XX.
1897 *
1898 * @param u8Int The interrupt vector number.
1899 * @param fIsBpInstr Is it the breakpoint instruction.
1900 */
1901IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1902{
1903 Assert(pIemCpu->cXcptRecursions == 0);
1904 return iemRaiseXcptOrInt(pIemCpu,
1905 cbInstr,
1906 u8Int,
1907 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1908 0,
1909 0);
1910}
1911
1912
1913/**
1914 * Implements iret for real mode and V8086 mode.
1915 *
1916 * @param enmEffOpSize The effective operand size.
1917 */
1918IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1919{
1920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1921 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1922 X86EFLAGS Efl;
1923 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
1924 NOREF(cbInstr);
1925
1926 /*
1927 * In V8086 mode, iret raises #GP(0) if VME isn't enabled.
1928 */
1929 if ( pCtx->eflags.Bits.u1VM
1930 && !(pCtx->cr4 & X86_CR4_VME))
1931 return iemRaiseGeneralProtectionFault0(pIemCpu);
1932
1933 /*
1934 * Do the stack bits, but don't commit RSP before everything checks
1935 * out right.
1936 */
1937 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1938 VBOXSTRICTRC rcStrict;
1939 RTCPTRUNION uFrame;
1940 uint16_t uNewCs;
1941 uint32_t uNewEip;
1942 uint32_t uNewFlags;
1943 uint64_t uNewRsp;
1944 if (enmEffOpSize == IEMMODE_32BIT)
1945 {
1946 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1947 if (rcStrict != VINF_SUCCESS)
1948 return rcStrict;
1949 uNewEip = uFrame.pu32[0];
1950 uNewCs = (uint16_t)uFrame.pu32[1];
1951 uNewFlags = uFrame.pu32[2];
1952 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1953 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1954 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1955 | X86_EFL_ID;
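 /* VM, VIF and VIP cannot be changed by iret in real/V86 mode; they are
    taken from the current EFLAGS together with the fixed bit 1. */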
1956 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1957 }
1958 else
1959 {
1960 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1961 if (rcStrict != VINF_SUCCESS)
1962 return rcStrict;
1963 uNewEip = uFrame.pu16[0];
1964 uNewCs = uFrame.pu16[1];
1965 uNewFlags = uFrame.pu16[2];
1966 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1967 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1968 uNewFlags |= Efl.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1969 /** @todo The intel pseudo code does not indicate what happens to
1970 * reserved flags. We just ignore them. */
1971 }
1972 /** @todo Check how this is supposed to work if sp=0xfffe. */
1973
1974 /*
1975 * Check the limit of the new EIP.
1976 */
1977 /** @todo Only the AMD pseudo code check the limit here, what's
1978 * right? */
1979 if (uNewEip > pCtx->cs.u32Limit)
1980 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1981
1982 /*
1983 * V8086 checks and flag adjustments
1984 */
1985 if (Efl.Bits.u1VM)
1986 {
1987 if (Efl.Bits.u2IOPL == 3)
1988 {
1989 /* Preserve IOPL and clear RF. */
1990 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1991 uNewFlags |= Efl.u & (X86_EFL_IOPL);
1992 }
1993 else if ( enmEffOpSize == IEMMODE_16BIT
1994 && ( !(uNewFlags & X86_EFL_IF)
1995 || !Efl.Bits.u1VIP )
1996 && !(uNewFlags & X86_EFL_TF) )
1997 {
1998 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1999 uNewFlags &= ~X86_EFL_VIF;
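 /* X86_EFL_IF is bit 9 and X86_EFL_VIF is bit 19, hence the shift by 10 bits. */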
2000 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2001 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2002 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2003 }
2004 else
2005 return iemRaiseGeneralProtectionFault0(pIemCpu);
2006 }
2007
2008 /*
2009 * Commit the operation.
2010 */
2011 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2012 if (rcStrict != VINF_SUCCESS)
2013 return rcStrict;
2014 pCtx->rip = uNewEip;
2015 pCtx->cs.Sel = uNewCs;
2016 pCtx->cs.ValidSel = uNewCs;
2017 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
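 /* In real and V8086 mode the segment base is simply the selector shifted left by 4. */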
2018 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2019 /** @todo do we load attribs and limit as well? */
2020 Assert(uNewFlags & X86_EFL_1);
2021 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2022
2023 return VINF_SUCCESS;
2024}
2025
2026
2027/**
2028 * Loads a segment register when entering V8086 mode.
2029 *
2030 * @param pSReg The segment register.
2031 * @param uSeg The segment to load.
2032 */
2033static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2034{
2035 pSReg->Sel = uSeg;
2036 pSReg->ValidSel = uSeg;
2037 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2038 pSReg->u64Base = (uint32_t)uSeg << 4;
2039 pSReg->u32Limit = 0xffff;
2040 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2041 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2042 * IRET'ing to V8086. */
2043}
2044
2045
2046/**
2047 * Implements iret for protected mode returning to V8086 mode.
2048 *
2049 * @param pCtx Pointer to the CPU context.
2050 * @param uNewEip The new EIP.
2051 * @param uNewCs The new CS.
2052 * @param uNewFlags The new EFLAGS.
2053 * @param uNewRsp The RSP after the initial IRET frame.
2054 */
2055IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2056 uint32_t, uNewFlags, uint64_t, uNewRsp)
2057{
2058#if 0
2059 if (!LogIs6Enabled())
2060 {
2061 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
2062 RTLogFlags(NULL, "enabled");
2063 return VERR_IEM_RESTART_INSTRUCTION;
2064 }
2065#endif
2066
2067 /*
2068 * Pop the V8086 specific frame bits off the stack.
2069 */
2070 VBOXSTRICTRC rcStrict;
2071 RTCPTRUNION uFrame;
2072 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2073 if (rcStrict != VINF_SUCCESS)
2074 return rcStrict;
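 /* The rest of the V8086 iret frame: ESP, SS, ES, DS, FS and GS, each in a
    32-bit slot of which only the low 16 bits matter for the selectors. */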
2075 uint32_t uNewEsp = uFrame.pu32[0];
2076 uint16_t uNewSs = uFrame.pu32[1];
2077 uint16_t uNewEs = uFrame.pu32[2];
2078 uint16_t uNewDs = uFrame.pu32[3];
2079 uint16_t uNewFs = uFrame.pu32[4];
2080 uint16_t uNewGs = uFrame.pu32[5];
2081 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084
2085 /*
2086 * Commit the operation.
2087 */
2088 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2089 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2090 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2091 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2092 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2093 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2094 pCtx->rip = uNewEip;
2095 pCtx->rsp = uNewEsp;
2096 pCtx->rflags.u = uNewFlags;
2097 pIemCpu->uCpl = 3;
2098
2099 return VINF_SUCCESS;
2100}
2101
2102
2103/**
2104 * Implements iret for protected mode returning via a nested task.
2105 *
2106 * @param enmEffOpSize The effective operand size.
2107 */
2108IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2109{
2110 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2111}
2112
2113
2114/**
2115 * Implements iret for protected mode.
2116 *
2117 * @param enmEffOpSize The effective operand size.
2118 */
2119IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2120{
2121 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2122 NOREF(cbInstr);
2123
2124 /*
2125 * Nested task return.
2126 */
2127 if (pCtx->eflags.Bits.u1NT)
2128 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2129
2130 /*
2131 * Normal return.
2132 *
2133 * Do the stack bits, but don't commit RSP before everything checks
2134 * out right.
2135 */
2136 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2137 VBOXSTRICTRC rcStrict;
2138 RTCPTRUNION uFrame;
2139 uint16_t uNewCs;
2140 uint32_t uNewEip;
2141 uint32_t uNewFlags;
2142 uint64_t uNewRsp;
2143 if (enmEffOpSize == IEMMODE_32BIT)
2144 {
2145 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2146 if (rcStrict != VINF_SUCCESS)
2147 return rcStrict;
2148 uNewEip = uFrame.pu32[0];
2149 uNewCs = (uint16_t)uFrame.pu32[1];
2150 uNewFlags = uFrame.pu32[2];
2151 }
2152 else
2153 {
2154 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2155 if (rcStrict != VINF_SUCCESS)
2156 return rcStrict;
2157 uNewEip = uFrame.pu16[0];
2158 uNewCs = uFrame.pu16[1];
2159 uNewFlags = uFrame.pu16[2];
2160 }
2161 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2162 if (rcStrict != VINF_SUCCESS)
2163 return rcStrict;
2164
2165 /*
2166 * We're hopefully not returning to V8086 mode...
2167 */
2168 if ( (uNewFlags & X86_EFL_VM)
2169 && pIemCpu->uCpl == 0)
2170 {
2171 Assert(enmEffOpSize == IEMMODE_32BIT);
2172 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
2173 }
2174
2175 /*
2176 * Protected mode.
2177 */
2178 /* Read the CS descriptor. */
2179 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2180 {
2181 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
2182 return iemRaiseGeneralProtectionFault0(pIemCpu);
2183 }
2184
2185 IEMSELDESC DescCS;
2186 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2187 if (rcStrict != VINF_SUCCESS)
2188 {
2189 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2190 return rcStrict;
2191 }
2192
2193 /* Must be a code descriptor. */
2194 if (!DescCS.Legacy.Gen.u1DescType)
2195 {
2196 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2197 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2198 }
2199 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2200 {
2201 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2202 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2203 }
2204
2205 /* Privilege checks. */
2206 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2207 {
2208 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2209 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2210 }
2211 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2212 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2213 {
2214 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2215 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2216 }
2217
2218 /* Present? */
2219 if (!DescCS.Legacy.Gen.u1Present)
2220 {
2221 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2222 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2223 }
2224
2225 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2226
2227 /*
2228 * Return to outer level?
2229 */
2230 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2231 {
2232 uint16_t uNewSS;
2233 uint32_t uNewESP;
2234 if (enmEffOpSize == IEMMODE_32BIT)
2235 {
2236 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2237 if (rcStrict != VINF_SUCCESS)
2238 return rcStrict;
2239 uNewESP = uFrame.pu32[0];
2240 uNewSS = (uint16_t)uFrame.pu32[1];
2241 }
2242 else
2243 {
2244 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2245 if (rcStrict != VINF_SUCCESS)
2246 return rcStrict;
2247 uNewESP = uFrame.pu16[0];
2248 uNewSS = uFrame.pu16[1];
2249 }
2250 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253
2254 /* Read the SS descriptor. */
2255 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2256 {
2257 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2258 return iemRaiseGeneralProtectionFault0(pIemCpu);
2259 }
2260
2261 IEMSELDESC DescSS;
2262 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2263 if (rcStrict != VINF_SUCCESS)
2264 {
2265 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2266 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2267 return rcStrict;
2268 }
2269
2270 /* Privilege checks. */
2271 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2272 {
2273 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2274 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2275 }
2276 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2277 {
2278 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2279 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2280 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2281 }
2282
2283 /* Must be a writeable data segment descriptor. */
2284 if (!DescSS.Legacy.Gen.u1DescType)
2285 {
2286 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2287 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2288 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2289 }
2290 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2291 {
2292 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2293 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2294 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2295 }
2296
2297 /* Present? */
2298 if (!DescSS.Legacy.Gen.u1Present)
2299 {
2300 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2301 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2302 }
2303
2304 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2305
2306 /* Check EIP. */
2307 if (uNewEip > cbLimitCS)
2308 {
2309 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2310 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2311 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2312 }
2313
2314 /*
2315 * Commit the changes, marking CS and SS accessed first since
2316 * that may fail.
2317 */
2318 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2319 {
2320 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2321 if (rcStrict != VINF_SUCCESS)
2322 return rcStrict;
2323 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2324 }
2325 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2326 {
2327 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2328 if (rcStrict != VINF_SUCCESS)
2329 return rcStrict;
2330 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2331 }
2332
2333 pCtx->rip = uNewEip;
2334 pCtx->cs.Sel = uNewCs;
2335 pCtx->cs.ValidSel = uNewCs;
2336 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2337 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2338 pCtx->cs.u32Limit = cbLimitCS;
2339 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2340 pCtx->rsp = uNewESP;
2341 pCtx->ss.Sel = uNewSS;
2342 pCtx->ss.ValidSel = uNewSS;
2343 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2344 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2345 pCtx->ss.u32Limit = cbLimitSs;
2346 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2347
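 /* Which flags iret may restore depends on privilege: IOPL, VIF and VIP only
    at CPL 0, IF only when CPL <= IOPL; the arithmetic flags, TF, DF and NT
    (plus RF, AC and ID for 32-bit frames) are always restorable. */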
2348 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2349 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2350 if (enmEffOpSize != IEMMODE_16BIT)
2351 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2352 if (pIemCpu->uCpl == 0)
2353 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2354 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2355 fEFlagsMask |= X86_EFL_IF;
2356 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
2357 fEFlagsNew &= ~fEFlagsMask;
2358 fEFlagsNew |= uNewFlags & fEFlagsMask;
2359 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
2360
2361 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2362 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2363 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2364 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2365 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2366
2367 /* Done! */
2368
2369 }
2370 /*
2371 * Return to the same level.
2372 */
2373 else
2374 {
2375 /* Check EIP. */
2376 if (uNewEip > cbLimitCS)
2377 {
2378 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2379 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2380 }
2381
2382 /*
2383 * Commit the changes, marking CS first since it may fail.
2384 */
2385 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2386 {
2387 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2388 if (rcStrict != VINF_SUCCESS)
2389 return rcStrict;
2390 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2391 }
2392
2393 pCtx->rip = uNewEip;
2394 pCtx->cs.Sel = uNewCs;
2395 pCtx->cs.ValidSel = uNewCs;
2396 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2397 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2398 pCtx->cs.u32Limit = cbLimitCS;
2399 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2400 pCtx->rsp = uNewRsp;
2401
2402 X86EFLAGS NewEfl;
2403 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2404 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2405 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2406 if (enmEffOpSize != IEMMODE_16BIT)
2407 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2408 if (pIemCpu->uCpl == 0)
2409 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2410 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
2411 fEFlagsMask |= X86_EFL_IF;
2412 NewEfl.u &= ~fEFlagsMask;
2413 NewEfl.u |= fEFlagsMask & uNewFlags;
2414 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
2415 /* Done! */
2416 }
2417 return VINF_SUCCESS;
2418}
2419
2420
2421/**
2422 * Implements iret for long mode.
2423 *
2424 * @param enmEffOpSize The effective operand size.
2425 */
2426IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2427{
2428 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2429 //VBOXSTRICTRC rcStrict;
2430 //uint64_t uNewRsp;
2431
2432 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2433 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2434}
2435
2436
2437/**
2438 * Implements iret.
2439 *
2440 * @param enmEffOpSize The effective operand size.
2441 */
2442IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2443{
2444 /*
2445 * Call a mode specific worker.
2446 */
2447 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2448 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2449 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2450 if (IEM_IS_LONG_MODE(pIemCpu))
2451 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2452
2453 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2454}
2455
2456
2457/**
2458 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2459 *
2460 * @param iSegReg The segment register number (valid).
2461 * @param uSel The new selector value.
2462 */
2463IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2464{
2465 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2466 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2467 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2468
2469 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2470
2471 /*
2472 * Real mode and V8086 mode are easy.
2473 */
2474 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2475 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2476 {
2477 *pSel = uSel;
2478 pHid->u64Base = (uint32_t)uSel << 4;
2479 pHid->ValidSel = uSel;
2480 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2481#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2482 /** @todo Does the CPU actually load limits and attributes in the
2483 * real/V8086 mode segment load case? It doesn't for CS in far
2484 * jumps... Affects unreal mode. */
2485 pHid->u32Limit = 0xffff;
2486 pHid->Attr.u = 0;
2487 pHid->Attr.n.u1Present = 1;
2488 pHid->Attr.n.u1DescType = 1;
2489 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2490 ? X86_SEL_TYPE_RW
2491 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2492#endif
2493 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2494 iemRegAddToRip(pIemCpu, cbInstr);
2495 return VINF_SUCCESS;
2496 }
2497
2498 /*
2499 * Protected mode.
2500 *
2501 * Check if it's a null segment selector value first, that's OK for DS, ES,
2502 * FS and GS. If not null, then we have to load and parse the descriptor.
2503 */
2504 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2505 {
2506 if (iSegReg == X86_SREG_SS)
2507 {
2508 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2509 || pIemCpu->uCpl != 0
2510 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2511 {
2512 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
2513 return iemRaiseGeneralProtectionFault0(pIemCpu);
2514 }
2515
2516 /* In 64-bit kernel mode, the stack can be 0 because of the way
2517 interrupts are dispatched when in kernel ctx. Just load the
2518 selector value into the register and leave the hidden bits
2519 as is. */
2520 *pSel = uSel;
2521 pHid->ValidSel = uSel;
2522 iemRegAddToRip(pIemCpu, cbInstr);
2523 return VINF_SUCCESS;
2524 }
2525
2526 *pSel = uSel; /* Not RPL, remember :-) */
2527 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2528 && iSegReg != X86_SREG_FS
2529 && iSegReg != X86_SREG_GS)
2530 {
2531 /** @todo figure out what this actually does, it works. Needs
2532 * testcase! */
2533 pHid->Attr.u = 0;
2534 pHid->Attr.n.u1Present = 1;
2535 pHid->Attr.n.u1Long = 1;
2536 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2537 pHid->Attr.n.u2Dpl = 3;
2538 pHid->u32Limit = 0;
2539 pHid->u64Base = 0;
2540 pHid->ValidSel = uSel;
2541 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2542 }
2543 else
2544 iemHlpLoadNullDataSelectorProt(pHid, uSel);
2545 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2546 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2547
2548 iemRegAddToRip(pIemCpu, cbInstr);
2549 return VINF_SUCCESS;
2550 }
2551
2552 /* Fetch the descriptor. */
2553 IEMSELDESC Desc;
2554 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2555 if (rcStrict != VINF_SUCCESS)
2556 return rcStrict;
2557
2558 /* Check GPs first. */
2559 if (!Desc.Legacy.Gen.u1DescType)
2560 {
2561 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2562 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2563 }
2564 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2565 {
2566 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2567 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2568 {
2569 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2570 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2571 }
2572 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2573 {
2574 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2575 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2576 }
2577 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2578 {
2579 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2580 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2581 }
2582 }
2583 else
2584 {
2585 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2586 {
2587 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2588 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2589 }
2590 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2591 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2592 {
2593#if 0 /* this is what intel says. */
2594 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2595 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2596 {
2597 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2598 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2599 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2600 }
2601#else /* this is what makes more sense. */
2602 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2603 {
2604 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2605 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2606 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2607 }
2608 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2609 {
2610 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2611 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2612 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2613 }
2614#endif
2615 }
2616 }
2617
2618 /* Is it there? */
2619 if (!Desc.Legacy.Gen.u1Present)
2620 {
2621 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2622 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2623 }
2624
2625 /* The base and limit. */
2626 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2627 uint64_t u64Base;
2628 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2629 && iSegReg < X86_SREG_FS)
2630 u64Base = 0;
2631 else
2632 u64Base = X86DESC_BASE(&Desc.Legacy);
2633
2634 /*
2635 * Ok, everything checked out fine. Now set the accessed bit before
2636 * committing the result into the registers.
2637 */
2638 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2639 {
2640 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2641 if (rcStrict != VINF_SUCCESS)
2642 return rcStrict;
2643 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2644 }
2645
2646 /* commit */
2647 *pSel = uSel;
2648 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2649 pHid->u32Limit = cbLimit;
2650 pHid->u64Base = u64Base;
2651 pHid->ValidSel = uSel;
2652 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2653
2654 /** @todo check if the hidden bits are loaded correctly for 64-bit
2655 * mode. */
2656 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2657
2658 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2659 iemRegAddToRip(pIemCpu, cbInstr);
2660 return VINF_SUCCESS;
2661}
2662
2663
2664/**
2665 * Implements 'mov SReg, r/m'.
2666 *
2667 * @param iSegReg The segment register number (valid).
2668 * @param uSel The new selector value.
2669 */
2670IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2671{
2672 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2673 if (rcStrict == VINF_SUCCESS)
2674 {
2675 if (iSegReg == X86_SREG_SS)
2676 {
2677 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2678 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2679 }
2680 }
2681 return rcStrict;
2682}
2683
2684
2685/**
2686 * Implements 'pop SReg'.
2687 *
2688 * @param iSegReg The segment register number (valid).
2689 * @param enmEffOpSize The effective operand size (valid).
2690 */
2691IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2692{
2693 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2694 VBOXSTRICTRC rcStrict;
2695
2696 /*
2697 * Read the selector off the stack and join paths with mov ss, reg.
2698 */
2699 RTUINT64U TmpRsp;
2700 TmpRsp.u = pCtx->rsp;
2701 switch (enmEffOpSize)
2702 {
2703 case IEMMODE_16BIT:
2704 {
2705 uint16_t uSel;
2706 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2707 if (rcStrict == VINF_SUCCESS)
2708 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2709 break;
2710 }
2711
2712 case IEMMODE_32BIT:
2713 {
2714 uint32_t u32Value;
2715 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2716 if (rcStrict == VINF_SUCCESS)
2717 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2718 break;
2719 }
2720
2721 case IEMMODE_64BIT:
2722 {
2723 uint64_t u64Value;
2724 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2725 if (rcStrict == VINF_SUCCESS)
2726 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2727 break;
2728 }
2729 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2730 }
2731
2732 /*
2733 * Commit the stack on success.
2734 */
2735 if (rcStrict == VINF_SUCCESS)
2736 {
2737 pCtx->rsp = TmpRsp.u;
2738 if (iSegReg == X86_SREG_SS)
2739 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2740 }
2741 return rcStrict;
2742}
2743
2744
2745/**
2746 * Implements lgs, lfs, les, lds & lss.
2747 */
2748IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2749 uint16_t, uSel,
2750 uint64_t, offSeg,
2751 uint8_t, iSegReg,
2752 uint8_t, iGReg,
2753 IEMMODE, enmEffOpSize)
2754{
2755 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2756 VBOXSTRICTRC rcStrict;
2757
2758 /*
2759 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2760 */
2761 /** @todo verify and test that mov, pop and lXs works the segment
2762 * register loading in the exact same way. */
2763 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2764 if (rcStrict == VINF_SUCCESS)
2765 {
2766 switch (enmEffOpSize)
2767 {
2768 case IEMMODE_16BIT:
2769 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2770 break;
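 /* The 32-bit and 64-bit cases both write the full 64-bit register; a
    32-bit offSeg is zero extended, matching 32-bit GPR write behaviour. */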
2771 case IEMMODE_32BIT:
2772 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2773 break;
2774 case IEMMODE_64BIT:
2775 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2776 break;
2777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2778 }
2779 }
2780
2781 return rcStrict;
2782}
2783
2784
2785/**
2786 * Implements lgdt.
2787 *
2788 * @param iEffSeg The segment of the new gdtr contents.
2789 * @param GCPtrEffSrc The address of the new gdtr contents.
2790 * @param enmEffOpSize The effective operand size.
2791 */
2792IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2793{
2794 if (pIemCpu->uCpl != 0)
2795 return iemRaiseGeneralProtectionFault0(pIemCpu);
2796 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2797
2798 /*
2799 * Fetch the limit and base address.
2800 */
2801 uint16_t cbLimit;
2802 RTGCPTR GCPtrBase;
2803 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2804 if (rcStrict == VINF_SUCCESS)
2805 {
2806 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2807 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2808 else
2809 {
2810 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2811 pCtx->gdtr.cbGdt = cbLimit;
2812 pCtx->gdtr.pGdt = GCPtrBase;
2813 }
2814 if (rcStrict == VINF_SUCCESS)
2815 iemRegAddToRip(pIemCpu, cbInstr);
2816 }
2817 return rcStrict;
2818}
2819
2820
2821/**
2822 * Implements sgdt.
2823 *
2824 * @param iEffSeg The segment where to store the gdtr content.
2825 * @param GCPtrEffDst The address where to store the gdtr content.
2826 * @param enmEffOpSize The effective operand size.
2827 */
2828IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2829{
2830 /*
2831 * Join paths with sidt.
2832 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2833 * you really must know.
2834 */
2835 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2836 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2837 if (rcStrict == VINF_SUCCESS)
2838 iemRegAddToRip(pIemCpu, cbInstr);
2839 return rcStrict;
2840}
2841
2842
2843/**
2844 * Implements lidt.
2845 *
2846 * @param iEffSeg The segment of the new idtr contents.
2847 * @param GCPtrEffSrc The address of the new idtr contents.
2848 * @param enmEffOpSize The effective operand size.
2849 */
2850IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2851{
2852 if (pIemCpu->uCpl != 0)
2853 return iemRaiseGeneralProtectionFault0(pIemCpu);
2854 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2855
2856 /*
2857 * Fetch the limit and base address.
2858 */
2859 uint16_t cbLimit;
2860 RTGCPTR GCPtrBase;
2861 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2862 if (rcStrict == VINF_SUCCESS)
2863 {
2864 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2865 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2866 else
2867 {
2868 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2869 pCtx->idtr.cbIdt = cbLimit;
2870 pCtx->idtr.pIdt = GCPtrBase;
2871 }
2872 iemRegAddToRip(pIemCpu, cbInstr);
2873 }
2874 return rcStrict;
2875}
2876
2877
2878/**
2879 * Implements sidt.
2880 *
2881 * @param iEffSeg The segment where to store the idtr content.
2882 * @param GCPtrEffDst The address where to store the idtr content.
2883 * @param enmEffOpSize The effective operand size.
2884 */
2885IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2886{
2887 /*
2888 * Join paths with sgdt.
2889 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2890 * you really must know.
2891 */
2892 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2893 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2894 if (rcStrict == VINF_SUCCESS)
2895 iemRegAddToRip(pIemCpu, cbInstr);
2896 return rcStrict;
2897}
2898
2899
2900/**
2901 * Implements lldt.
2902 *
2903 * @param uNewLdt The new LDT selector value.
2904 */
2905IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2906{
2907 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2908
2909 /*
2910 * Check preconditions.
2911 */
2912 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2913 {
2914 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2915 return iemRaiseUndefinedOpcode(pIemCpu);
2916 }
2917 if (pIemCpu->uCpl != 0)
2918 {
2919 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2920 return iemRaiseGeneralProtectionFault0(pIemCpu);
2921 }
2922 if (uNewLdt & X86_SEL_LDT)
2923 {
2924 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2925 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2926 }
2927
2928 /*
2929 * Now, loading a NULL selector is easy.
2930 */
2931 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2932 {
2933 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2934 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2935 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
2936 else
2937 pCtx->ldtr.Sel = uNewLdt;
2938 pCtx->ldtr.ValidSel = uNewLdt;
2939 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2940 if (IEM_IS_GUEST_CPU_AMD(pIemCpu) && !IEM_VERIFICATION_ENABLED(pIemCpu))
2941 pCtx->ldtr.Attr.u = 0;
2942 else
2943 {
2944 pCtx->ldtr.u64Base = 0;
2945 pCtx->ldtr.u32Limit = 0;
2946 }
2947
2948 iemRegAddToRip(pIemCpu, cbInstr);
2949 return VINF_SUCCESS;
2950 }
2951
2952 /*
2953 * Read the descriptor.
2954 */
2955 IEMSELDESC Desc;
2956 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2957 if (rcStrict != VINF_SUCCESS)
2958 return rcStrict;
2959
2960 /* Check GPs first. */
2961 if (Desc.Legacy.Gen.u1DescType)
2962 {
2963 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2964 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2965 }
2966 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2967 {
2968 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2969 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2970 }
2971 uint64_t u64Base;
2972 if (!IEM_IS_LONG_MODE(pIemCpu))
2973 u64Base = X86DESC_BASE(&Desc.Legacy);
2974 else
2975 {
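 /* In long mode the LDT descriptor is 16 bytes wide; the must-be-zero bits
    have to be clear and the 64-bit base must be canonical. */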
2976 if (Desc.Long.Gen.u5Zeros)
2977 {
2978 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2979 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2980 }
2981
2982 u64Base = X86DESC64_BASE(&Desc.Long);
2983 if (!IEM_IS_CANONICAL(u64Base))
2984 {
2985 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2986 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2987 }
2988 }
2989
2990 /* NP */
2991 if (!Desc.Legacy.Gen.u1Present)
2992 {
2993 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2994 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2995 }
2996
2997 /*
2998 * It checks out alright, update the registers.
2999 */
3000/** @todo check if the actual value is loaded or if the RPL is dropped */
3001 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3002 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
3003 else
3004 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3005 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3006 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3007 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3008 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3009 pCtx->ldtr.u64Base = u64Base;
3010
3011 iemRegAddToRip(pIemCpu, cbInstr);
3012 return VINF_SUCCESS;
3013}
3014
3015
3016/**
3017 * Implements ltr.
3018 *
3019 * @param uNewTr The new task register (TSS) selector value.
3020 */
3021IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
3022{
3023 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3024
3025 /*
3026 * Check preconditions.
3027 */
3028 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3029 {
3030 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
3031 return iemRaiseUndefinedOpcode(pIemCpu);
3032 }
3033 if (pIemCpu->uCpl != 0)
3034 {
3035 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
3036 return iemRaiseGeneralProtectionFault0(pIemCpu);
3037 }
3038 if (uNewTr & X86_SEL_LDT)
3039 {
3040 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
3041 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
3042 }
3043 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
3044 {
3045 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
3046 return iemRaiseGeneralProtectionFault0(pIemCpu);
3047 }
3048
3049 /*
3050 * Read the descriptor.
3051 */
3052 IEMSELDESC Desc;
3053 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
3054 if (rcStrict != VINF_SUCCESS)
3055 return rcStrict;
3056
3057 /* Check GPs first. */
3058 if (Desc.Legacy.Gen.u1DescType)
3059 {
3060 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3061 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3062 }
3063 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
3064 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3065 || IEM_IS_LONG_MODE(pIemCpu)) )
3066 {
3067 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3068 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3069 }
3070 uint64_t u64Base;
3071 if (!IEM_IS_LONG_MODE(pIemCpu))
3072 u64Base = X86DESC_BASE(&Desc.Legacy);
3073 else
3074 {
3075 if (Desc.Long.Gen.u5Zeros)
3076 {
3077 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
3078 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3079 }
3080
3081 u64Base = X86DESC64_BASE(&Desc.Long);
3082 if (!IEM_IS_CANONICAL(u64Base))
3083 {
3084 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
3085 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3086 }
3087 }
3088
3089 /* NP */
3090 if (!Desc.Legacy.Gen.u1Present)
3091 {
3092 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
3093 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
3094 }
3095
3096 /*
3097 * Set it busy.
3098 * Note! Intel says this should lock down the whole descriptor, but we'll
3099 * restrict our selves to 32-bit for now due to lack of inline
3100 * assembly and such.
3101 */
3102 void *pvDesc;
3103 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
3104 if (rcStrict != VINF_SUCCESS)
3105 return rcStrict;
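 /* The busy bit is bit 1 of the type field, i.e. bit 41 of the 8-byte
    descriptor; the switch adjusts pointer and bit offset so the atomic
    bit set operates on a 32-bit aligned address. */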
3106 switch ((uintptr_t)pvDesc & 3)
3107 {
3108 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
3109 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
3110 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
3111 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
3112 }
3113 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
3114 if (rcStrict != VINF_SUCCESS)
3115 return rcStrict;
3116 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3117
3118 /*
3119 * It checks out alright, update the registers.
3120 */
3121/** @todo check if the actual value is loaded or if the RPL is dropped */
3122 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3123 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
3124 else
3125 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
3126 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
3127 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3128 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3129 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3130 pCtx->tr.u64Base = u64Base;
3131
3132 iemRegAddToRip(pIemCpu, cbInstr);
3133 return VINF_SUCCESS;
3134}
3135
3136
3137/**
3138 * Implements mov GReg,CRx.
3139 *
3140 * @param iGReg The general register to store the CRx value in.
3141 * @param iCrReg The CRx register to read (valid).
3142 */
3143IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
3144{
3145 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3146 if (pIemCpu->uCpl != 0)
3147 return iemRaiseGeneralProtectionFault0(pIemCpu);
3148 Assert(!pCtx->eflags.Bits.u1VM);
3149
3150 /* read it */
3151 uint64_t crX;
3152 switch (iCrReg)
3153 {
3154 case 0: crX = pCtx->cr0; break;
3155 case 2: crX = pCtx->cr2; break;
3156 case 3: crX = pCtx->cr3; break;
3157 case 4: crX = pCtx->cr4; break;
3158 case 8:
3159 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3160 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
3161 else
3162 crX = 0xff;
3163 break;
3164 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3165 }
3166
3167 /* store it */
3168 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3169 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
3170 else
3171 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
3172
3173 iemRegAddToRip(pIemCpu, cbInstr);
3174 return VINF_SUCCESS;
3175}
3176
3177
3178/**
3179 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
3180 *
3181 * @param iCrReg The CRx register to write (valid).
3182 * @param uNewCrX The new value.
3183 */
3184IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
3185{
3186 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3187 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3188 VBOXSTRICTRC rcStrict;
3189 int rc;
3190
3191 /*
3192 * Try store it.
3193 * Unfortunately, CPUM only does a tiny bit of the work.
3194 */
3195 switch (iCrReg)
3196 {
3197 case 0:
3198 {
3199 /*
3200 * Perform checks.
3201 */
3202 uint64_t const uOldCrX = pCtx->cr0;
3203 uNewCrX |= X86_CR0_ET; /* hardcoded */
3204
3205 /* Check for reserved bits. */
3206 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
3207 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
3208 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
3209 if (uNewCrX & ~(uint64_t)fValid)
3210 {
3211 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3212 return iemRaiseGeneralProtectionFault0(pIemCpu);
3213 }
3214
3215 /* Check for invalid combinations. */
3216 if ( (uNewCrX & X86_CR0_PG)
3217 && !(uNewCrX & X86_CR0_PE) )
3218 {
3219 Log(("Trying to set CR0.PG without CR0.PE\n"));
3220 return iemRaiseGeneralProtectionFault0(pIemCpu);
3221 }
3222
3223 if ( !(uNewCrX & X86_CR0_CD)
3224 && (uNewCrX & X86_CR0_NW) )
3225 {
3226 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
3227 return iemRaiseGeneralProtectionFault0(pIemCpu);
3228 }
3229
3230 /* Long mode consistency checks. */
3231 if ( (uNewCrX & X86_CR0_PG)
3232 && !(uOldCrX & X86_CR0_PG)
3233 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3234 {
3235 if (!(pCtx->cr4 & X86_CR4_PAE))
3236 {
3237 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
3238 return iemRaiseGeneralProtectionFault0(pIemCpu);
3239 }
3240 if (pCtx->cs.Attr.n.u1Long)
3241 {
3242 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
3243 return iemRaiseGeneralProtectionFault0(pIemCpu);
3244 }
3245 }
3246
3247 /** @todo check reserved PDPTR bits as AMD states. */
3248
3249 /*
3250 * Change CR0.
3251 */
3252 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3253 CPUMSetGuestCR0(pVCpu, uNewCrX);
3254 else
3255 pCtx->cr0 = uNewCrX;
3256 Assert(pCtx->cr0 == uNewCrX);
3257
3258 /*
3259 * Change EFER.LMA if entering or leaving long mode.
3260 */
3261 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3262 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3263 {
3264 uint64_t NewEFER = pCtx->msrEFER;
3265 if (uNewCrX & X86_CR0_PG)
3266 NewEFER |= MSR_K6_EFER_LMA;
3267 else
3268 NewEFER &= ~MSR_K6_EFER_LMA;
3269
3270 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3271 CPUMSetGuestEFER(pVCpu, NewEFER);
3272 else
3273 pCtx->msrEFER = NewEFER;
3274 Assert(pCtx->msrEFER == NewEFER);
3275 }
3276
3277 /*
3278 * Inform PGM.
3279 */
3280 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3281 {
3282 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3283 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3284 {
3285 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3286 AssertRCReturn(rc, rc);
3287 /* ignore informational status codes */
3288 }
3289 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3290 }
3291 else
3292 rcStrict = VINF_SUCCESS;
3293
3294#ifdef IN_RC
3295 /* Return to ring-3 for rescheduling if WP or AM changes. */
3296 if ( rcStrict == VINF_SUCCESS
3297 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
3298 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
3299 rcStrict = VINF_EM_RESCHEDULE;
3300#endif
3301 break;
3302 }
3303
3304 /*
3305 * CR2 can be changed without any restrictions.
3306 */
3307 case 2:
3308 pCtx->cr2 = uNewCrX;
3309 rcStrict = VINF_SUCCESS;
3310 break;
3311
3312 /*
3313 * CR3 is relatively simple, although AMD and Intel have different
3314 * accounts of how setting reserved bits are handled. We take intel's
3315 * word for the lower bits and AMD's for the high bits (63:52).
3316 */
3317 /** @todo Testcase: Setting reserved bits in CR3, especially before
3318 * enabling paging. */
3319 case 3:
3320 {
3321 /* check / mask the value. */
3322 if (uNewCrX & UINT64_C(0xfff0000000000000))
3323 {
3324 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3325 return iemRaiseGeneralProtectionFault0(pIemCpu);
3326 }
3327
3328 uint64_t fValid;
3329 if ( (pCtx->cr4 & X86_CR4_PAE)
3330 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3331 fValid = UINT64_C(0x000ffffffffff014);
3332 else if (pCtx->cr4 & X86_CR4_PAE)
3333 fValid = UINT64_C(0xfffffff4);
3334 else
3335 fValid = UINT64_C(0xfffff014);
3336 if (uNewCrX & ~fValid)
3337 {
3338 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3339 uNewCrX, uNewCrX & ~fValid));
3340 uNewCrX &= fValid;
3341 }
3342
3343 /** @todo If we're in PAE mode we should check the PDPTRs for
3344 * invalid bits. */
3345
3346 /* Make the change. */
3347 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3348 {
3349 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3350 AssertRCSuccessReturn(rc, rc);
3351 }
3352 else
3353 pCtx->cr3 = uNewCrX;
3354
3355 /* Inform PGM. */
3356 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3357 {
3358 if (pCtx->cr0 & X86_CR0_PG)
3359 {
3360 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3361 AssertRCReturn(rc, rc);
3362 /* ignore informational status codes */
3363 }
3364 }
3365 rcStrict = VINF_SUCCESS;
3366 break;
3367 }
3368
3369 /*
3370 * CR4 is a bit more tedious as there are bits which cannot be cleared
3371 * under some circumstances and such.
3372 */
3373 case 4:
3374 {
3375 uint64_t const uOldCrX = pCtx->cr4;
3376
3377 /* reserved bits */
3378 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3379 | X86_CR4_TSD | X86_CR4_DE
3380 | X86_CR4_PSE | X86_CR4_PAE
3381 | X86_CR4_MCE | X86_CR4_PGE
3382 | X86_CR4_PCE | X86_CR4_OSFSXR
3383 | X86_CR4_OSXMMEEXCPT;
3384 //if (xxx)
3385 // fValid |= X86_CR4_VMXE;
3386 //if (xxx)
3387 // fValid |= X86_CR4_OSXSAVE;
3388 if (uNewCrX & ~(uint64_t)fValid)
3389 {
3390 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3391 return iemRaiseGeneralProtectionFault0(pIemCpu);
3392 }
3393
3394 /* long mode checks. */
3395 if ( (uOldCrX & X86_CR4_PAE)
3396 && !(uNewCrX & X86_CR4_PAE)
3397 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3398 {
3399 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
3400 return iemRaiseGeneralProtectionFault0(pIemCpu);
3401 }
3402
3403
3404 /*
3405 * Change it.
3406 */
3407 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3408 {
3409 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3410 AssertRCSuccessReturn(rc, rc);
3411 }
3412 else
3413 pCtx->cr4 = uNewCrX;
3414 Assert(pCtx->cr4 == uNewCrX);
3415
3416 /*
3417 * Notify SELM and PGM.
3418 */
3419 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3420 {
3421 /* SELM - VME may change things wrt to the TSS shadowing. */
3422 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3423 {
3424 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3425 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3426#ifdef VBOX_WITH_RAW_MODE
3427 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
3428 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3429#endif
3430 }
3431
3432 /* PGM - flushing and mode. */
3433 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3434 {
3435 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3436 AssertRCReturn(rc, rc);
3437 /* ignore informational status codes */
3438 }
3439 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3440 }
3441 else
3442 rcStrict = VINF_SUCCESS;
3443 break;
3444 }
3445
3446 /*
3447 * CR8 maps to the APIC TPR.
3448 */
3449 case 8:
3450 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3451 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR write\n")); /** @todo implement CR8 reading and writing. */
3452 else
3453 rcStrict = VINF_SUCCESS;
3454 break;
3455
3456 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3457 }
3458
3459 /*
3460 * Advance the RIP on success.
3461 */
3462 if (RT_SUCCESS(rcStrict))
3463 {
3464 if (rcStrict != VINF_SUCCESS)
3465 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3466 iemRegAddToRip(pIemCpu, cbInstr);
3467 }
3468
3469 return rcStrict;
3470}
3471
3472
3473/**
3474 * Implements mov CRx,GReg.
3475 *
3476 * @param iCrReg The CRx register to write (valid).
3477 * @param iGReg The general register to load the DRx value from.
3478 */
3479IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3480{
3481 if (pIemCpu->uCpl != 0)
3482 return iemRaiseGeneralProtectionFault0(pIemCpu);
3483 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3484
3485 /*
3486 * Read the new value from the source register and call common worker.
3487 */
3488 uint64_t uNewCrX;
3489 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3490 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3491 else
3492 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3493 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3494}
3495
3496
3497/**
3498 * Implements 'LMSW r/m16'
3499 *
3500 * @param u16NewMsw The new value.
3501 */
3502IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3503{
3504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3505
3506 if (pIemCpu->uCpl != 0)
3507 return iemRaiseGeneralProtectionFault0(pIemCpu);
3508 Assert(!pCtx->eflags.Bits.u1VM);
3509
3510 /*
3511 * Compose the new CR0 value and call common worker.
3512 */
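 /* Note that CR0.PE is not masked out of the old value, so LMSW can set PE but never clear it. */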
3513 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3514 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3515 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3516}
3517
3518
3519/**
3520 * Implements 'CLTS'.
3521 */
3522IEM_CIMPL_DEF_0(iemCImpl_clts)
3523{
3524 if (pIemCpu->uCpl != 0)
3525 return iemRaiseGeneralProtectionFault0(pIemCpu);
3526
3527 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3528 uint64_t uNewCr0 = pCtx->cr0;
3529 uNewCr0 &= ~X86_CR0_TS;
3530 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3531}
3532
3533
3534/**
3535 * Implements mov GReg,DRx.
3536 *
3537 * @param iGReg The general register to store the DRx value in.
3538 * @param iDrReg The DRx register to read (0-7).
3539 */
3540IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3541{
3542 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3543
3544 /*
3545 * Check preconditions.
3546 */
3547
3548 /* Raise GPs. */
3549 if (pIemCpu->uCpl != 0)
3550 return iemRaiseGeneralProtectionFault0(pIemCpu);
3551 Assert(!pCtx->eflags.Bits.u1VM);
3552
3553 if ( (iDrReg == 4 || iDrReg == 5)
3554 && (pCtx->cr4 & X86_CR4_DE) )
3555 {
3556 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3557 return iemRaiseGeneralProtectionFault0(pIemCpu);
3558 }
3559
3560 /* Raise #DB if general access detect is enabled. */
3561 if (pCtx->dr[7] & X86_DR7_GD)
3562 {
3563 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3564 return iemRaiseDebugException(pIemCpu);
3565 }
3566
3567 /*
3568 * Read the debug register and store it in the specified general register.
3569 */
3570 uint64_t drX;
3571 switch (iDrReg)
3572 {
3573 case 0: drX = pCtx->dr[0]; break;
3574 case 1: drX = pCtx->dr[1]; break;
3575 case 2: drX = pCtx->dr[2]; break;
3576 case 3: drX = pCtx->dr[3]; break;
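        /* DR4/DR5 alias DR6/DR7 when CR4.DE is clear (the CR4.DE=1 case raised #GP above).
           Force the fixed bits: DR6 bits 4-11 and 16-31 read as 1 and bit 12 as 0;
           DR7 bit 10 reads as 1 and bits 11, 12, 14 and 15 as 0. */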
3577 case 6:
3578 case 4:
3579 drX = pCtx->dr[6];
3580 drX &= ~RT_BIT_32(12);
3581 drX |= UINT32_C(0xffff0ff0);
3582 break;
3583 case 7:
3584 case 5:
3585 drX = pCtx->dr[7];
3586 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3587 drX |= RT_BIT_32(10);
3588 break;
3589 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3590 }
3591
3592 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3593 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3594 else
3595 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3596
3597 iemRegAddToRip(pIemCpu, cbInstr);
3598 return VINF_SUCCESS;
3599}
3600
3601
3602/**
3603 * Implements mov DRx,GReg.
3604 *
3605 * @param iDrReg The DRx register to write (valid).
3606 * @param iGReg The general register to load the DRx value from.
3607 */
3608IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3609{
3610 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3611
3612 /*
3613 * Check preconditions.
3614 */
3615 if (pIemCpu->uCpl != 0)
3616 return iemRaiseGeneralProtectionFault0(pIemCpu);
3617 Assert(!pCtx->eflags.Bits.u1VM);
3618
3619 if ( (iDrReg == 4 || iDrReg == 5)
3620 && (pCtx->cr4 & X86_CR4_DE) )
3621 {
3622 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3623 return iemRaiseGeneralProtectionFault0(pIemCpu);
3624 }
3625
3626 /* Raise #DB if general access detect is enabled. */
3627 /** @todo Is the DR7.GD \#DB raised before the \#GP(0) for non-zero reserved
3628 * high bits in DR6/DR7? */
3629 if (pCtx->dr[7] & X86_DR7_GD)
3630 {
3631 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3632 return iemRaiseDebugException(pIemCpu);
3633 }
3634
3635 /*
3636 * Read the new value from the source register.
3637 */
3638 uint64_t uNewDrX;
3639 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3640 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3641 else
3642 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3643
3644 /*
3645 * Adjust it.
3646 */
3647 switch (iDrReg)
3648 {
3649 case 0:
3650 case 1:
3651 case 2:
3652 case 3:
3653 /* nothing to adjust */
3654 break;
3655
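        /* DR6/DR7 (and their DR4/DR5 aliases) must have the upper 32 bits clear,
           otherwise #GP(0); the fixed read-as-one/read-as-zero bits are forced below. */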
3656 case 6:
3657 case 4:
3658 if (uNewDrX & UINT64_C(0xffffffff00000000))
3659 {
3660 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3661 return iemRaiseGeneralProtectionFault0(pIemCpu);
3662 }
3663 uNewDrX &= ~RT_BIT_32(12);
3664 uNewDrX |= UINT32_C(0xffff0ff0);
3665 break;
3666
3667 case 7:
3668 case 5:
3669 if (uNewDrX & UINT64_C(0xffffffff00000000))
3670 {
3671 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3672 return iemRaiseGeneralProtectionFault0(pIemCpu);
3673 }
3674 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3675 uNewDrX |= RT_BIT_32(10);
3676 break;
3677
3678 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3679 }
3680
3681 /*
3682 * Do the actual setting.
3683 */
3684 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3685 {
3686 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3687 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3688 }
3689 else
3690 pCtx->dr[iDrReg] = uNewDrX;
3691
3692 iemRegAddToRip(pIemCpu, cbInstr);
3693 return VINF_SUCCESS;
3694}
3695
3696
3697/**
3698 * Implements 'INVLPG m'.
3699 *
3700 * @param GCPtrPage The effective address of the page to invalidate.
3701 * @remarks Updates the RIP.
3702 */
3703 IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3704{
3705 /* ring-0 only. */
3706 if (pIemCpu->uCpl != 0)
3707 return iemRaiseGeneralProtectionFault0(pIemCpu);
3708 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3709
3710 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3711 iemRegAddToRip(pIemCpu, cbInstr);
3712
3713 if (rc == VINF_SUCCESS)
3714 return VINF_SUCCESS;
3715 if (rc == VINF_PGM_SYNC_CR3)
3716 return iemSetPassUpStatus(pIemCpu, rc);
3717
3718 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3719 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
3720 return rc;
3721}
3722
3723
3724/**
3725 * Implements RDTSC.
3726 */
3727IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3728{
3729 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3730
3731 /*
3732 * Check preconditions.
3733 */
3734 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3735 return iemRaiseUndefinedOpcode(pIemCpu);
3736
3737 if ( (pCtx->cr4 & X86_CR4_TSD)
3738 && pIemCpu->uCpl != 0)
3739 {
3740 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3741 return iemRaiseGeneralProtectionFault0(pIemCpu);
3742 }
3743
3744 /*
3745 * Do the job.
3746 */
3747 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
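    /* The TSC is returned in EDX:EAX; the 32-bit GPR writes below also zero the upper halves of RAX/RDX. */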
3748 pCtx->rax = (uint32_t)uTicks;
3749 pCtx->rdx = uTicks >> 32;
3750#ifdef IEM_VERIFICATION_MODE_FULL
3751 pIemCpu->fIgnoreRaxRdx = true;
3752#endif
3753
3754 iemRegAddToRip(pIemCpu, cbInstr);
3755 return VINF_SUCCESS;
3756}
3757
3758
3759/**
3760 * Implements RDMSR.
3761 */
3762IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3763{
3764 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3765
3766 /*
3767 * Check preconditions.
3768 */
3769 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3770 return iemRaiseUndefinedOpcode(pIemCpu);
3771 if (pIemCpu->uCpl != 0)
3772 return iemRaiseGeneralProtectionFault0(pIemCpu);
3773
3774 /*
3775 * Do the job.
3776 */
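    /* The MSR value is returned in EDX:EAX. CPUM signals unknown or inaccessible
       MSRs with VERR_CPUM_RAISE_GP_0, which is translated into #GP(0) below. */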
3777 RTUINT64U uValue;
3778 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3779 if (rc != VINF_SUCCESS)
3780 {
3781 Log(("IEM: rdmsr(%#x) -> GP(0)\n", pCtx->ecx));
3782 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3783 return iemRaiseGeneralProtectionFault0(pIemCpu);
3784 }
3785
3786 pCtx->rax = uValue.s.Lo;
3787 pCtx->rdx = uValue.s.Hi;
3788
3789 iemRegAddToRip(pIemCpu, cbInstr);
3790 return VINF_SUCCESS;
3791}
3792
3793
3794/**
3795 * Implements WRMSR.
3796 */
3797IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
3798{
3799 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3800
3801 /*
3802 * Check preconditions.
3803 */
3804 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3805 return iemRaiseUndefinedOpcode(pIemCpu);
3806 if (pIemCpu->uCpl != 0)
3807 return iemRaiseGeneralProtectionFault0(pIemCpu);
3808
3809 /*
3810 * Do the job.
3811 */
3812 RTUINT64U uValue;
3813 uValue.s.Lo = pCtx->eax;
3814 uValue.s.Hi = pCtx->edx;
3815
3816 int rc;
3817 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3818 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
3819 else
3820 {
3821 CPUMCTX CtxTmp = *pCtx;
3822 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
3823 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
3824 *pCtx = *pCtx2;
3825 *pCtx2 = CtxTmp;
3826 }
3827 if (rc != VINF_SUCCESS)
3828 {
3829 Log(("IEM: wrmsr(%#x,%#x`%08x) -> GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
3830 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3831 return iemRaiseGeneralProtectionFault0(pIemCpu);
3832 }
3833
3834 iemRegAddToRip(pIemCpu, cbInstr);
3835 return VINF_SUCCESS;
3836}
3837
3838
3839/**
3840 * Implements 'IN eAX, port'.
3841 *
3842 * @param u16Port The source port.
3843 * @param cbReg The register size.
3844 */
3845IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3846{
3847 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3848
3849 /*
3850 * CPL check
3851 */
3852 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3853 if (rcStrict != VINF_SUCCESS)
3854 return rcStrict;
3855
3856 /*
3857 * Perform the I/O.
3858 */
3859 uint32_t u32Value;
3860 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3861 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
3862 else
3863 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3864 if (IOM_SUCCESS(rcStrict))
3865 {
3866 switch (cbReg)
3867 {
3868 case 1: pCtx->al = (uint8_t)u32Value; break;
3869 case 2: pCtx->ax = (uint16_t)u32Value; break;
3870 case 4: pCtx->rax = u32Value; break;
3871 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3872 }
3873 iemRegAddToRip(pIemCpu, cbInstr);
3874 pIemCpu->cPotentialExits++;
3875 if (rcStrict != VINF_SUCCESS)
3876 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3877 }
3878
3879 return rcStrict;
3880}
3881
3882
3883/**
3884 * Implements 'IN eAX, DX'.
3885 *
3886 * @param cbReg The register size.
3887 */
3888IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3889{
3890 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3891}
3892
3893
3894/**
3895 * Implements 'OUT port, eAX'.
3896 *
3897 * @param u16Port The destination port.
3898 * @param cbReg The register size.
3899 */
3900IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3901{
3902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3903
3904 /*
3905 * CPL check
3906 */
3907 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3908 if (rcStrict != VINF_SUCCESS)
3909 return rcStrict;
3910
3911 /*
3912 * Perform the I/O.
3913 */
3914 uint32_t u32Value;
3915 switch (cbReg)
3916 {
3917 case 1: u32Value = pCtx->al; break;
3918 case 2: u32Value = pCtx->ax; break;
3919 case 4: u32Value = pCtx->eax; break;
3920 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3921 }
3922 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3923 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
3924 else
3925 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3926 if (IOM_SUCCESS(rcStrict))
3927 {
3928 iemRegAddToRip(pIemCpu, cbInstr);
3929 pIemCpu->cPotentialExits++;
3930 if (rcStrict != VINF_SUCCESS)
3931 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3932 }
3933 return rcStrict;
3934}
3935
3936
3937/**
3938 * Implements 'OUT DX, eAX'.
3939 *
3940 * @param cbReg The register size.
3941 */
3942IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3943{
3944 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3945}
3946
3947
3948/**
3949 * Implements 'CLI'.
3950 */
3951IEM_CIMPL_DEF_0(iemCImpl_cli)
3952{
3953 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3954 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3955 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3956 uint32_t const fEflOld = fEfl;
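    /* With sufficient privilege (CPL <= IOPL, or IOPL=3 in V8086 mode) CLI clears IF;
       otherwise it clears VIF when CR4.PVI (CPL=3) respectively CR4.VME (V8086) is set,
       and raises #GP(0) in all remaining cases. Real mode always clears IF. */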
3957 if (pCtx->cr0 & X86_CR0_PE)
3958 {
3959 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
3960 if (!(fEfl & X86_EFL_VM))
3961 {
3962 if (pIemCpu->uCpl <= uIopl)
3963 fEfl &= ~X86_EFL_IF;
3964 else if ( pIemCpu->uCpl == 3
3965 && (pCtx->cr4 & X86_CR4_PVI) )
3966 fEfl &= ~X86_EFL_VIF;
3967 else
3968 return iemRaiseGeneralProtectionFault0(pIemCpu);
3969 }
3970 /* V8086 */
3971 else if (uIopl == 3)
3972 fEfl &= ~X86_EFL_IF;
3973 else if ( uIopl < 3
3974 && (pCtx->cr4 & X86_CR4_VME) )
3975 fEfl &= ~X86_EFL_VIF;
3976 else
3977 return iemRaiseGeneralProtectionFault0(pIemCpu);
3978 }
3979 /* real mode */
3980 else
3981 fEfl &= ~X86_EFL_IF;
3982
3983 /* Commit. */
3984 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3985 iemRegAddToRip(pIemCpu, cbInstr);
3986 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
3987 return VINF_SUCCESS;
3988}
3989
3990
3991/**
3992 * Implements 'STI'.
3993 */
3994IEM_CIMPL_DEF_0(iemCImpl_sti)
3995{
3996 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3997 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3998 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3999 uint32_t const fEflOld = fEfl;
4000
4001 if (pCtx->cr0 & X86_CR0_PE)
4002 {
4003 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
4004 if (!(fEfl & X86_EFL_VM))
4005 {
4006 if (pIemCpu->uCpl <= uIopl)
4007 fEfl |= X86_EFL_IF;
4008 else if ( pIemCpu->uCpl == 3
4009 && (pCtx->cr4 & X86_CR4_PVI)
4010 && !(fEfl & X86_EFL_VIP) )
4011 fEfl |= X86_EFL_VIF;
4012 else
4013 return iemRaiseGeneralProtectionFault0(pIemCpu);
4014 }
4015 /* V8086 */
4016 else if (uIopl == 3)
4017 fEfl |= X86_EFL_IF;
4018 else if ( uIopl < 3
4019 && (pCtx->cr4 & X86_CR4_VME)
4020 && !(fEfl & X86_EFL_VIP) )
4021 fEfl |= X86_EFL_VIF;
4022 else
4023 return iemRaiseGeneralProtectionFault0(pIemCpu);
4024 }
4025 /* real mode */
4026 else
4027 fEfl |= X86_EFL_IF;
4028
4029 /* Commit. */
4030 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
4031 iemRegAddToRip(pIemCpu, cbInstr);
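    /* STI sets up a one-instruction interrupt shadow when it transitions IF from 0 to 1
       (always in verification mode), so the next instruction runs before interrupt delivery. */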
4032 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_VERIFICATION_ENABLED(pIemCpu))
4033 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4034 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
4035 return VINF_SUCCESS;
4036}
4037
4038
4039/**
4040 * Implements 'HLT'.
4041 */
4042IEM_CIMPL_DEF_0(iemCImpl_hlt)
4043{
4044 if (pIemCpu->uCpl != 0)
4045 return iemRaiseGeneralProtectionFault0(pIemCpu);
4046 iemRegAddToRip(pIemCpu, cbInstr);
4047 return VINF_EM_HALT;
4048}
4049
4050
4051/**
4052 * Implements 'CPUID'.
4053 */
4054IEM_CIMPL_DEF_0(iemCImpl_cpuid)
4055{
4056 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4057
4058 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
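    /* CPUID returns 32-bit values; explicitly clear the upper halves of RAX, RBX, RCX and RDX. */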
4059 pCtx->rax &= UINT32_C(0xffffffff);
4060 pCtx->rbx &= UINT32_C(0xffffffff);
4061 pCtx->rcx &= UINT32_C(0xffffffff);
4062 pCtx->rdx &= UINT32_C(0xffffffff);
4063
4064 iemRegAddToRip(pIemCpu, cbInstr);
4065 return VINF_SUCCESS;
4066}
4067
4068
4069/**
4070 * Implements 'AAD'.
4071 *
4072 * @param bImm The immediate operand.
4073 */
4074IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
4075{
4076 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4077
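    /* AAD: AL := (AL + AH * imm8) & 0xff, AH := 0 (storing the 8-bit result to AX clears AH). */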
4078 uint16_t const ax = pCtx->ax;
4079 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
4080 pCtx->ax = al;
4081 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4082 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4083 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4084
4085 iemRegAddToRip(pIemCpu, cbInstr);
4086 return VINF_SUCCESS;
4087}
4088
4089
4090/**
4091 * Implements 'AAM'.
4092 *
4093 * @param bImm The immediate operand. Cannot be 0.
4094 */
4095IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
4096{
4097 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4098 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
4099
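    /* AAM: AH := AL / imm8, AL := AL % imm8. */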
4100 uint16_t const ax = pCtx->ax;
4101 uint8_t const al = (uint8_t)ax % bImm;
4102 uint8_t const ah = (uint8_t)ax / bImm;
4103 pCtx->ax = (ah << 8) + al;
4104 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4105 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4106 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4107
4108 iemRegAddToRip(pIemCpu, cbInstr);
4109 return VINF_SUCCESS;
4110}
4111
4112
4113
4114
4115/*
4116 * Instantiate the various string operation combinations.
4117 */
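/* Note: there is no 64-bit operand size / 16-bit address size variant, since 16-bit
   addressing cannot be encoded in 64-bit mode. */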
4118#define OP_SIZE 8
4119#define ADDR_SIZE 16
4120#include "IEMAllCImplStrInstr.cpp.h"
4121#define OP_SIZE 8
4122#define ADDR_SIZE 32
4123#include "IEMAllCImplStrInstr.cpp.h"
4124#define OP_SIZE 8
4125#define ADDR_SIZE 64
4126#include "IEMAllCImplStrInstr.cpp.h"
4127
4128#define OP_SIZE 16
4129#define ADDR_SIZE 16
4130#include "IEMAllCImplStrInstr.cpp.h"
4131#define OP_SIZE 16
4132#define ADDR_SIZE 32
4133#include "IEMAllCImplStrInstr.cpp.h"
4134#define OP_SIZE 16
4135#define ADDR_SIZE 64
4136#include "IEMAllCImplStrInstr.cpp.h"
4137
4138#define OP_SIZE 32
4139#define ADDR_SIZE 16
4140#include "IEMAllCImplStrInstr.cpp.h"
4141#define OP_SIZE 32
4142#define ADDR_SIZE 32
4143#include "IEMAllCImplStrInstr.cpp.h"
4144#define OP_SIZE 32
4145#define ADDR_SIZE 64
4146#include "IEMAllCImplStrInstr.cpp.h"
4147
4148#define OP_SIZE 64
4149#define ADDR_SIZE 32
4150#include "IEMAllCImplStrInstr.cpp.h"
4151#define OP_SIZE 64
4152#define ADDR_SIZE 64
4153#include "IEMAllCImplStrInstr.cpp.h"
4154
4155
4156/**
4157 * Implements 'FINIT' and 'FNINIT'.
4158 *
4159 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
4160 * not.
4161 */
4162IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
4163{
4164 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4165
4166 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4167 return iemRaiseDeviceNotAvailable(pIemCpu);
4168
4169 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
4170 if (fCheckXcpts && TODO )
4171 return iemRaiseMathFault(pIemCpu);
4172 */
4173
4174 if (iemFRegIsFxSaveFormat(pIemCpu))
4175 {
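        /* FCW 0x37f: all exceptions masked, 64-bit precision, round to nearest. */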
4176 pCtx->fpu.FCW = 0x37f;
4177 pCtx->fpu.FSW = 0;
4178 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
4179 pCtx->fpu.FPUDP = 0;
4180 pCtx->fpu.DS = 0; //??
4181 pCtx->fpu.Rsrvd2= 0;
4182 pCtx->fpu.FPUIP = 0;
4183 pCtx->fpu.CS = 0; //??
4184 pCtx->fpu.Rsrvd1= 0;
4185 pCtx->fpu.FOP = 0;
4186 }
4187 else
4188 {
4189 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
4190 pFpu->FCW = 0x37f;
4191 pFpu->FSW = 0;
4192 pFpu->FTW = 0xffff; /* 11 - empty */
4193 pFpu->FPUOO = 0; //??
4194 pFpu->FPUOS = 0; //??
4195 pFpu->FPUIP = 0;
4196 pFpu->CS = 0; //??
4197 pFpu->FOP = 0;
4198 }
4199
4200 iemHlpUsedFpu(pIemCpu);
4201 iemRegAddToRip(pIemCpu, cbInstr);
4202 return VINF_SUCCESS;
4203}
4204
4205
4206/**
4207 * Implements 'FXSAVE'.
4208 *
4209 * @param iEffSeg The effective segment.
4210 * @param GCPtrEff The address of the image.
4211 * @param enmEffOpSize The operand size (only REX.W really matters).
4212 */
4213IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4214{
4215 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4216
4217 /*
4218 * Raise exceptions.
4219 */
4220 if (pCtx->cr0 & X86_CR0_EM)
4221 return iemRaiseUndefinedOpcode(pIemCpu);
4222 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4223 return iemRaiseDeviceNotAvailable(pIemCpu);
4224 if (GCPtrEff & 15)
4225 {
4226 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4227 * all/any misalignment sizes; Intel says it's an implementation detail. */
4228 if ( (pCtx->cr0 & X86_CR0_AM)
4229 && pCtx->eflags.Bits.u1AC
4230 && pIemCpu->uCpl == 3)
4231 return iemRaiseAlignmentCheckException(pIemCpu);
4232 return iemRaiseGeneralProtectionFault0(pIemCpu);
4233 }
4234 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4235
4236 /*
4237 * Access the memory.
4238 */
4239 void *pvMem512;
4240 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4241 if (rcStrict != VINF_SUCCESS)
4242 return rcStrict;
4243 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
4244
4245 /*
4246 * Store the registers.
4247 */
4248 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4249 * specific whether MXCSR and XMM0-XMM7 are saved. */
4250
4251 /* common for all formats */
4252 pDst->FCW = pCtx->fpu.FCW;
4253 pDst->FSW = pCtx->fpu.FSW;
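    /* The FXSAVE image uses the abridged tag word: one valid/empty bit per register. */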
4254 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
4255 pDst->FOP = pCtx->fpu.FOP;
4256 pDst->MXCSR = pCtx->fpu.MXCSR;
4257 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
4258 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
4259 {
4260 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
4261 * them for now... */
4262 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4263 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4264 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
4265 pDst->aRegs[i].au32[3] = 0;
4266 }
4267
4268 /* FPU IP, CS, DP and DS. */
4269 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
4270 * state information. :-/
4271 * Storing zeros now to prevent any potential leakage of host info. */
4272 pDst->FPUIP = 0;
4273 pDst->CS = 0;
4274 pDst->Rsrvd1 = 0;
4275 pDst->FPUDP = 0;
4276 pDst->DS = 0;
4277 pDst->Rsrvd2 = 0;
4278
4279 /* XMM registers. */
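    /* When EFER.FFXSR is set, the fast variant executed at CPL 0 in 64-bit mode
       leaves the XMM registers out of the image. */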
4280 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4281 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4282 || pIemCpu->uCpl != 0)
4283 {
4284 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4285 for (uint32_t i = 0; i < cXmmRegs; i++)
4286 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
4287 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
4288 * right? */
4289 }
4290
4291 /*
4292 * Commit the memory.
4293 */
4294 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4295 if (rcStrict != VINF_SUCCESS)
4296 return rcStrict;
4297
4298 iemRegAddToRip(pIemCpu, cbInstr);
4299 return VINF_SUCCESS;
4300}
4301
4302
4303/**
4304 * Implements 'FXRSTOR'.
4305 *
4306 * @param iEffSeg The effective segment register for @a GCPtrEff.
 * @param GCPtrEff The address of the image.
4307 * @param enmEffOpSize The operand size (only REX.W really matters).
4308 */
4309IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4310{
4311 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4312
4313 /*
4314 * Raise exceptions.
4315 */
4316 if (pCtx->cr0 & X86_CR0_EM)
4317 return iemRaiseUndefinedOpcode(pIemCpu);
4318 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4319 return iemRaiseDeviceNotAvailable(pIemCpu);
4320 if (GCPtrEff & 15)
4321 {
4322 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4323 * all/any misalignment sizes; Intel says it's an implementation detail. */
4324 if ( (pCtx->cr0 & X86_CR0_AM)
4325 && pCtx->eflags.Bits.u1AC
4326 && pIemCpu->uCpl == 3)
4327 return iemRaiseAlignmentCheckException(pIemCpu);
4328 return iemRaiseGeneralProtectionFault0(pIemCpu);
4329 }
4330 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4331
4332 /*
4333 * Access the memory.
4334 */
4335 void *pvMem512;
4336 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4337 if (rcStrict != VINF_SUCCESS)
4338 return rcStrict;
4339 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4340
4341 /*
4342 * Check the state for stuff which will GP(0).
4343 */
4344 uint32_t const fMXCSR = pSrc->MXCSR;
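    /* An MXCSR_MASK of zero means it hasn't been established yet; fall back on the
       architectural default mask of 0xffbf. */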
4345 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4346 if (fMXCSR & ~fMXCSR_MASK)
4347 {
4348 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4349 return iemRaiseGeneralProtectionFault0(pIemCpu);
4350 }
4351
4352 /*
4353 * Load the registers.
4354 */
4355 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4356 * specific whether MXCSR and XMM0-XMM7 are restored. */
4357
4358 /* common for all formats */
4359 pCtx->fpu.FCW = pSrc->FCW;
4360 pCtx->fpu.FSW = pSrc->FSW;
4361 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4362 pCtx->fpu.FOP = pSrc->FOP;
4363 pCtx->fpu.MXCSR = fMXCSR;
4364 /* (MXCSR_MASK is read-only) */
4365 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4366 {
4367 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4368 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4369 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4370 pCtx->fpu.aRegs[i].au32[3] = 0;
4371 }
4372
4373 /* FPU IP, CS, DP and DS. */
4374 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4375 {
4376 pCtx->fpu.FPUIP = pSrc->FPUIP;
4377 pCtx->fpu.CS = pSrc->CS;
4378 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4379 pCtx->fpu.FPUDP = pSrc->FPUDP;
4380 pCtx->fpu.DS = pSrc->DS;
4381 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4382 }
4383 else
4384 {
4385 pCtx->fpu.FPUIP = pSrc->FPUIP;
4386 pCtx->fpu.CS = pSrc->CS;
4387 pCtx->fpu.Rsrvd1 = 0;
4388 pCtx->fpu.FPUDP = pSrc->FPUDP;
4389 pCtx->fpu.DS = pSrc->DS;
4390 pCtx->fpu.Rsrvd2 = 0;
4391 }
4392
4393 /* XMM registers. */
4394 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4395 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4396 || pIemCpu->uCpl != 0)
4397 {
4398 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4399 for (uint32_t i = 0; i < cXmmRegs; i++)
4400 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4401 }
4402
4403 /*
4404 * Commit the memory.
4405 */
4406 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4407 if (rcStrict != VINF_SUCCESS)
4408 return rcStrict;
4409
4410 iemHlpUsedFpu(pIemCpu);
4411 iemRegAddToRip(pIemCpu, cbInstr);
4412 return VINF_SUCCESS;
4413}
4414
4415
4416/**
4417 * Common routine for fnstenv and fnsave.
4418 *
4419 * @param uPtr Where to store the state.
4420 * @param pCtx The CPU context.
4421 */
4422static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4423{
4424 if (enmEffOpSize == IEMMODE_16BIT)
4425 {
4426 uPtr.pu16[0] = pCtx->fpu.FCW;
4427 uPtr.pu16[1] = pCtx->fpu.FSW;
4428 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4429 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4430 {
4431 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4432 * protected mode or long mode and we save it in real mode? And vice
4433 * versa? And with 32-bit operand size? I think the CPU stores the
4434 * effective address ((CS << 4) + IP) in the offset register and not
4435 * doing any address calculations here. */
4436 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4437 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4438 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4439 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4440 }
4441 else
4442 {
4443 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4444 uPtr.pu16[4] = pCtx->fpu.CS;
4445 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4446 uPtr.pu16[6] = pCtx->fpu.DS;
4447 }
4448 }
4449 else
4450 {
4451 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4452 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4453 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4454 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4455 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4456 {
4457 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4458 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4459 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4460 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4461 }
4462 else
4463 {
4464 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4465 uPtr.pu16[4*2] = pCtx->fpu.CS;
4466 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4467 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4468 uPtr.pu16[6*2] = pCtx->fpu.DS;
4469 }
4470 }
4471}
4472
4473
4474/**
4475 * Common routine for fldenv and frstor.
4476 *
4477 * @param uPtr Where to load the state from.
4478 * @param pCtx The CPU context.
4479 */
4480static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4481{
4482 if (enmEffOpSize == IEMMODE_16BIT)
4483 {
4484 pCtx->fpu.FCW = uPtr.pu16[0];
4485 pCtx->fpu.FSW = uPtr.pu16[1];
4486 pCtx->fpu.FTW = uPtr.pu16[2];
4487 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4488 {
4489 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4490 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4491 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4492 pCtx->fpu.CS = 0;
4493 pCtx->fpu.Rsrvd1= 0;
4494 pCtx->fpu.DS = 0;
4495 pCtx->fpu.Rsrvd2= 0;
4496 }
4497 else
4498 {
4499 pCtx->fpu.FPUIP = uPtr.pu16[3];
4500 pCtx->fpu.CS = uPtr.pu16[4];
4501 pCtx->fpu.Rsrvd1= 0;
4502 pCtx->fpu.FPUDP = uPtr.pu16[5];
4503 pCtx->fpu.DS = uPtr.pu16[6];
4504 pCtx->fpu.Rsrvd2= 0;
4505 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4506 }
4507 }
4508 else
4509 {
4510 pCtx->fpu.FCW = uPtr.pu16[0*2];
4511 pCtx->fpu.FSW = uPtr.pu16[1*2];
4512 pCtx->fpu.FTW = uPtr.pu16[2*2];
4513 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4514 {
4515 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4516 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4517 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4518 pCtx->fpu.CS = 0;
4519 pCtx->fpu.Rsrvd1= 0;
4520 pCtx->fpu.DS = 0;
4521 pCtx->fpu.Rsrvd2= 0;
4522 }
4523 else
4524 {
4525 pCtx->fpu.FPUIP = uPtr.pu32[3];
4526 pCtx->fpu.CS = uPtr.pu16[4*2];
4527 pCtx->fpu.Rsrvd1= 0;
4528 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4529 pCtx->fpu.FPUDP = uPtr.pu32[5];
4530 pCtx->fpu.DS = uPtr.pu16[6*2];
4531 pCtx->fpu.Rsrvd2= 0;
4532 }
4533 }
4534
4535 /* Make adjustments. */
4536 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4537 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4538 iemFpuRecalcExceptionStatus(pCtx);
4539 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4540 * exceptions are pending after loading the saved state? */
4541}
4542
4543
4544/**
4545 * Implements 'FNSTENV'.
4546 *
4547 * @param enmEffOpSize The operand size (only REX.W really matters).
4548 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4549 * @param GCPtrEffDst The address of the image.
4550 */
4551IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4552{
4553 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4554 RTPTRUNION uPtr;
4555 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4556 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4557 if (rcStrict != VINF_SUCCESS)
4558 return rcStrict;
4559
4560 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4561
4562 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4563 if (rcStrict != VINF_SUCCESS)
4564 return rcStrict;
4565
4566 /* Note: C0, C1, C2 and C3 are documented as undefined; we leave them untouched! */
4567 iemRegAddToRip(pIemCpu, cbInstr);
4568 return VINF_SUCCESS;
4569}
4570
4571
4572/**
4573 * Implements 'FNSAVE'.
4574 *
4575 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
 * @param GCPtrEffDst The address of the image.
4576 * @param enmEffOpSize The operand size.
4577 */
4578IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4579{
4580 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4581 RTPTRUNION uPtr;
4582 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4583 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4584 if (rcStrict != VINF_SUCCESS)
4585 return rcStrict;
4586
4587 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4588 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4589 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4590 {
4591 paRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4592 paRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4593 paRegs[i].au16[4] = pCtx->fpu.aRegs[i].au16[4];
4594 }
4595
4596 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4597 if (rcStrict != VINF_SUCCESS)
4598 return rcStrict;
4599
4600 /*
4601 * Re-initialize the FPU.
4602 */
4603 pCtx->fpu.FCW = 0x37f;
4604 pCtx->fpu.FSW = 0;
4605 pCtx->fpu.FTW = 0x00; /* 0 - empty */
4606 pCtx->fpu.FPUDP = 0;
4607 pCtx->fpu.DS = 0;
4608 pCtx->fpu.Rsrvd2= 0;
4609 pCtx->fpu.FPUIP = 0;
4610 pCtx->fpu.CS = 0;
4611 pCtx->fpu.Rsrvd1= 0;
4612 pCtx->fpu.FOP = 0;
4613
4614 iemHlpUsedFpu(pIemCpu);
4615 iemRegAddToRip(pIemCpu, cbInstr);
4616 return VINF_SUCCESS;
4617}
4618
4619
4620
4621/**
4622 * Implements 'FLDENV'.
4623 *
4624 * @param enmEffOpSize The operand size (only REX.W really matters).
4625 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4626 * @param GCPtrEffSrc The address of the image.
4627 */
4628IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4629{
4630 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4631 RTCPTRUNION uPtr;
4632 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4633 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4634 if (rcStrict != VINF_SUCCESS)
4635 return rcStrict;
4636
4637 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4638
4639 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4640 if (rcStrict != VINF_SUCCESS)
4641 return rcStrict;
4642
4643 iemHlpUsedFpu(pIemCpu);
4644 iemRegAddToRip(pIemCpu, cbInstr);
4645 return VINF_SUCCESS;
4646}
4647
4648
4649/**
4650 * Implements 'FRSTOR'.
4651 *
4652 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
 * @param GCPtrEffSrc The address of the image.
4653 * @param enmEffOpSize The operand size.
4654 */
4655IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4656{
4657 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4658 RTCPTRUNION uPtr;
4659 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4660 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4661 if (rcStrict != VINF_SUCCESS)
4662 return rcStrict;
4663
4664 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4665 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4666 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4667 {
4668 pCtx->fpu.aRegs[i].au32[0] = paRegs[i].au32[0];
4669 pCtx->fpu.aRegs[i].au32[1] = paRegs[i].au32[1];
4670 pCtx->fpu.aRegs[i].au32[2] = paRegs[i].au16[4];
4671 pCtx->fpu.aRegs[i].au32[3] = 0;
4672 }
4673
4674 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4675 if (rcStrict != VINF_SUCCESS)
4676 return rcStrict;
4677
4678 iemHlpUsedFpu(pIemCpu);
4679 iemRegAddToRip(pIemCpu, cbInstr);
4680 return VINF_SUCCESS;
4681}
4682
4683
4684/**
4685 * Implements 'FLDCW'.
4686 *
4687 * @param u16Fcw The new FCW.
4688 */
4689IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4690{
4691 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4692
4693 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4694 /** @todo Testcase: Try to see what happens when trying to set undefined bits
4695 * (other than 6 and 7). Currently ignoring them. */
4696 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4697 * according to FSW. (This is what is currently implemented.) */
4698 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4699 iemFpuRecalcExceptionStatus(pCtx);
4700
4701 /* Note: C0, C1, C2 and C3 are documented as undefined; we leave them untouched! */
4702 iemHlpUsedFpu(pIemCpu);
4703 iemRegAddToRip(pIemCpu, cbInstr);
4704 return VINF_SUCCESS;
4705}
4706
4707
4708
4709/**
4710 * Implements the underflow case of fxch.
4711 *
4712 * @param iStReg The other stack register.
4713 */
4714IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4715{
4716 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4717
4718 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4719 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4720 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4721
4722 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4723 * registers are read as QNaN and then exchanged. This could be
4724 * wrong... */
4725 if (pCtx->fpu.FCW & X86_FCW_IM)
4726 {
4727 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4728 {
4729 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4730 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4731 else
4732 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4733 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4734 }
4735 else
4736 {
4737 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4738 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4739 }
4740 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4741 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4742 }
4743 else
4744 {
4745 /* raise underflow exception, don't change anything. */
4746 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4747 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4748 }
4749
4750 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4751 iemHlpUsedFpu(pIemCpu);
4752 iemRegAddToRip(pIemCpu, cbInstr);
4753 return VINF_SUCCESS;
4754}
4755
4756
4757/**
4758 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4759 *
4760 * @param iStReg The other stack register to compare ST(0) with.
 * @param pfnAImpl The assembly comparison worker (FCOMI or FUCOMI flavour).
 * @param fPop Whether to pop the stack register afterwards.
4761 */
4762IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4763{
4764 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4765 Assert(iStReg < 8);
4766
4767 /*
4768 * Raise exceptions.
4769 */
4770 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4771 return iemRaiseDeviceNotAvailable(pIemCpu);
4772 uint16_t u16Fsw = pCtx->fpu.FSW;
4773 if (u16Fsw & X86_FSW_ES)
4774 return iemRaiseMathFault(pIemCpu);
4775
4776 /*
4777 * Check if any of the register accesses cause a stack underflow (FSW.IE + FSW.SF).
4778 */
4779 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4780 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4781 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4782 {
4783 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4784 pCtx->fpu.FSW &= ~X86_FSW_C1;
4785 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4786 if ( !(u16Fsw & X86_FSW_IE)
4787 || (pCtx->fpu.FCW & X86_FCW_IM) )
4788 {
4789 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4790 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4791 }
4792 }
4793 else if (pCtx->fpu.FCW & X86_FCW_IM)
4794 {
4795 /* Masked underflow. */
4796 pCtx->fpu.FSW &= ~X86_FSW_C1;
4797 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4798 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4799 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4800 }
4801 else
4802 {
4803 /* Raise underflow - don't touch EFLAGS or TOP. */
4804 pCtx->fpu.FSW &= ~X86_FSW_C1;
4805 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4806 fPop = false;
4807 }
4808
4809 /*
4810 * Pop if necessary.
4811 */
4812 if (fPop)
4813 {
4814 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4815 pCtx->fpu.FSW &= X86_FSW_TOP_MASK;
4816 pCtx->fpu.FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4817 }
4818
4819 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4820 iemHlpUsedFpu(pIemCpu);
4821 iemRegAddToRip(pIemCpu, cbInstr);
4822 return VINF_SUCCESS;
4823}
4824
4825/** @} */
4826