VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h @ 42670

Last change on this file was r42670, checked in by vboxsync on 2012-08-07:

Fixed fnstcw. Implemented fnsave and frstor.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 153.8 KB
1/* $Id: IEMAllCImpl.cpp.h 42670 2012-08-07 23:43:32Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
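    /* Sketch of the missing check (not implemented here): with CPL > IOPL in
       protected mode, or in V86 mode, the CPU consults the I/O permission
       bitmap in the current TSS (the bitmap offset lives at TSS byte 66h);
       if any of the cbOperand bits starting at bit u16Port is set, or the
       required bitmap bytes lie beyond the TSS limit, #GP(0) is raised. */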
42 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
43 }
44 return VINF_SUCCESS;
45}
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
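/* Should the helper above ever be resurrected, a branch-free sketch (untested,
   shown for illustration only) could XOR-fold the byte instead:
       u8Result ^= u8Result >> 4;
       u8Result ^= u8Result >> 2;
       u8Result ^= u8Result >> 1;
       return !(u8Result & 1);
   PF is set when the low byte of the result has an even number of set bits. */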
80
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param pSReg Pointer to the segment register.
106 * @param uRpl The RPL.
107 */
108static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl)
109{
110 /** @todo Testcase: write a testcase checking what happens when loading a NULL
111 * data selector in protected mode. */
112 pSReg->Sel = uRpl;
113 pSReg->ValidSel = uRpl;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 pSReg->u64Base = 0;
116 pSReg->u32Limit = 0;
117 pSReg->Attr.u = 0;
118}
119
120
121/**
122 * Helper used by iret.
123 *
124 * @param pIemCpu The IEM per CPU data.
 * @param uCpl The new CPL.
125 * @param pSReg Pointer to the segment register.
126 */
127static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
128{
129#ifdef VBOX_WITH_RAW_MODE_NOT_R0
130 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
131 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
132#else
133 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
134#endif
135
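    /* Data and non-conforming code segments become inaccessible once the new
       CPL is greater (less privileged) than their DPL, so such selectors are
       replaced with NULL ones, mirroring what real CPUs do when returning to
       an outer privilege level. */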
136 if ( uCpl > pSReg->Attr.n.u2Dpl
137 && pSReg->Attr.n.u1DescType /* code or data, not system */
138 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
139 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
140 iemHlpLoadNullDataSelectorProt(pSReg, 0);
141}
142
143/** @} */
144
145/** @name C Implementations
146 * @{
147 */
148
149/**
150 * Implements a 16-bit popa.
151 */
152IEM_CIMPL_DEF_0(iemCImpl_popa_16)
153{
154 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
155 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
156 RTGCPTR GCPtrLast = GCPtrStart + 15;
157 VBOXSTRICTRC rcStrict;
158
159 /*
160 * The docs are a bit hard to comprehend here, but it looks like we wrap
161 * around in real mode as long as none of the individual "popa" crosses the
162 * end of the stack segment. In protected mode we check the whole access
163 * in one go. For efficiency, only do the word-by-word thing if we're in
164 * danger of wrapping around.
165 */
166 /** @todo do popa boundary / wrap-around checks. */
167 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
168 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
169 {
170 /* word-by-word */
171 RTUINT64U TmpRsp;
172 TmpRsp.u = pCtx->rsp;
173 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
174 if (rcStrict == VINF_SUCCESS)
175 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
176 if (rcStrict == VINF_SUCCESS)
177 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
178 if (rcStrict == VINF_SUCCESS)
179 {
180 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
181 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
182 }
183 if (rcStrict == VINF_SUCCESS)
184 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
185 if (rcStrict == VINF_SUCCESS)
186 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
187 if (rcStrict == VINF_SUCCESS)
188 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
189 if (rcStrict == VINF_SUCCESS)
190 {
191 pCtx->rsp = TmpRsp.u;
192 iemRegAddToRip(pIemCpu, cbInstr);
193 }
194 }
195 else
196 {
197 uint16_t const *pa16Mem = NULL;
198 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
199 if (rcStrict == VINF_SUCCESS)
200 {
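            /* PUSHA stores AX,CX,DX,BX,SP,BP,SI,DI from the top of the frame
               downwards; with X86_GREG_xAX..xDI numbered 0..7, register r
               therefore sits at word index [7 - r] of the mapping. */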
201 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
202 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
203 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
204 /* skip sp */
205 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
206 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
207 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
208 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
209 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
210 if (rcStrict == VINF_SUCCESS)
211 {
212 iemRegAddToRsp(pCtx, 16);
213 iemRegAddToRip(pIemCpu, cbInstr);
214 }
215 }
216 }
217 return rcStrict;
218}
219
220
221/**
222 * Implements a 32-bit popa.
223 */
224IEM_CIMPL_DEF_0(iemCImpl_popa_32)
225{
226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
227 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
228 RTGCPTR GCPtrLast = GCPtrStart + 31;
229 VBOXSTRICTRC rcStrict;
230
231 /*
232 * The docs are a bit hard to comprehend here, but it looks like we wrap
233 * around in real mode as long as none of the individual "popa" crosses the
234 * end of the stack segment. In protected mode we check the whole access
235 * in one go. For efficiency, only do the word-by-word thing if we're in
236 * danger of wrapping around.
237 */
238 /** @todo do popa boundary / wrap-around checks. */
239 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
240 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
241 {
242 /* word-by-word */
243 RTUINT64U TmpRsp;
244 TmpRsp.u = pCtx->rsp;
245 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
246 if (rcStrict == VINF_SUCCESS)
247 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
248 if (rcStrict == VINF_SUCCESS)
249 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
250 if (rcStrict == VINF_SUCCESS)
251 {
252 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
253 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
254 }
255 if (rcStrict == VINF_SUCCESS)
256 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
257 if (rcStrict == VINF_SUCCESS)
258 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
261 if (rcStrict == VINF_SUCCESS)
262 {
263#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
264 pCtx->rdi &= UINT32_MAX;
265 pCtx->rsi &= UINT32_MAX;
266 pCtx->rbp &= UINT32_MAX;
267 pCtx->rbx &= UINT32_MAX;
268 pCtx->rdx &= UINT32_MAX;
269 pCtx->rcx &= UINT32_MAX;
270 pCtx->rax &= UINT32_MAX;
271#endif
272 pCtx->rsp = TmpRsp.u;
273 iemRegAddToRip(pIemCpu, cbInstr);
274 }
275 }
276 else
277 {
278 uint32_t const *pa32Mem;
279 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
280 if (rcStrict == VINF_SUCCESS)
281 {
282 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
283 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
284 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
285 /* skip esp */
286 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
287 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
288 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
289 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
290 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 iemRegAddToRsp(pCtx, 32);
294 iemRegAddToRip(pIemCpu, cbInstr);
295 }
296 }
297 }
298 return rcStrict;
299}
300
301
302/**
303 * Implements a 16-bit pusha.
304 */
305IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
306{
307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
308 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
309 RTGCPTR GCPtrBottom = GCPtrTop - 15;
310 VBOXSTRICTRC rcStrict;
311
312 /*
313 * The docs are a bit hard to comprehend here, but it looks like we wrap
314 * around in real mode as long as none of the individual "pusha" crosses the
315 * end of the stack segment. In protected mode we check the whole access
316 * in one go. For efficiency, only do the word-by-word thing if we're in
317 * danger of wrapping around.
318 */
319 /** @todo do pusha boundary / wrap-around checks. */
320 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
321 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
322 {
323 /* word-by-word */
324 RTUINT64U TmpRsp;
325 TmpRsp.u = pCtx->rsp;
326 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
333 if (rcStrict == VINF_SUCCESS)
334 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
335 if (rcStrict == VINF_SUCCESS)
336 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
337 if (rcStrict == VINF_SUCCESS)
338 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
339 if (rcStrict == VINF_SUCCESS)
340 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
341 if (rcStrict == VINF_SUCCESS)
342 {
343 pCtx->rsp = TmpRsp.u;
344 iemRegAddToRip(pIemCpu, cbInstr);
345 }
346 }
347 else
348 {
349 GCPtrBottom--;
350 uint16_t *pa16Mem = NULL;
351 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
352 if (rcStrict == VINF_SUCCESS)
353 {
354 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
355 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
356 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
357 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
358 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
359 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
360 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
361 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
362 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
363 if (rcStrict == VINF_SUCCESS)
364 {
365 iemRegSubFromRsp(pCtx, 16);
366 iemRegAddToRip(pIemCpu, cbInstr);
367 }
368 }
369 }
370 return rcStrict;
371}
372
373
374/**
375 * Implements a 32-bit pusha.
376 */
377IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
378{
379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
380 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
381 RTGCPTR GCPtrBottom = GCPtrTop - 31;
382 VBOXSTRICTRC rcStrict;
383
384 /*
385 * The docs are a bit hard to comprehend here, but it looks like we wrap
386 * around in real mode as long as none of the individual "pusha" crosses the
387 * end of the stack segment. In protected mode we check the whole access
388 * in one go. For efficiency, only do the word-by-word thing if we're in
389 * danger of wrapping around.
390 */
391 /** @todo do pusha boundary / wrap-around checks. */
392 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
393 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
394 {
395 /* word-by-word */
396 RTUINT64U TmpRsp;
397 TmpRsp.u = pCtx->rsp;
398 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
399 if (rcStrict == VINF_SUCCESS)
400 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
401 if (rcStrict == VINF_SUCCESS)
402 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
403 if (rcStrict == VINF_SUCCESS)
404 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
405 if (rcStrict == VINF_SUCCESS)
406 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
407 if (rcStrict == VINF_SUCCESS)
408 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
409 if (rcStrict == VINF_SUCCESS)
410 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
411 if (rcStrict == VINF_SUCCESS)
412 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
413 if (rcStrict == VINF_SUCCESS)
414 {
415 pCtx->rsp = TmpRsp.u;
416 iemRegAddToRip(pIemCpu, cbInstr);
417 }
418 }
419 else
420 {
421 GCPtrBottom--;
422 uint32_t *pa32Mem;
423 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
424 if (rcStrict == VINF_SUCCESS)
425 {
426 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
427 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
428 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
429 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
430 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
431 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
432 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
433 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
434 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
435 if (rcStrict == VINF_SUCCESS)
436 {
437 iemRegSubFromRsp(pCtx, 32);
438 iemRegAddToRip(pIemCpu, cbInstr);
439 }
440 }
441 }
442 return rcStrict;
443}
444
445
446/**
447 * Implements pushf.
448 *
450 * @param enmEffOpSize The effective operand size.
451 */
452IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
453{
454 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
455
456 /*
457 * If we're in V8086 mode some care is required (which is why we're
458 * doing this in a C implementation).
459 */
460 uint32_t fEfl = pCtx->eflags.u;
461 if ( (fEfl & X86_EFL_VM)
462 && X86_EFL_GET_IOPL(fEfl) != 3 )
463 {
464 Assert(pCtx->cr0 & X86_CR0_PE);
465 if ( enmEffOpSize != IEMMODE_16BIT
466 || !(pCtx->cr4 & X86_CR4_VME))
467 return iemRaiseGeneralProtectionFault0(pIemCpu);
468 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
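        /* With CR4.VME set, VIF (EFLAGS bit 19) is reflected into the IF
           position (bit 9) of the 16-bit image being pushed, hence the
           19 - 9 shift below. */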
469 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
470 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
471 }
472
473 /*
474 * Ok, clear RF and VM and push the flags.
475 */
476 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
477
478 VBOXSTRICTRC rcStrict;
479 switch (enmEffOpSize)
480 {
481 case IEMMODE_16BIT:
482 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
483 break;
484 case IEMMODE_32BIT:
485 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
486 break;
487 case IEMMODE_64BIT:
488 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
489 break;
490 IEM_NOT_REACHED_DEFAULT_CASE_RET();
491 }
492 if (rcStrict != VINF_SUCCESS)
493 return rcStrict;
494
495 iemRegAddToRip(pIemCpu, cbInstr);
496 return VINF_SUCCESS;
497}
498
499
500/**
501 * Implements popf.
502 *
503 * @param enmEffOpSize The effective operand size.
504 */
505IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
506{
507 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
508 uint32_t const fEflOld = pCtx->eflags.u;
509 VBOXSTRICTRC rcStrict;
510 uint32_t fEflNew;
511
512 /*
513 * V8086 is special as usual.
514 */
515 if (fEflOld & X86_EFL_VM)
516 {
517 /*
518 * Almost anything goes if IOPL is 3.
519 */
520 if (X86_EFL_GET_IOPL(fEflOld) == 3)
521 {
522 switch (enmEffOpSize)
523 {
524 case IEMMODE_16BIT:
525 {
526 uint16_t u16Value;
527 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
528 if (rcStrict != VINF_SUCCESS)
529 return rcStrict;
530 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
531 break;
532 }
533 case IEMMODE_32BIT:
534 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
535 if (rcStrict != VINF_SUCCESS)
536 return rcStrict;
537 break;
538 IEM_NOT_REACHED_DEFAULT_CASE_RET();
539 }
540
541 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
542 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
543 }
544 /*
545 * Interrupt flag virtualization with CR4.VME=1.
546 */
547 else if ( enmEffOpSize == IEMMODE_16BIT
548 && (pCtx->cr4 & X86_CR4_VME) )
549 {
550 uint16_t u16Value;
551 RTUINT64U TmpRsp;
552 TmpRsp.u = pCtx->rsp;
553 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
554 if (rcStrict != VINF_SUCCESS)
555 return rcStrict;
556
557 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
558 * or before? */
559 if ( ( (u16Value & X86_EFL_IF)
560 && (fEflOld & X86_EFL_VIP))
561 || (u16Value & X86_EFL_TF) )
562 return iemRaiseGeneralProtectionFault0(pIemCpu);
563
564 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
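        /* Under VME the popped IF value is transferred into VIF (bit 19); the
           real IF and IOPL are preserved from the old flags by the masking
           that follows. */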
565 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
566 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
567 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
568
569 pCtx->rsp = TmpRsp.u;
570 }
571 else
572 return iemRaiseGeneralProtectionFault0(pIemCpu);
573
574 }
575 /*
576 * Not in V8086 mode.
577 */
578 else
579 {
580 /* Pop the flags. */
581 switch (enmEffOpSize)
582 {
583 case IEMMODE_16BIT:
584 {
585 uint16_t u16Value;
586 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
587 if (rcStrict != VINF_SUCCESS)
588 return rcStrict;
589 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
590 break;
591 }
592 case IEMMODE_32BIT:
593 case IEMMODE_64BIT:
594 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
595 if (rcStrict != VINF_SUCCESS)
596 return rcStrict;
597 break;
598 IEM_NOT_REACHED_DEFAULT_CASE_RET();
599 }
600
601 /* Merge them with the current flags. */
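        /* CPL 0 may change both IOPL and IF; CPL <= IOPL may change IF but not
           IOPL; any other CPL may change neither (no fault is raised, the
           protected bits are simply kept). */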
602 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
603 || pIemCpu->uCpl == 0)
604 {
605 fEflNew &= X86_EFL_POPF_BITS;
606 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
607 }
608 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
609 {
610 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
611 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
612 }
613 else
614 {
615 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
616 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
617 }
618 }
619
620 /*
621 * Commit the flags.
622 */
623 Assert(fEflNew & RT_BIT_32(1));
624 pCtx->eflags.u = fEflNew;
625 iemRegAddToRip(pIemCpu, cbInstr);
626
627 return VINF_SUCCESS;
628}
629
630
631/**
632 * Implements an indirect call.
633 *
634 * @param uNewPC The new program counter (RIP) value (loaded from the
635 * operand).
637 */
638IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
639{
640 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
641 uint16_t uOldPC = pCtx->ip + cbInstr;
642 if (uNewPC > pCtx->cs.u32Limit)
643 return iemRaiseGeneralProtectionFault0(pIemCpu);
644
645 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
646 if (rcStrict != VINF_SUCCESS)
647 return rcStrict;
648
649 pCtx->rip = uNewPC;
650 return VINF_SUCCESS;
651
652}
653
654
655/**
656 * Implements a 16-bit relative call.
657 *
658 * @param offDisp The displacement offset.
659 */
660IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
661{
662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
663 uint16_t uOldPC = pCtx->ip + cbInstr;
664 uint16_t uNewPC = uOldPC + offDisp;
665 if (uNewPC > pCtx->cs.u32Limit)
666 return iemRaiseGeneralProtectionFault0(pIemCpu);
667
668 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
669 if (rcStrict != VINF_SUCCESS)
670 return rcStrict;
671
672 pCtx->rip = uNewPC;
673 return VINF_SUCCESS;
674}
675
676
677/**
678 * Implements a 32-bit indirect call.
679 *
680 * @param uNewPC The new program counter (RIP) value (loaded from the
681 * operand).
683 */
684IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
685{
686 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
687 uint32_t uOldPC = pCtx->eip + cbInstr;
688 if (uNewPC > pCtx->cs.u32Limit)
689 return iemRaiseGeneralProtectionFault0(pIemCpu);
690
691 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
692 if (rcStrict != VINF_SUCCESS)
693 return rcStrict;
694
695 pCtx->rip = uNewPC;
696 return VINF_SUCCESS;
697
698}
699
700
701/**
702 * Implements a 32-bit relative call.
703 *
704 * @param offDisp The displacement offset.
705 */
706IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
707{
708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
709 uint32_t uOldPC = pCtx->eip + cbInstr;
710 uint32_t uNewPC = uOldPC + offDisp;
711 if (uNewPC > pCtx->cs.u32Limit)
712 return iemRaiseGeneralProtectionFault0(pIemCpu);
713
714 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
715 if (rcStrict != VINF_SUCCESS)
716 return rcStrict;
717
718 pCtx->rip = uNewPC;
719 return VINF_SUCCESS;
720}
721
722
723/**
724 * Implements a 64-bit indirect call.
725 *
726 * @param uNewPC The new program counter (RIP) value (loaded from the
727 * operand).
729 */
730IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
731{
732 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
733 uint64_t uOldPC = pCtx->rip + cbInstr;
734 if (!IEM_IS_CANONICAL(uNewPC))
735 return iemRaiseGeneralProtectionFault0(pIemCpu);
736
737 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
738 if (rcStrict != VINF_SUCCESS)
739 return rcStrict;
740
741 pCtx->rip = uNewPC;
742 return VINF_SUCCESS;
743
744}
745
746
747/**
748 * Implements a 64-bit relative call.
749 *
750 * @param offDisp The displacement offset.
751 */
752IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
753{
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint64_t uOldPC = pCtx->rip + cbInstr;
756 uint64_t uNewPC = uOldPC + offDisp;
757 if (!IEM_IS_CANONICAL(uNewPC))
758 return iemRaiseNotCanonical(pIemCpu);
759
760 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
761 if (rcStrict != VINF_SUCCESS)
762 return rcStrict;
763
764 pCtx->rip = uNewPC;
765 return VINF_SUCCESS;
766}
767
768
769/**
770 * Implements far jumps and calls thru task segments (TSS).
771 *
772 * @param uSel The selector.
773 * @param enmBranch The kind of branching we're performing.
774 * @param enmEffOpSize The effective operand size.
775 * @param pDesc The descriptor corresponding to @a uSel. The type is
776 * task segment.
777 */
778IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
779{
780 /* Call various functions to do the work. */
781 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
782}
783
784
785/**
786 * Implements far jumps and calls thru task gates.
787 *
788 * @param uSel The selector.
789 * @param enmBranch The kind of branching we're performing.
790 * @param enmEffOpSize The effective operand size.
791 * @param pDesc The descriptor corresponding to @a uSel. The type is
792 * task gate.
793 */
794IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
795{
796 /* Call various functions to do the work. */
797 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
798}
799
800
801/**
802 * Implements far jumps and calls thru call gates.
803 *
804 * @param uSel The selector.
805 * @param enmBranch The kind of branching we're performing.
806 * @param enmEffOpSize The effective operand size.
807 * @param pDesc The descriptor corresponding to @a uSel. The type is
808 * call gate.
809 */
810IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
811{
812 /* Call various functions to do the work. */
813 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
814}
815
816
817/**
818 * Implements far jumps and calls thru system selectors.
819 *
820 * @param uSel The selector.
821 * @param enmBranch The kind of branching we're performing.
822 * @param enmEffOpSize The effective operand size.
823 * @param pDesc The descriptor corresponding to @a uSel.
824 */
825IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
826{
827 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
828 Assert((uSel & X86_SEL_MASK_OFF_RPL));
829
830 if (IEM_IS_LONG_MODE(pIemCpu))
831 switch (pDesc->Legacy.Gen.u4Type)
832 {
833 case AMD64_SEL_TYPE_SYS_CALL_GATE:
834 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
835
836 default:
837 case AMD64_SEL_TYPE_SYS_LDT:
838 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
839 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
840 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
841 case AMD64_SEL_TYPE_SYS_INT_GATE:
842 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
843 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
844
845 }
846
847 switch (pDesc->Legacy.Gen.u4Type)
848 {
849 case X86_SEL_TYPE_SYS_286_CALL_GATE:
850 case X86_SEL_TYPE_SYS_386_CALL_GATE:
851 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
852
853 case X86_SEL_TYPE_SYS_TASK_GATE:
854 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
855
856 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
857 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
858 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
859
860 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
861 Log(("branch %04x -> busy 286 TSS\n", uSel));
862 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
863
864 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
865 Log(("branch %04x -> busy 386 TSS\n", uSel));
866 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
867
868 default:
869 case X86_SEL_TYPE_SYS_LDT:
870 case X86_SEL_TYPE_SYS_286_INT_GATE:
871 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
872 case X86_SEL_TYPE_SYS_386_INT_GATE:
873 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
874 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
875 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
876 }
877}
878
879
880/**
881 * Implements far jumps.
882 *
883 * @param uSel The selector.
884 * @param offSeg The segment offset.
885 * @param enmEffOpSize The effective operand size.
886 */
887IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
888{
889 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
890 NOREF(cbInstr);
891 Assert(offSeg <= UINT32_MAX);
892
893 /*
894 * Real mode and V8086 mode are easy. The only snag seems to be that
895 * CS.limit doesn't change and the limit check is done against the current
896 * limit.
897 */
898 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
899 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
900 {
901 if (offSeg > pCtx->cs.u32Limit)
902 return iemRaiseGeneralProtectionFault0(pIemCpu);
903
904 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
905 pCtx->rip = offSeg;
906 else
907 pCtx->rip = offSeg & UINT16_MAX;
908 pCtx->cs.Sel = uSel;
909 pCtx->cs.ValidSel = uSel;
910 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
911 pCtx->cs.u64Base = (uint32_t)uSel << 4;
912 return VINF_SUCCESS;
913 }
914
915 /*
916 * Protected mode. Need to parse the specified descriptor...
917 */
918 if (!(uSel & X86_SEL_MASK_OFF_RPL))
919 {
920 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
921 return iemRaiseGeneralProtectionFault0(pIemCpu);
922 }
923
924 /* Fetch the descriptor. */
925 IEMSELDESC Desc;
926 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
927 if (rcStrict != VINF_SUCCESS)
928 return rcStrict;
929
930 /* Is it there? */
931 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
932 {
933 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
934 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
935 }
936
937 /*
938 * Deal with it according to its type. We do the standard code selectors
939 * here and dispatch the system selectors to worker functions.
940 */
941 if (!Desc.Legacy.Gen.u1DescType)
942 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
943
944 /* Only code segments. */
945 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
946 {
947 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
948 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
949 }
950
951 /* L vs D. */
952 if ( Desc.Legacy.Gen.u1Long
953 && Desc.Legacy.Gen.u1DefBig
954 && IEM_IS_LONG_MODE(pIemCpu))
955 {
956 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
957 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
958 }
959
960 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
961 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
962 {
963 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
964 {
965 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
966 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
967 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
968 }
969 }
970 else
971 {
972 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
973 {
974 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
975 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
976 }
977 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
978 {
979 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
980 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
981 }
982 }
983
984 /* Chop the high bits if 16-bit (Intel says so). */
985 if (enmEffOpSize == IEMMODE_16BIT)
986 offSeg &= UINT16_MAX;
987
988 /* Limit check. (Should alternatively check for non-canonical addresses
989 here, but that is ruled out by offSeg being 32-bit, right?) */
990 uint64_t u64Base;
991 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
992 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
993 u64Base = 0;
994 else
995 {
996 if (offSeg > cbLimit)
997 {
998 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
999 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1000 }
1001 u64Base = X86DESC_BASE(&Desc.Legacy);
1002 }
1003
1004 /*
1005 * Ok, everything checked out fine. Now set the accessed bit before
1006 * committing the result into CS, CSHID and RIP.
1007 */
1008 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1009 {
1010 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1011 if (rcStrict != VINF_SUCCESS)
1012 return rcStrict;
1013#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1014 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1015#endif
1016 }
1017
1018 /* commit */
1019 pCtx->rip = offSeg;
1020 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1021 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1022 pCtx->cs.ValidSel = pCtx->cs.Sel;
1023 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1024 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1025 pCtx->cs.u32Limit = cbLimit;
1026 pCtx->cs.u64Base = u64Base;
1027 /** @todo check if the hidden bits are loaded correctly for 64-bit
1028 * mode. */
1029 return VINF_SUCCESS;
1030}
1031
1032
1033/**
1034 * Implements far calls.
1035 *
1036 * This is very similar to iemCImpl_FarJmp.
1037 *
1038 * @param uSel The selector.
1039 * @param offSeg The segment offset.
1040 * @param enmEffOpSize The operand size (in case we need it).
1041 */
1042IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1043{
1044 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1045 VBOXSTRICTRC rcStrict;
1046 uint64_t uNewRsp;
1047 RTPTRUNION uPtrRet;
1048
1049 /*
1050 * Real mode and V8086 mode are easy. The only snag seems to be that
1051 * CS.limit doesn't change and the limit check is done against the current
1052 * limit.
1053 */
1054 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1055 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1056 {
1057 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1058
1059 /* Check stack first - may #SS(0). */
1060 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1061 &uPtrRet.pv, &uNewRsp);
1062 if (rcStrict != VINF_SUCCESS)
1063 return rcStrict;
1064
1065 /* Check the target address range. */
1066 if (offSeg > UINT32_MAX)
1067 return iemRaiseGeneralProtectionFault0(pIemCpu);
1068
1069 /* Everything is fine, push the return address. */
1070 if (enmEffOpSize == IEMMODE_16BIT)
1071 {
1072 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1073 uPtrRet.pu16[1] = pCtx->cs.Sel;
1074 }
1075 else
1076 {
1077 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1078 uPtrRet.pu16[3] = pCtx->cs.Sel;
1079 }
1080 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1081 if (rcStrict != VINF_SUCCESS)
1082 return rcStrict;
1083
1084 /* Branch. */
1085 pCtx->rip = offSeg;
1086 pCtx->cs.Sel = uSel;
1087 pCtx->cs.ValidSel = uSel;
1088 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1089 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1090 return VINF_SUCCESS;
1091 }
1092
1093 /*
1094 * Protected mode. Need to parse the specified descriptor...
1095 */
1096 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1097 {
1098 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1099 return iemRaiseGeneralProtectionFault0(pIemCpu);
1100 }
1101
1102 /* Fetch the descriptor. */
1103 IEMSELDESC Desc;
1104 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1105 if (rcStrict != VINF_SUCCESS)
1106 return rcStrict;
1107
1108 /*
1109 * Deal with it according to its type. We do the standard code selectors
1110 * here and dispatch the system selectors to worker functions.
1111 */
1112 if (!Desc.Legacy.Gen.u1DescType)
1113 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1114
1115 /* Only code segments. */
1116 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1117 {
1118 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1119 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1120 }
1121
1122 /* L vs D. */
1123 if ( Desc.Legacy.Gen.u1Long
1124 && Desc.Legacy.Gen.u1DefBig
1125 && IEM_IS_LONG_MODE(pIemCpu))
1126 {
1127 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1128 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1129 }
1130
1131 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1132 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1133 {
1134 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1135 {
1136 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1137 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1138 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1139 }
1140 }
1141 else
1142 {
1143 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1144 {
1145 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1146 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1147 }
1148 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1149 {
1150 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1151 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1152 }
1153 }
1154
1155 /* Is it there? */
1156 if (!Desc.Legacy.Gen.u1Present)
1157 {
1158 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1159 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1160 }
1161
1162 /* Check stack first - may #SS(0). */
1163 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1164 * 16-bit code cause a two or four byte CS to be pushed? */
1165 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1166 enmEffOpSize == IEMMODE_64BIT ? 8+8
1167 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1168 &uPtrRet.pv, &uNewRsp);
1169 if (rcStrict != VINF_SUCCESS)
1170 return rcStrict;
1171
1172 /* Chop the high bits if 16-bit (Intel says so). */
1173 if (enmEffOpSize == IEMMODE_16BIT)
1174 offSeg &= UINT16_MAX;
1175
1176 /* Limit / canonical check. */
1177 uint64_t u64Base;
1178 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1179 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1180 {
1181 if (!IEM_IS_CANONICAL(offSeg))
1182 {
1183 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1184 return iemRaiseNotCanonical(pIemCpu);
1185 }
1186 u64Base = 0;
1187 }
1188 else
1189 {
1190 if (offSeg > cbLimit)
1191 {
1192 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1193 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1194 }
1195 u64Base = X86DESC_BASE(&Desc.Legacy);
1196 }
1197
1198 /*
1199 * Now set the accessed bit before
1200 * writing the return address to the stack and committing the result into
1201 * CS, CSHID and RIP.
1202 */
1203 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1204 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1205 {
1206 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1207 if (rcStrict != VINF_SUCCESS)
1208 return rcStrict;
1209#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1210 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1211#endif
1212 }
1213
1214 /* stack */
1215 if (enmEffOpSize == IEMMODE_16BIT)
1216 {
1217 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1218 uPtrRet.pu16[1] = pCtx->cs.Sel;
1219 }
1220 else if (enmEffOpSize == IEMMODE_32BIT)
1221 {
1222 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1223 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1224 }
1225 else
1226 {
1227 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1228 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1229 }
1230 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1231 if (rcStrict != VINF_SUCCESS)
1232 return rcStrict;
1233
1234 /* commit */
1235 pCtx->rip = offSeg;
1236 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1237 pCtx->cs.Sel |= pIemCpu->uCpl;
1238 pCtx->cs.ValidSel = pCtx->cs.Sel;
1239 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1240 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1241 pCtx->cs.u32Limit = cbLimit;
1242 pCtx->cs.u64Base = u64Base;
1243 /** @todo check if the hidden bits are loaded correctly for 64-bit
1244 * mode. */
1245 return VINF_SUCCESS;
1246}
1247
1248
1249/**
1250 * Implements retf.
1251 *
1252 * @param enmEffOpSize The effective operand size.
1253 * @param cbPop The number of bytes of arguments to pop from the
1254 * stack.
1255 */
1256IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1257{
1258 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1259 VBOXSTRICTRC rcStrict;
1260 RTCPTRUNION uPtrFrame;
1261 uint64_t uNewRsp;
1262 uint64_t uNewRip;
1263 uint16_t uNewCs;
1264 NOREF(cbInstr);
1265
1266 /*
1267 * Read the stack values first.
1268 */
1269 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1270 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1271 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1272 if (rcStrict != VINF_SUCCESS)
1273 return rcStrict;
1274 if (enmEffOpSize == IEMMODE_16BIT)
1275 {
1276 uNewRip = uPtrFrame.pu16[0];
1277 uNewCs = uPtrFrame.pu16[1];
1278 }
1279 else if (enmEffOpSize == IEMMODE_32BIT)
1280 {
1281 uNewRip = uPtrFrame.pu32[0];
1282 uNewCs = uPtrFrame.pu16[2];
1283 }
1284 else
1285 {
1286 uNewRip = uPtrFrame.pu64[0];
1287 uNewCs = uPtrFrame.pu16[4];
1288 }
1289
1290 /*
1291 * Real mode and V8086 mode are easy.
1292 */
1293 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1294 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1295 {
1296 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1297 /** @todo check how this is supposed to work if sp=0xfffe. */
1298
1299 /* Check the limit of the new EIP. */
1300 /** @todo Intel pseudo code only does the limit check for 16-bit
1301 * operands, AMD does not make any distinction. What is right? */
1302 if (uNewRip > pCtx->cs.u32Limit)
1303 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1304
1305 /* commit the operation. */
1306 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1307 if (rcStrict != VINF_SUCCESS)
1308 return rcStrict;
1309 pCtx->rip = uNewRip;
1310 pCtx->cs.Sel = uNewCs;
1311 pCtx->cs.ValidSel = uNewCs;
1312 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1313 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1314 /** @todo do we load attribs and limit as well? */
1315 if (cbPop)
1316 iemRegAddToRsp(pCtx, cbPop);
1317 return VINF_SUCCESS;
1318 }
1319
1320 /*
1321 * Protected mode is complicated, of course.
1322 */
1323 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1324 {
1325 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1326 return iemRaiseGeneralProtectionFault0(pIemCpu);
1327 }
1328
1329 /* Fetch the descriptor. */
1330 IEMSELDESC DescCs;
1331 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1332 if (rcStrict != VINF_SUCCESS)
1333 return rcStrict;
1334
1335 /* Can only return to a code selector. */
1336 if ( !DescCs.Legacy.Gen.u1DescType
1337 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1338 {
1339 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1340 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1341 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1342 }
1343
1344 /* L vs D. */
1345 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1346 && DescCs.Legacy.Gen.u1DefBig
1347 && IEM_IS_LONG_MODE(pIemCpu))
1348 {
1349 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1350 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1351 }
1352
1353 /* DPL/RPL/CPL checks. */
1354 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1355 {
1356 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1357 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1358 }
1359
1360 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1361 {
1362 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1363 {
1364 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1365 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1366 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1367 }
1368 }
1369 else
1370 {
1371 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1372 {
1373 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1374 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1375 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1376 }
1377 }
1378
1379 /* Is it there? */
1380 if (!DescCs.Legacy.Gen.u1Present)
1381 {
1382 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1383 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1384 }
1385
1386 /*
1387 * Return to outer privilege? (We'll typically have entered via a call gate.)
1388 */
1389 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1390 {
1391 /* Read the return pointer, it comes before the parameters. */
1392 RTCPTRUNION uPtrStack;
1393 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1394 if (rcStrict != VINF_SUCCESS)
1395 return rcStrict;
1396 uint16_t uNewOuterSs;
1397 uint64_t uNewOuterRsp;
1398 if (enmEffOpSize == IEMMODE_16BIT)
1399 {
1400 uNewOuterRsp = uPtrFrame.pu16[0];
1401 uNewOuterSs = uPtrFrame.pu16[1];
1402 }
1403 else if (enmEffOpSize == IEMMODE_32BIT)
1404 {
1405 uNewOuterRsp = uPtrFrame.pu32[0];
1406 uNewOuterSs = uPtrFrame.pu16[2];
1407 }
1408 else
1409 {
1410 uNewOuterRsp = uPtrFrame.pu64[0];
1411 uNewOuterSs = uPtrFrame.pu16[4];
1412 }
1413
1414 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1415 and read the selector. */
1416 IEMSELDESC DescSs;
1417 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1418 {
1419 if ( !DescCs.Legacy.Gen.u1Long
1420 || (uNewOuterSs & X86_SEL_RPL) == 3)
1421 {
1422 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1423 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1424 return iemRaiseGeneralProtectionFault0(pIemCpu);
1425 }
1426 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1427 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1428 }
1429 else
1430 {
1431 /* Fetch the descriptor for the new stack segment. */
1432 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1433 if (rcStrict != VINF_SUCCESS)
1434 return rcStrict;
1435 }
1436
1437 /* Check that RPL of stack and code selectors match. */
1438 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1439 {
1440 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1441 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1442 }
1443
1444 /* Must be a writable data segment. */
1445 if ( !DescSs.Legacy.Gen.u1DescType
1446 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1447 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1448 {
1449 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1450 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1451 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1452 }
1453
1454 /* L vs D. (Not mentioned by intel.) */
1455 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1456 && DescSs.Legacy.Gen.u1DefBig
1457 && IEM_IS_LONG_MODE(pIemCpu))
1458 {
1459 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1460 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1461 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1462 }
1463
1464 /* DPL/RPL/CPL checks. */
1465 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1466 {
1467 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1468 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1469 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1470 }
1471
1472 /* Is it there? */
1473 if (!DescSs.Legacy.Gen.u1Present)
1474 {
1475 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1476 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1477 }
1478
1479 /* Calc SS limit. */
1480 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1481
1482 /* Is RIP canonical or within CS.limit? */
1483 uint64_t u64Base;
1484 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1485
1486 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1487 {
1488 if (!IEM_IS_CANONICAL(uNewRip))
1489 {
1490 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1491 return iemRaiseNotCanonical(pIemCpu);
1492 }
1493 u64Base = 0;
1494 }
1495 else
1496 {
1497 if (uNewRip > cbLimitCs)
1498 {
1499 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1500 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1501 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1502 }
1503 u64Base = X86DESC_BASE(&DescCs.Legacy);
1504 }
1505
1506 /*
1507 * Now set the accessed bits before committing the result into CS, SS,
1508 * their hidden parts and RIP.
1510 */
1511 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1512 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1513 {
1514 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1515 if (rcStrict != VINF_SUCCESS)
1516 return rcStrict;
1517#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1518 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1519#endif
1520 }
1521 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1522 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1523 {
1524 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1525 if (rcStrict != VINF_SUCCESS)
1526 return rcStrict;
1527#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1528 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1529#endif
1530 }
1531
1532 /* commit */
1533 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1534 if (rcStrict != VINF_SUCCESS)
1535 return rcStrict;
1536 if (enmEffOpSize == IEMMODE_16BIT)
1537 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1538 else
1539 pCtx->rip = uNewRip;
1540 pCtx->cs.Sel = uNewCs;
1541 pCtx->cs.ValidSel = uNewCs;
1542 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1543 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1544 pCtx->cs.u32Limit = cbLimitCs;
1545 pCtx->cs.u64Base = u64Base;
1546 pCtx->rsp = uNewRsp;
1547 pCtx->ss.Sel = uNewOuterSs;
1548 pCtx->ss.ValidSel = uNewOuterSs;
1549 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1550 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1551 pCtx->ss.u32Limit = cbLimitSs;
1552 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1553 pCtx->ss.u64Base = 0;
1554 else
1555 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1556
1557 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1558 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1559 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1560 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1561 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1562
1563 /** @todo check if the hidden bits are loaded correctly for 64-bit
1564 * mode. */
1565
1566 if (cbPop)
1567 iemRegAddToRsp(pCtx, cbPop);
1568
1569 /* Done! */
1570 }
1571 /*
1572 * Return to the same privilege level
1573 */
1574 else
1575 {
1576 /* Limit / canonical check. */
1577 uint64_t u64Base;
1578 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1579
1580 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1581 {
1582 if (!IEM_IS_CANONICAL(uNewRip))
1583 {
1584 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1585 return iemRaiseNotCanonical(pIemCpu);
1586 }
1587 u64Base = 0;
1588 }
1589 else
1590 {
1591 if (uNewRip > cbLimitCs)
1592 {
1593 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1594 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1595 }
1596 u64Base = X86DESC_BASE(&DescCs.Legacy);
1597 }
1598
1599 /*
1600 * Now set the accessed bit before committing the result into CS,
1601 * CSHID and RIP.
1603 */
1604 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1605 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1606 {
1607 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1608 if (rcStrict != VINF_SUCCESS)
1609 return rcStrict;
1610#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1611 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1612#endif
1613 }
1614
1615 /* commit */
1616 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1617 if (rcStrict != VINF_SUCCESS)
1618 return rcStrict;
1619 if (enmEffOpSize == IEMMODE_16BIT)
1620 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1621 else
1622 pCtx->rip = uNewRip;
1623 pCtx->cs.Sel = uNewCs;
1624 pCtx->cs.ValidSel = uNewCs;
1625 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1626 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1627 pCtx->cs.u32Limit = cbLimitCs;
1628 pCtx->cs.u64Base = u64Base;
1629 /** @todo check if the hidden bits are loaded correctly for 64-bit
1630 * mode. */
1631 if (cbPop)
1632 iemRegAddToRsp(pCtx, cbPop);
1633 }
1634 return VINF_SUCCESS;
1635}
1636
1637
1638/**
1639 * Implements retn.
1640 *
1641 * We're doing this in C because of the \#GP that might be raised if the popped
1642 * program counter is out of bounds.
1643 *
1644 * @param enmEffOpSize The effective operand size.
1645 * @param cbPop The number of bytes of arguments to pop from the
1646 * stack.
1647 */
1648IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1649{
1650 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1651 NOREF(cbInstr);
1652
1653 /* Fetch the RSP from the stack. */
1654 VBOXSTRICTRC rcStrict;
1655 RTUINT64U NewRip;
1656 RTUINT64U NewRsp;
1657 NewRsp.u = pCtx->rsp;
1658 switch (enmEffOpSize)
1659 {
1660 case IEMMODE_16BIT:
1661 NewRip.u = 0;
1662 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1663 break;
1664 case IEMMODE_32BIT:
1665 NewRip.u = 0;
1666 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1667 break;
1668 case IEMMODE_64BIT:
1669 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1670 break;
1671 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1672 }
1673 if (rcStrict != VINF_SUCCESS)
1674 return rcStrict;
1675
1676 /* Check the new RSP before loading it. */
1677 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1678 * of it. The canonical test is performed here and for call. */
1679 if (enmEffOpSize != IEMMODE_64BIT)
1680 {
1681 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1682 {
1683 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1684 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1685 }
1686 }
1687 else
1688 {
1689 if (!IEM_IS_CANONICAL(NewRip.u))
1690 {
1691 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1692 return iemRaiseNotCanonical(pIemCpu);
1693 }
1694 }
1695
1696 /* Commit it. */
1697 pCtx->rip = NewRip.u;
1698 pCtx->rsp = NewRsp.u;
1699 if (cbPop)
1700 iemRegAddToRsp(pCtx, cbPop);
1701
1702 return VINF_SUCCESS;
1703}
1704
1705
1706/**
1707 * Implements enter.
1708 *
1709 * We're doing this in C because the instruction is insane; even for the
1710 * u8NestingLevel=0 case, dealing with the stack is tedious.
1711 *
1712 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the stack frame to allocate (bytes).
 * @param cParameters The nesting level; masked to 5 bits.
1713 */
1714IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
1715{
1716 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1717
1718 /* Push RBP, saving the old value in TmpRbp. */
1719 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
1720 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
1721 RTUINT64U NewRbp;
1722 VBOXSTRICTRC rcStrict;
1723 if (enmEffOpSize == IEMMODE_64BIT)
1724 {
1725 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
1726 NewRbp = NewRsp;
1727 }
1728 else if (pCtx->ss.Attr.n.u1DefBig)
1729 {
1730 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
1731 NewRbp = NewRsp;
1732 }
1733 else
1734 {
1735 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
1736 NewRbp = TmpRbp;
1737 NewRbp.Words.w0 = NewRsp.Words.w0;
1738 }
1739 if (rcStrict != VINF_SUCCESS)
1740 return rcStrict;
1741
1742 /* Copy the parameters (aka nesting levels by Intel). */
1743 cParameters &= 0x1f;
1744 if (cParameters > 0)
1745 {
1746 switch (enmEffOpSize)
1747 {
1748 case IEMMODE_16BIT:
1749 if (pCtx->ss.Attr.n.u1DefBig)
1750 TmpRbp.DWords.dw0 -= 2;
1751 else
1752 TmpRbp.Words.w0 -= 2;
1753 do
1754 {
1755 uint16_t u16Tmp;
1756 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
1757 if (rcStrict != VINF_SUCCESS)
1758 break;
1759 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
1760 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1761 break;
1762
1763 case IEMMODE_32BIT:
1764 if (pCtx->ss.Attr.n.u1DefBig)
1765 TmpRbp.DWords.dw0 -= 4;
1766 else
1767 TmpRbp.Words.w0 -= 4;
1768 do
1769 {
1770 uint32_t u32Tmp;
1771 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
1772 if (rcStrict != VINF_SUCCESS)
1773 break;
1774 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
1775 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1776 break;
1777
1778 case IEMMODE_64BIT:
1779 TmpRbp.u -= 8;
1780 do
1781 {
1782 uint64_t u64Tmp;
1783 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
1784 if (rcStrict != VINF_SUCCESS)
1785 break;
1786 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
1787 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1788 break;
1789
1790 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1791 }
1792 if (rcStrict != VINF_SUCCESS)
1793            return rcStrict;
1794
1795 /* Push the new RBP */
1796 if (enmEffOpSize == IEMMODE_64BIT)
1797 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
1798 else if (pCtx->ss.Attr.n.u1DefBig)
1799 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
1800 else
1801 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
1802 if (rcStrict != VINF_SUCCESS)
1803 return rcStrict;
1804
1805 }
1806
1807 /* Recalc RSP. */
1808 iemRegSubFromRspEx(&NewRsp, cbFrame, pCtx);
1809
1810 /** @todo Should probe write access at the new RSP according to AMD. */
1811
1812 /* Commit it. */
1813 pCtx->rbp = NewRbp.u;
1814 pCtx->rsp = NewRsp.u;
1815 iemRegAddToRip(pIemCpu, cbInstr);
1816
1817 return VINF_SUCCESS;
1818}
1819
1820
1821
1822/**
1823 * Implements leave.
1824 *
1825 * We're doing this in C because messing with the stack registers is annoying
1826 * since they depend on SS attributes.
1827 *
1828 * @param enmEffOpSize The effective operand size.
1829 */
1830IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1831{
1832 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1833
1834 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1835 RTUINT64U NewRsp;
1836 if (pCtx->ss.Attr.n.u1Long)
1837 NewRsp.u = pCtx->rbp;
1838 else if (pCtx->ss.Attr.n.u1DefBig)
1839 NewRsp.u = pCtx->ebp;
1840 else
1841 {
1842 /** @todo Check that LEAVE actually preserve the high EBP bits. */
1843 NewRsp.u = pCtx->rsp;
1844 NewRsp.Words.w0 = pCtx->bp;
1845 }
1846
1847 /* Pop RBP according to the operand size. */
1848 VBOXSTRICTRC rcStrict;
1849 RTUINT64U NewRbp;
1850 switch (enmEffOpSize)
1851 {
1852 case IEMMODE_16BIT:
1853 NewRbp.u = pCtx->rbp;
1854 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1855 break;
1856 case IEMMODE_32BIT:
1857 NewRbp.u = 0;
1858 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1859 break;
1860 case IEMMODE_64BIT:
1861 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1862 break;
1863 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1864 }
1865 if (rcStrict != VINF_SUCCESS)
1866 return rcStrict;
1867
1868
1869 /* Commit it. */
1870 pCtx->rbp = NewRbp.u;
1871 pCtx->rsp = NewRsp.u;
1872 iemRegAddToRip(pIemCpu, cbInstr);
1873
1874 return VINF_SUCCESS;
1875}
1876
1877
1878/**
1879 * Implements int3 and int XX.
1880 *
1881 * @param u8Int The interrupt vector number.
1882 * @param   fIsBpInstr  Whether it is the breakpoint instruction (int3).
1883 */
1884IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1885{
1886 Assert(pIemCpu->cXcptRecursions == 0);
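    /* The heavy lifting is done by iemRaiseXcptOrInt; the soft-int flags let
       it apply the software-interrupt specific checks (IDT gate DPL vs. CPL)
       and skip pushing an error code. */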
1887 return iemRaiseXcptOrInt(pIemCpu,
1888 cbInstr,
1889 u8Int,
1890 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1891 0,
1892 0);
1893}
1894
1895
1896/**
1897 * Implements iret for real mode and V8086 mode.
1898 *
1899 * @param enmEffOpSize The effective operand size.
1900 */
1901IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1902{
1903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1904 NOREF(cbInstr);
1905
1906 /*
1907     * In V8086 mode, iret raises #GP(0) unless CR4.VME is enabled.
1908 */
1909 if ( pCtx->eflags.Bits.u1VM
1910 && !(pCtx->cr4 & X86_CR4_VME))
1911 return iemRaiseGeneralProtectionFault0(pIemCpu);
1912
1913 /*
1914 * Do the stack bits, but don't commit RSP before everything checks
1915 * out right.
1916 */
1917 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1918 VBOXSTRICTRC rcStrict;
1919 RTCPTRUNION uFrame;
1920 uint16_t uNewCs;
1921 uint32_t uNewEip;
1922 uint32_t uNewFlags;
1923 uint64_t uNewRsp;
1924 if (enmEffOpSize == IEMMODE_32BIT)
1925 {
1926 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1927 if (rcStrict != VINF_SUCCESS)
1928 return rcStrict;
1929 uNewEip = uFrame.pu32[0];
1930 uNewCs = (uint16_t)uFrame.pu32[1];
1931 uNewFlags = uFrame.pu32[2];
1932 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1933 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1934 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1935 | X86_EFL_ID;
1936 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1937 }
1938 else
1939 {
1940 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1941 if (rcStrict != VINF_SUCCESS)
1942 return rcStrict;
1943 uNewEip = uFrame.pu16[0];
1944 uNewCs = uFrame.pu16[1];
1945 uNewFlags = uFrame.pu16[2];
1946 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1947 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1948 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1949 /** @todo The intel pseudo code does not indicate what happens to
1950 * reserved flags. We just ignore them. */
1951 }
1952 /** @todo Check how this is supposed to work if sp=0xfffe. */
1953
1954 /*
1955 * Check the limit of the new EIP.
1956 */
1957    /** @todo Only the AMD pseudo code checks the limit here; what's
1958 * right? */
1959 if (uNewEip > pCtx->cs.u32Limit)
1960 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1961
1962 /*
1963 * V8086 checks and flag adjustments
1964 */
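    /* With IOPL=3 the IRET proceeds normally except that IOPL itself is
       preserved.  Under VME a 16-bit IRET is only allowed when it would not
       set TF and either leaves IF clear or VIP is clear; the popped IF then
       goes into VIF while IF and IOPL are preserved.  Anything else: #GP(0). */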
1965 if (pCtx->eflags.Bits.u1VM)
1966 {
1967 if (pCtx->eflags.Bits.u2IOPL == 3)
1968 {
1969 /* Preserve IOPL and clear RF. */
1970 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1971 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1972 }
1973 else if ( enmEffOpSize == IEMMODE_16BIT
1974 && ( !(uNewFlags & X86_EFL_IF)
1975 || !pCtx->eflags.Bits.u1VIP )
1976 && !(uNewFlags & X86_EFL_TF) )
1977 {
1978 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1979 uNewFlags &= ~X86_EFL_VIF;
1980 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1981 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1982 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1983 }
1984 else
1985 return iemRaiseGeneralProtectionFault0(pIemCpu);
1986 }
1987
1988 /*
1989 * Commit the operation.
1990 */
1991 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1992 if (rcStrict != VINF_SUCCESS)
1993 return rcStrict;
1994 pCtx->rip = uNewEip;
1995 pCtx->cs.Sel = uNewCs;
1996 pCtx->cs.ValidSel = uNewCs;
1997 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1998 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1999 /** @todo do we load attribs and limit as well? */
2000 Assert(uNewFlags & X86_EFL_1);
2001 pCtx->eflags.u = uNewFlags;
2002
2003 return VINF_SUCCESS;
2004}
2005
2006
2007/**
2008 * Implements iret for protected mode returning to V8086 mode.
2009 *
2010 * @param enmEffOpSize The effective operand size.
2011 * @param uNewEip The new EIP.
2012 * @param uNewCs The new CS.
2013 * @param uNewFlags The new EFLAGS.
2014 * @param uNewRsp The RSP after the initial IRET frame.
2015 */
2016IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, IEMMODE, enmEffOpSize, uint32_t, uNewEip, uint16_t, uNewCs,
2017 uint32_t, uNewFlags, uint64_t, uNewRsp)
2018{
2019 /** @todo NT4SP1 0008:8013bd5d cf iret */
2020 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2021}
2022
2023
2024/**
2025 * Implements iret for protected mode returning via a nested task.
2026 *
2027 * @param enmEffOpSize The effective operand size.
2028 */
2029IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2030{
2031 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2032}
2033
2034
2035/**
2036 * Implements iret for protected mode.
2037 *
2038 * @param enmEffOpSize The effective operand size.
2039 */
2040IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2041{
2042 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2043 NOREF(cbInstr);
2044
2045 /*
2046 * Nested task return.
2047 */
2048 if (pCtx->eflags.Bits.u1NT)
2049 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2050
2051 /*
2052 * Normal return.
2053 *
2054 * Do the stack bits, but don't commit RSP before everything checks
2055 * out right.
2056 */
2057 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2058 VBOXSTRICTRC rcStrict;
2059 RTCPTRUNION uFrame;
2060 uint16_t uNewCs;
2061 uint32_t uNewEip;
2062 uint32_t uNewFlags;
2063 uint64_t uNewRsp;
2064 if (enmEffOpSize == IEMMODE_32BIT)
2065 {
2066 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2067 if (rcStrict != VINF_SUCCESS)
2068 return rcStrict;
2069 uNewEip = uFrame.pu32[0];
2070 uNewCs = (uint16_t)uFrame.pu32[1];
2071 uNewFlags = uFrame.pu32[2];
2072 }
2073 else
2074 {
2075 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2076 if (rcStrict != VINF_SUCCESS)
2077 return rcStrict;
2078 uNewEip = uFrame.pu16[0];
2079 uNewCs = uFrame.pu16[1];
2080 uNewFlags = uFrame.pu16[2];
2081 }
2082 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2083 if (rcStrict != VINF_SUCCESS)
2084 return rcStrict;
2085
2086 /*
2087 * We're hopefully not returning to V8086 mode...
2088 */
2089 if ( (uNewFlags & X86_EFL_VM)
2090 && pIemCpu->uCpl == 0)
2091 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, enmEffOpSize, uNewEip, uNewCs, uNewFlags, uNewRsp);
2092
2093 /*
2094 * Protected mode.
2095 */
2096 /* Read the CS descriptor. */
2097 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2098 {
2099 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
2100 return iemRaiseGeneralProtectionFault0(pIemCpu);
2101 }
2102
2103 IEMSELDESC DescCS;
2104 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2105 if (rcStrict != VINF_SUCCESS)
2106 {
2107 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2108 return rcStrict;
2109 }
2110
2111 /* Must be a code descriptor. */
2112 if (!DescCS.Legacy.Gen.u1DescType)
2113 {
2114 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2115 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2116 }
2117 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2118 {
2119 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2120 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2121 }
2122
2123 /* Privilege checks. */
2124 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2125 {
2126 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2127 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2128 }
2129 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2130 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2131 {
2132 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2133 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2134 }
2135
2136 /* Present? */
2137 if (!DescCS.Legacy.Gen.u1Present)
2138 {
2139 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2140 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2141 }
2142
2143 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2144
2145 /*
2146 * Return to outer level?
2147 */
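    /* A popped CS.RPL above the current CPL means the IRET frame also holds
       the outer ESP and SS, which are popped and validated below. */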
2148 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2149 {
2150 uint16_t uNewSS;
2151 uint32_t uNewESP;
2152 if (enmEffOpSize == IEMMODE_32BIT)
2153 {
2154 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2155 if (rcStrict != VINF_SUCCESS)
2156 return rcStrict;
2157 uNewESP = uFrame.pu32[0];
2158 uNewSS = (uint16_t)uFrame.pu32[1];
2159 }
2160 else
2161 {
2162 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2163 if (rcStrict != VINF_SUCCESS)
2164 return rcStrict;
2165 uNewESP = uFrame.pu16[0];
2166 uNewSS = uFrame.pu16[1];
2167 }
2168 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2169 if (rcStrict != VINF_SUCCESS)
2170 return rcStrict;
2171
2172 /* Read the SS descriptor. */
2173 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2174 {
2175 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2176 return iemRaiseGeneralProtectionFault0(pIemCpu);
2177 }
2178
2179 IEMSELDESC DescSS;
2180 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2181 if (rcStrict != VINF_SUCCESS)
2182 {
2183 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2184 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2185 return rcStrict;
2186 }
2187
2188 /* Privilege checks. */
2189 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2190 {
2191 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2192 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2193 }
2194 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2195 {
2196 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2197 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2198 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2199 }
2200
2201 /* Must be a writeable data segment descriptor. */
2202 if (!DescSS.Legacy.Gen.u1DescType)
2203 {
2204 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2205 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2206 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2207 }
2208 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2209 {
2210 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2211 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2212 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2213 }
2214
2215 /* Present? */
2216 if (!DescSS.Legacy.Gen.u1Present)
2217 {
2218 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2219 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2220 }
2221
2222 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2223
2224 /* Check EIP. */
2225 if (uNewEip > cbLimitCS)
2226 {
2227 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2228 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2229 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2230 }
2231
2232 /*
2233 * Commit the changes, marking CS and SS accessed first since
2234 * that may fail.
2235 */
2236 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2237 {
2238 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2239 if (rcStrict != VINF_SUCCESS)
2240 return rcStrict;
2241 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2242 }
2243 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2244 {
2245 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2246 if (rcStrict != VINF_SUCCESS)
2247 return rcStrict;
2248 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2249 }
2250
2251 pCtx->rip = uNewEip;
2252 pCtx->cs.Sel = uNewCs;
2253 pCtx->cs.ValidSel = uNewCs;
2254 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2255 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2256 pCtx->cs.u32Limit = cbLimitCS;
2257 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2258 pCtx->rsp = uNewESP;
2259 pCtx->ss.Sel = uNewSS;
2260 pCtx->ss.ValidSel = uNewSS;
2261 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2262 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2263 pCtx->ss.u32Limit = cbLimitSs;
2264 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2265
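        /* Which EFLAGS bits IRET may restore depends on the privilege level:
           RF/AC/ID only with a 32-bit operand, IF only when CPL <= IOPL, and
           IOPL/VIF/VIP only at CPL 0 (VM stays 0). */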
2266 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2267 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2268 if (enmEffOpSize != IEMMODE_16BIT)
2269 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2270 if (pIemCpu->uCpl == 0)
2271 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2272 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2273 fEFlagsMask |= X86_EFL_IF;
2274 pCtx->eflags.u &= ~fEFlagsMask;
2275 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2276
2277 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2278 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2279 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2280 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2281 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2282
2283 /* Done! */
2284
2285 }
2286 /*
2287 * Return to the same level.
2288 */
2289 else
2290 {
2291 /* Check EIP. */
2292 if (uNewEip > cbLimitCS)
2293 {
2294 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2295 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2296 }
2297
2298 /*
2299 * Commit the changes, marking CS first since it may fail.
2300 */
2301 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2302 {
2303 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2304 if (rcStrict != VINF_SUCCESS)
2305 return rcStrict;
2306 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2307 }
2308
2309 pCtx->rip = uNewEip;
2310 pCtx->cs.Sel = uNewCs;
2311 pCtx->cs.ValidSel = uNewCs;
2312 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2313 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2314 pCtx->cs.u32Limit = cbLimitCS;
2315 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2316 pCtx->rsp = uNewRsp;
2317
2318 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2319 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2320 if (enmEffOpSize != IEMMODE_16BIT)
2321 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2322 if (pIemCpu->uCpl == 0)
2323 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2324 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2325 fEFlagsMask |= X86_EFL_IF;
2326 pCtx->eflags.u &= ~fEFlagsMask;
2327 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2328 /* Done! */
2329 }
2330 return VINF_SUCCESS;
2331}
2332
2333
2334/**
2335 * Implements iret for long mode.
2336 *
2337 * @param enmEffOpSize The effective operand size.
2338 */
2339IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2340{
2341 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2342 //VBOXSTRICTRC rcStrict;
2343 //uint64_t uNewRsp;
2344
2345 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2346 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2347}
2348
2349
2350/**
2351 * Implements iret.
2352 *
2353 * @param enmEffOpSize The effective operand size.
2354 */
2355IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2356{
2357 /*
2358 * Call a mode specific worker.
2359 */
2360 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2361 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2362 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2363 if (IEM_IS_LONG_MODE(pIemCpu))
2364 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2365
2366 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2367}
2368
2369
2370/**
2371 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2372 *
2373 * @param iSegReg The segment register number (valid).
2374 * @param uSel The new selector value.
2375 */
2376IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2377{
2378 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2379 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2380 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2381
2382 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2383
2384 /*
2385 * Real mode and V8086 mode are easy.
2386 */
2387 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2388 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2389 {
2390 *pSel = uSel;
2391 pHid->u64Base = (uint32_t)uSel << 4;
2392 pHid->ValidSel = uSel;
2393 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2394#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2395 /** @todo Does the CPU actually load limits and attributes in the
2396 * real/V8086 mode segment load case? It doesn't for CS in far
2397 * jumps... Affects unreal mode. */
2398 pHid->u32Limit = 0xffff;
2399 pHid->Attr.u = 0;
2400 pHid->Attr.n.u1Present = 1;
2401 pHid->Attr.n.u1DescType = 1;
2402 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2403 ? X86_SEL_TYPE_RW
2404 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2405#endif
2406 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2407 iemRegAddToRip(pIemCpu, cbInstr);
2408 return VINF_SUCCESS;
2409 }
2410
2411 /*
2412 * Protected mode.
2413 *
2414 * Check if it's a null segment selector value first, that's OK for DS, ES,
2415 * FS and GS. If not null, then we have to load and parse the descriptor.
2416 */
2417 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2418 {
2419 if (iSegReg == X86_SREG_SS)
2420 {
2421 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2422 || pIemCpu->uCpl != 0
2423 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2424 {
2425                Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2426 return iemRaiseGeneralProtectionFault0(pIemCpu);
2427 }
2428
2429 /* In 64-bit kernel mode, the stack can be 0 because of the way
2430 interrupts are dispatched when in kernel ctx. Just load the
2431 selector value into the register and leave the hidden bits
2432 as is. */
2433 *pSel = uSel;
2434 pHid->ValidSel = uSel;
2435 iemRegAddToRip(pIemCpu, cbInstr);
2436 return VINF_SUCCESS;
2437 }
2438
2439 *pSel = uSel; /* Not RPL, remember :-) */
2440 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2441 && iSegReg != X86_SREG_FS
2442 && iSegReg != X86_SREG_GS)
2443 {
2444 /** @todo figure out what this actually does, it works. Needs
2445 * testcase! */
2446 pHid->Attr.u = 0;
2447 pHid->Attr.n.u1Present = 1;
2448 pHid->Attr.n.u1Long = 1;
2449 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2450 pHid->Attr.n.u2Dpl = 3;
2451 pHid->u32Limit = 0;
2452 pHid->u64Base = 0;
2453 pHid->ValidSel = uSel;
2454 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2455 }
2456 else
2457 iemHlpLoadNullDataSelectorProt(pHid, uSel);
2458 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2459 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2460
2461 iemRegAddToRip(pIemCpu, cbInstr);
2462 return VINF_SUCCESS;
2463 }
2464
2465 /* Fetch the descriptor. */
2466 IEMSELDESC Desc;
2467 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2468 if (rcStrict != VINF_SUCCESS)
2469 return rcStrict;
2470
2471 /* Check GPs first. */
2472 if (!Desc.Legacy.Gen.u1DescType)
2473 {
2474        Log(("load sreg %d, %#x - system selector (type %#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2475 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2476 }
2477 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2478 {
2479 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2480 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2481 {
2482 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2483 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2484 }
2485 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2486 {
2487 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2488 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2489 }
2490 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2491 {
2492 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2493 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2494 }
2495 }
2496 else
2497 {
2498 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2499 {
2500 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2501 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2502 }
2503 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2504 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2505 {
2506#if 0 /* this is what intel says. */
2507 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2508 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2509 {
2510 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2511 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2512 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2513 }
2514#else /* this is what makes more sense. */
2515 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2516 {
2517 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2518 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2519 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2520 }
2521 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2522 {
2523 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2524 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2525 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2526 }
2527#endif
2528 }
2529 }
2530
2531 /* Is it there? */
2532 if (!Desc.Legacy.Gen.u1Present)
2533 {
2534 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2535 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2536 }
2537
2538 /* The base and limit. */
2539 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2540 uint64_t u64Base;
2541 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2542 && iSegReg < X86_SREG_FS)
2543 u64Base = 0;
2544 else
2545 u64Base = X86DESC_BASE(&Desc.Legacy);
2546
2547 /*
2548 * Ok, everything checked out fine. Now set the accessed bit before
2549 * committing the result into the registers.
2550 */
2551 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2552 {
2553 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2554 if (rcStrict != VINF_SUCCESS)
2555 return rcStrict;
2556 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2557 }
2558
2559 /* commit */
2560 *pSel = uSel;
2561 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2562 pHid->u32Limit = cbLimit;
2563 pHid->u64Base = u64Base;
2564 pHid->ValidSel = uSel;
2565 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2566
2567 /** @todo check if the hidden bits are loaded correctly for 64-bit
2568 * mode. */
2569 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2570
2571 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2572 iemRegAddToRip(pIemCpu, cbInstr);
2573 return VINF_SUCCESS;
2574}
2575
2576
2577/**
2578 * Implements 'mov SReg, r/m'.
2579 *
2580 * @param iSegReg The segment register number (valid).
2581 * @param uSel The new selector value.
2582 */
2583IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2584{
2585 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2586 if (rcStrict == VINF_SUCCESS)
2587 {
2588 if (iSegReg == X86_SREG_SS)
2589 {
2590 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2591 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2592 }
2593 }
2594 return rcStrict;
2595}
2596
2597
2598/**
2599 * Implements 'pop SReg'.
2600 *
2601 * @param iSegReg The segment register number (valid).
2602 * @param   enmEffOpSize    The effective operand size (valid).
2603 */
2604IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2605{
2606 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2607 VBOXSTRICTRC rcStrict;
2608
2609 /*
2610 * Read the selector off the stack and join paths with mov ss, reg.
2611 */
2612 RTUINT64U TmpRsp;
2613 TmpRsp.u = pCtx->rsp;
2614 switch (enmEffOpSize)
2615 {
2616 case IEMMODE_16BIT:
2617 {
2618 uint16_t uSel;
2619 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2620 if (rcStrict == VINF_SUCCESS)
2621 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2622 break;
2623 }
2624
2625 case IEMMODE_32BIT:
2626 {
2627 uint32_t u32Value;
2628 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2629 if (rcStrict == VINF_SUCCESS)
2630 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2631 break;
2632 }
2633
2634 case IEMMODE_64BIT:
2635 {
2636 uint64_t u64Value;
2637 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2638 if (rcStrict == VINF_SUCCESS)
2639 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2640 break;
2641 }
2642 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2643 }
2644
2645 /*
2646 * Commit the stack on success.
2647 */
2648 if (rcStrict == VINF_SUCCESS)
2649 {
2650 pCtx->rsp = TmpRsp.u;
2651 if (iSegReg == X86_SREG_SS)
2652 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2653 }
2654 return rcStrict;
2655}
2656
2657
2658/**
2659 * Implements lgs, lfs, les, lds & lss.
2660 */
2661IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2662 uint16_t, uSel,
2663 uint64_t, offSeg,
2664 uint8_t, iSegReg,
2665 uint8_t, iGReg,
2666 IEMMODE, enmEffOpSize)
2667{
2668 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2669 VBOXSTRICTRC rcStrict;
2670
2671 /*
2672 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2673 */
2674    /** @todo verify and test that mov, pop and lXs perform the segment
2675     *        register loading in the exact same way. */
2676 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2677 if (rcStrict == VINF_SUCCESS)
2678 {
2679 switch (enmEffOpSize)
2680 {
2681 case IEMMODE_16BIT:
2682 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2683 break;
2684 case IEMMODE_32BIT:
2685 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2686 break;
2687 case IEMMODE_64BIT:
2688 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2689 break;
2690 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2691 }
2692 }
2693
2694 return rcStrict;
2695}
2696
2697
2698/**
2699 * Implements lgdt.
2700 *
2701 * @param   iEffSeg         The segment of the new gdtr contents.
2702 * @param   GCPtrEffSrc     The address of the new gdtr contents.
2703 * @param enmEffOpSize The effective operand size.
2704 */
2705IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2706{
2707 if (pIemCpu->uCpl != 0)
2708 return iemRaiseGeneralProtectionFault0(pIemCpu);
2709 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2710
2711 /*
2712 * Fetch the limit and base address.
2713 */
2714 uint16_t cbLimit;
2715 RTGCPTR GCPtrBase;
2716 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2717 if (rcStrict == VINF_SUCCESS)
2718 {
2719 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2720 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2721 else
2722 {
2723 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2724 pCtx->gdtr.cbGdt = cbLimit;
2725 pCtx->gdtr.pGdt = GCPtrBase;
2726 }
2727 if (rcStrict == VINF_SUCCESS)
2728 iemRegAddToRip(pIemCpu, cbInstr);
2729 }
2730 return rcStrict;
2731}
2732
2733
2734/**
2735 * Implements sgdt.
2736 *
2737 * @param iEffSeg The segment where to store the gdtr content.
2738 * @param GCPtrEffDst The address where to store the gdtr content.
2739 * @param enmEffOpSize The effective operand size.
2740 */
2741IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2742{
2743 /*
2744 * Join paths with sidt.
2745 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2746 * you really must know.
2747 */
2748 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2749 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2750 if (rcStrict == VINF_SUCCESS)
2751 iemRegAddToRip(pIemCpu, cbInstr);
2752 return rcStrict;
2753}
2754
2755
2756/**
2757 * Implements lidt.
2758 *
2759 * @param   iEffSeg         The segment of the new idtr contents.
2760 * @param   GCPtrEffSrc     The address of the new idtr contents.
2761 * @param enmEffOpSize The effective operand size.
2762 */
2763IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2764{
2765 if (pIemCpu->uCpl != 0)
2766 return iemRaiseGeneralProtectionFault0(pIemCpu);
2767 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2768
2769 /*
2770 * Fetch the limit and base address.
2771 */
2772 uint16_t cbLimit;
2773 RTGCPTR GCPtrBase;
2774 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2775 if (rcStrict == VINF_SUCCESS)
2776 {
2777 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2778 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2779 else
2780 {
2781 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2782 pCtx->idtr.cbIdt = cbLimit;
2783 pCtx->idtr.pIdt = GCPtrBase;
2784 }
2785 iemRegAddToRip(pIemCpu, cbInstr);
2786 }
2787 return rcStrict;
2788}
2789
2790
2791/**
2792 * Implements sidt.
2793 *
2794 * @param iEffSeg The segment where to store the idtr content.
2795 * @param GCPtrEffDst The address where to store the idtr content.
2796 * @param enmEffOpSize The effective operand size.
2797 */
2798IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2799{
2800 /*
2801 * Join paths with sgdt.
2802 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2803 * you really must know.
2804 */
2805 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2806 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2807 if (rcStrict == VINF_SUCCESS)
2808 iemRegAddToRip(pIemCpu, cbInstr);
2809 return rcStrict;
2810}
2811
2812
2813/**
2814 * Implements lldt.
2815 *
2816 * @param uNewLdt The new LDT selector value.
2817 */
2818IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2819{
2820 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2821
2822 /*
2823 * Check preconditions.
2824 */
2825 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2826 {
2827 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2828 return iemRaiseUndefinedOpcode(pIemCpu);
2829 }
2830 if (pIemCpu->uCpl != 0)
2831 {
2832 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2833 return iemRaiseGeneralProtectionFault0(pIemCpu);
2834 }
2835 if (uNewLdt & X86_SEL_LDT)
2836 {
2837 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2838 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2839 }
2840
2841 /*
2842 * Now, loading a NULL selector is easy.
2843 */
2844 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2845 {
2846 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2847 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2848 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
2849 else
2850 pCtx->ldtr.Sel = uNewLdt;
2851 pCtx->ldtr.ValidSel = uNewLdt;
2852 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2853 if (IEM_IS_GUEST_CPU_AMD(pIemCpu) && !IEM_VERIFICATION_ENABLED(pIemCpu))
2854 pCtx->ldtr.Attr.u = 0;
2855 else
2856 {
2857 pCtx->ldtr.u64Base = 0;
2858 pCtx->ldtr.u32Limit = 0;
2859 }
2860
2861 iemRegAddToRip(pIemCpu, cbInstr);
2862 return VINF_SUCCESS;
2863 }
2864
2865 /*
2866 * Read the descriptor.
2867 */
2868 IEMSELDESC Desc;
2869 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2870 if (rcStrict != VINF_SUCCESS)
2871 return rcStrict;
2872
2873 /* Check GPs first. */
2874 if (Desc.Legacy.Gen.u1DescType)
2875 {
2876 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2877 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2878 }
2879 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2880 {
2881 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2882 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2883 }
2884 uint64_t u64Base;
2885 if (!IEM_IS_LONG_MODE(pIemCpu))
2886 u64Base = X86DESC_BASE(&Desc.Legacy);
2887 else
2888 {
2889 if (Desc.Long.Gen.u5Zeros)
2890 {
2891 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2892 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2893 }
2894
2895 u64Base = X86DESC64_BASE(&Desc.Long);
2896 if (!IEM_IS_CANONICAL(u64Base))
2897 {
2898 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2899 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2900 }
2901 }
2902
2903 /* NP */
2904 if (!Desc.Legacy.Gen.u1Present)
2905 {
2906 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2907 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2908 }
2909
2910 /*
2911 * It checks out alright, update the registers.
2912 */
2913/** @todo check if the actual value is loaded or if the RPL is dropped */
2914 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2915 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
2916 else
2917 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
2918 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
2919 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2920 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2921 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
2922 pCtx->ldtr.u64Base = u64Base;
2923
2924 iemRegAddToRip(pIemCpu, cbInstr);
2925 return VINF_SUCCESS;
2926}
2927
2928
2929/**
2930 * Implements ltr.
2931 *
2932 * @param   uNewTr      The new TSS selector value.
2933 */
2934IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2935{
2936 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2937
2938 /*
2939 * Check preconditions.
2940 */
2941 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2942 {
2943 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2944 return iemRaiseUndefinedOpcode(pIemCpu);
2945 }
2946 if (pIemCpu->uCpl != 0)
2947 {
2948 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2949 return iemRaiseGeneralProtectionFault0(pIemCpu);
2950 }
2951 if (uNewTr & X86_SEL_LDT)
2952 {
2953 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2954 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2955 }
2956 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
2957 {
2958 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2959 return iemRaiseGeneralProtectionFault0(pIemCpu);
2960 }
2961
2962 /*
2963 * Read the descriptor.
2964 */
2965 IEMSELDESC Desc;
2966 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2967 if (rcStrict != VINF_SUCCESS)
2968 return rcStrict;
2969
2970 /* Check GPs first. */
2971 if (Desc.Legacy.Gen.u1DescType)
2972 {
2973 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2974 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2975 }
2976 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2977 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2978 || IEM_IS_LONG_MODE(pIemCpu)) )
2979 {
2980 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2981 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2982 }
2983 uint64_t u64Base;
2984 if (!IEM_IS_LONG_MODE(pIemCpu))
2985 u64Base = X86DESC_BASE(&Desc.Legacy);
2986 else
2987 {
2988 if (Desc.Long.Gen.u5Zeros)
2989 {
2990 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2991 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2992 }
2993
2994 u64Base = X86DESC64_BASE(&Desc.Long);
2995 if (!IEM_IS_CANONICAL(u64Base))
2996 {
2997 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2998 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2999 }
3000 }
3001
3002 /* NP */
3003 if (!Desc.Legacy.Gen.u1Present)
3004 {
3005 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
3006 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
3007 }
3008
3009 /*
3010 * Set it busy.
3011 * Note! Intel says this should lock down the whole descriptor, but we'll
3012     *       restrict ourselves to 32-bit for now due to lack of inline
3013 * assembly and such.
3014 */
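    /* The busy bit is bit 41 of the descriptor (bit 1 of the type byte); the
       switch below advances the pointer to the next 4-byte aligned address and
       shrinks the bit index so the locked bit-set hits an aligned dword. */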
3015 void *pvDesc;
3016    rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
3017 if (rcStrict != VINF_SUCCESS)
3018 return rcStrict;
3019 switch ((uintptr_t)pvDesc & 3)
3020 {
3021 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
3022 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
3023        case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
3024        case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 -  8); break;
3025 }
3026    rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
3027 if (rcStrict != VINF_SUCCESS)
3028 return rcStrict;
3029 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3030
3031 /*
3032 * It checks out alright, update the registers.
3033 */
3034/** @todo check if the actual value is loaded or if the RPL is dropped */
3035 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3036 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
3037 else
3038 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
3039 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
3040 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3041 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3042 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3043 pCtx->tr.u64Base = u64Base;
3044
3045 iemRegAddToRip(pIemCpu, cbInstr);
3046 return VINF_SUCCESS;
3047}
3048
3049
3050/**
3051 * Implements mov GReg,CRx.
3052 *
3053 * @param iGReg The general register to store the CRx value in.
3054 * @param iCrReg The CRx register to read (valid).
3055 */
3056IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
3057{
3058 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3059 if (pIemCpu->uCpl != 0)
3060 return iemRaiseGeneralProtectionFault0(pIemCpu);
3061 Assert(!pCtx->eflags.Bits.u1VM);
3062
3063 /* read it */
3064 uint64_t crX;
3065 switch (iCrReg)
3066 {
3067 case 0: crX = pCtx->cr0; break;
3068 case 2: crX = pCtx->cr2; break;
3069 case 3: crX = pCtx->cr3; break;
3070 case 4: crX = pCtx->cr4; break;
3071 case 8:
3072 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3073 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
3074 else
3075 crX = 0xff;
3076 break;
3077 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3078 }
3079
3080 /* store it */
3081 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3082 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
3083 else
3084 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
3085
3086 iemRegAddToRip(pIemCpu, cbInstr);
3087 return VINF_SUCCESS;
3088}
3089
3090
3091/**
3092 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
3093 *
3094 * @param iCrReg The CRx register to write (valid).
3095 * @param uNewCrX The new value.
3096 */
3097IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
3098{
3099 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3100 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3101 VBOXSTRICTRC rcStrict;
3102 int rc;
3103
3104 /*
3105     * Try to store it.
3106 * Unfortunately, CPUM only does a tiny bit of the work.
3107 */
3108 switch (iCrReg)
3109 {
3110 case 0:
3111 {
3112 /*
3113 * Perform checks.
3114 */
3115 uint64_t const uOldCrX = pCtx->cr0;
3116 uNewCrX |= X86_CR0_ET; /* hardcoded */
3117
3118 /* Check for reserved bits. */
3119 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
3120 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
3121 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
3122 if (uNewCrX & ~(uint64_t)fValid)
3123 {
3124 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3125 return iemRaiseGeneralProtectionFault0(pIemCpu);
3126 }
3127
3128 /* Check for invalid combinations. */
3129 if ( (uNewCrX & X86_CR0_PG)
3130 && !(uNewCrX & X86_CR0_PE) )
3131 {
3132 Log(("Trying to set CR0.PG without CR0.PE\n"));
3133 return iemRaiseGeneralProtectionFault0(pIemCpu);
3134 }
3135
3136 if ( !(uNewCrX & X86_CR0_CD)
3137 && (uNewCrX & X86_CR0_NW) )
3138 {
3139 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
3140 return iemRaiseGeneralProtectionFault0(pIemCpu);
3141 }
3142
3143 /* Long mode consistency checks. */
3144 if ( (uNewCrX & X86_CR0_PG)
3145 && !(uOldCrX & X86_CR0_PG)
3146 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3147 {
3148 if (!(pCtx->cr4 & X86_CR4_PAE))
3149 {
3150                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
3151 return iemRaiseGeneralProtectionFault0(pIemCpu);
3152 }
3153 if (pCtx->cs.Attr.n.u1Long)
3154 {
3155                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
3156 return iemRaiseGeneralProtectionFault0(pIemCpu);
3157 }
3158 }
3159
3160 /** @todo check reserved PDPTR bits as AMD states. */
3161
3162 /*
3163 * Change CR0.
3164 */
3165 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3166 CPUMSetGuestCR0(pVCpu, uNewCrX);
3167 else
3168 pCtx->cr0 = uNewCrX;
3169 Assert(pCtx->cr0 == uNewCrX);
3170
3171 /*
3172 * Change EFER.LMA if entering or leaving long mode.
3173 */
3174 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3175 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3176 {
3177 uint64_t NewEFER = pCtx->msrEFER;
3178 if (uNewCrX & X86_CR0_PG)
3179 NewEFER |= MSR_K6_EFER_LME;
3180 else
3181 NewEFER &= ~MSR_K6_EFER_LME;
3182
3183 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3184 CPUMSetGuestEFER(pVCpu, NewEFER);
3185 else
3186 pCtx->msrEFER = NewEFER;
3187 Assert(pCtx->msrEFER == NewEFER);
3188 }
3189
3190 /*
3191 * Inform PGM.
3192 */
3193 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3194 {
3195 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3196 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3197 {
3198 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3199 AssertRCReturn(rc, rc);
3200 /* ignore informational status codes */
3201 }
3202 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3203 }
3204 else
3205 rcStrict = VINF_SUCCESS;
3206 break;
3207 }
3208
3209 /*
3210 * CR2 can be changed without any restrictions.
3211 */
3212 case 2:
3213 pCtx->cr2 = uNewCrX;
3214 rcStrict = VINF_SUCCESS;
3215 break;
3216
3217 /*
3218 * CR3 is relatively simple, although AMD and Intel have different
3219     * accounts of how setting reserved bits is handled. We take Intel's
3220 * word for the lower bits and AMD's for the high bits (63:52).
3221 */
3222 /** @todo Testcase: Setting reserved bits in CR3, especially before
3223 * enabling paging. */
3224 case 3:
3225 {
3226 /* check / mask the value. */
3227 if (uNewCrX & UINT64_C(0xfff0000000000000))
3228 {
3229 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3230 return iemRaiseGeneralProtectionFault0(pIemCpu);
3231 }
3232
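            /* Bits the guest may actually set: the physical address field for
               the current paging mode plus a couple of low control bits; the
               rest is logged and silently cleared rather than faulting. */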
3233 uint64_t fValid;
3234 if ( (pCtx->cr4 & X86_CR4_PAE)
3235 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3236 fValid = UINT64_C(0x000ffffffffff014);
3237 else if (pCtx->cr4 & X86_CR4_PAE)
3238 fValid = UINT64_C(0xfffffff4);
3239 else
3240 fValid = UINT64_C(0xfffff014);
3241 if (uNewCrX & ~fValid)
3242 {
3243 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3244 uNewCrX, uNewCrX & ~fValid));
3245 uNewCrX &= fValid;
3246 }
3247
3248 /** @todo If we're in PAE mode we should check the PDPTRs for
3249 * invalid bits. */
3250
3251 /* Make the change. */
3252 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3253 {
3254 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3255 AssertRCSuccessReturn(rc, rc);
3256 }
3257 else
3258 pCtx->cr3 = uNewCrX;
3259
3260 /* Inform PGM. */
3261 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3262 {
3263 if (pCtx->cr0 & X86_CR0_PG)
3264 {
3265                    rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3266 AssertRCReturn(rc, rc);
3267 /* ignore informational status codes */
3268 }
3269 }
3270 rcStrict = VINF_SUCCESS;
3271 break;
3272 }
3273
3274 /*
3275 * CR4 is a bit more tedious as there are bits which cannot be cleared
3276 * under some circumstances and such.
3277 */
3278 case 4:
3279 {
3280 uint64_t const uOldCrX = pCtx->cr4;
3281
3282 /* reserved bits */
3283 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3284 | X86_CR4_TSD | X86_CR4_DE
3285 | X86_CR4_PSE | X86_CR4_PAE
3286 | X86_CR4_MCE | X86_CR4_PGE
3287 | X86_CR4_PCE | X86_CR4_OSFSXR
3288 | X86_CR4_OSXMMEEXCPT;
3289 //if (xxx)
3290 // fValid |= X86_CR4_VMXE;
3291 //if (xxx)
3292 // fValid |= X86_CR4_OSXSAVE;
3293 if (uNewCrX & ~(uint64_t)fValid)
3294 {
3295 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3296 return iemRaiseGeneralProtectionFault0(pIemCpu);
3297 }
3298
3299 /* long mode checks. */
3300 if ( (uOldCrX & X86_CR4_PAE)
3301 && !(uNewCrX & X86_CR4_PAE)
3302 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3303 {
3304                Log(("Trying to clear CR4.PAE while long mode is active\n"));
3305 return iemRaiseGeneralProtectionFault0(pIemCpu);
3306 }
3307
3308
3309 /*
3310 * Change it.
3311 */
3312 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3313 {
3314 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3315 AssertRCSuccessReturn(rc, rc);
3316 }
3317 else
3318 pCtx->cr4 = uNewCrX;
3319 Assert(pCtx->cr4 == uNewCrX);
3320
3321 /*
3322 * Notify SELM and PGM.
3323 */
3324 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3325 {
3326 /* SELM - VME may change things wrt to the TSS shadowing. */
3327 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3328 {
3329 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3330 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3331 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3332 }
3333
3334 /* PGM - flushing and mode. */
3335 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3336 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3337 {
3338 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3339 AssertRCReturn(rc, rc);
3340 /* ignore informational status codes */
3341 }
3342 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3343 }
3344 else
3345 rcStrict = VINF_SUCCESS;
3346 break;
3347 }
3348
3349 /*
3350 * CR8 maps to the APIC TPR.
3351 */
3352 case 8:
3353 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3354                IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR write\n")); /** @todo implement CR8 reading and writing. */
3355 else
3356 rcStrict = VINF_SUCCESS;
3357 break;
3358
3359 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3360 }
3361
3362 /*
3363 * Advance the RIP on success.
3364 */
3365 if (RT_SUCCESS(rcStrict))
3366 {
3367 if (rcStrict != VINF_SUCCESS)
3368 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3369 iemRegAddToRip(pIemCpu, cbInstr);
3370 }
3371
3372 return rcStrict;
3373}
3374
3375
3376/**
3377 * Implements mov CRx,GReg.
3378 *
3379 * @param iCrReg The CRx register to write (valid).
3380 * @param   iGReg           The general register to load the CRx value from.
3381 */
3382IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3383{
3384 if (pIemCpu->uCpl != 0)
3385 return iemRaiseGeneralProtectionFault0(pIemCpu);
3386 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3387
3388 /*
3389 * Read the new value from the source register and call common worker.
3390 */
3391 uint64_t uNewCrX;
3392 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3393 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3394 else
3395 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3396 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3397}
3398
3399
3400/**
3401 * Implements 'LMSW r/m16'
3402 *
3403 * @param u16NewMsw The new value.
3404 */
3405IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3406{
3407 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3408
3409 if (pIemCpu->uCpl != 0)
3410 return iemRaiseGeneralProtectionFault0(pIemCpu);
3411 Assert(!pCtx->eflags.Bits.u1VM);
3412
3413 /*
3414 * Compose the new CR0 value and call common worker.
3415 */
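    /* LMSW only touches PE, MP, EM and TS.  Note that PE is OR'ed in but never
       masked out, so the instruction can set PE but cannot clear it. */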
3416 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3417 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3418 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3419}
3420
3421
3422/**
3423 * Implements 'CLTS'.
3424 */
3425IEM_CIMPL_DEF_0(iemCImpl_clts)
3426{
3427 if (pIemCpu->uCpl != 0)
3428 return iemRaiseGeneralProtectionFault0(pIemCpu);
3429
3430 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3431 uint64_t uNewCr0 = pCtx->cr0;
3432 uNewCr0 &= ~X86_CR0_TS;
3433 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3434}
3435
3436
3437/**
3438 * Implements mov GReg,DRx.
3439 *
3440 * @param iGReg The general register to store the DRx value in.
3441 * @param iDrReg The DRx register to read (0-7).
3442 */
3443IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3444{
3445 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3446
3447 /*
3448 * Check preconditions.
3449 */
3450
3451 /* Raise GPs. */
3452 if (pIemCpu->uCpl != 0)
3453 return iemRaiseGeneralProtectionFault0(pIemCpu);
3454 Assert(!pCtx->eflags.Bits.u1VM);
3455
3456 if ( (iDrReg == 4 || iDrReg == 5)
3457 && (pCtx->cr4 & X86_CR4_DE) )
3458 {
3459 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3460 return iemRaiseGeneralProtectionFault0(pIemCpu);
3461 }
3462
3463 /* Raise #DB if general access detect is enabled. */
3464 if (pCtx->dr[7] & X86_DR7_GD)
3465 {
3466 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3467 return iemRaiseDebugException(pIemCpu);
3468 }
3469
3470 /*
3471 * Read the debug register and store it in the specified general register.
3472 */
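    /* DR4/DR5 alias DR6/DR7 here (CR4.DE was checked above); the reserved
       bits read back with their architected fixed values. */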
3473 uint64_t drX;
3474 switch (iDrReg)
3475 {
3476 case 0: drX = pCtx->dr[0]; break;
3477 case 1: drX = pCtx->dr[1]; break;
3478 case 2: drX = pCtx->dr[2]; break;
3479 case 3: drX = pCtx->dr[3]; break;
3480 case 6:
3481 case 4:
3482 drX = pCtx->dr[6];
3483 drX &= ~RT_BIT_32(12);
3484 drX |= UINT32_C(0xffff0ff0);
3485 break;
3486 case 7:
3487 case 5:
3488 drX = pCtx->dr[7];
3489 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3490 drX |= RT_BIT_32(10);
3491 break;
3492 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3493 }
3494
3495 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3496 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3497 else
3498 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3499
3500 iemRegAddToRip(pIemCpu, cbInstr);
3501 return VINF_SUCCESS;
3502}
3503
3504
3505/**
3506 * Implements mov DRx,GReg.
3507 *
3508 * @param iDrReg The DRx register to write (valid).
3509 * @param iGReg The general register to load the DRx value from.
3510 */
3511IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3512{
3513 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3514
3515 /*
3516 * Check preconditions.
3517 */
3518 if (pIemCpu->uCpl != 0)
3519 return iemRaiseGeneralProtectionFault0(pIemCpu);
3520 Assert(!pCtx->eflags.Bits.u1VM);
3521
3522 if ( (iDrReg == 4 || iDrReg == 5)
3523 && (pCtx->cr4 & X86_CR4_DE) )
3524 {
3525 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3526 return iemRaiseGeneralProtectionFault0(pIemCpu);
3527 }
3528
3529 /* Raise #DB if general access detect is enabled. */
3530 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
3531 * \#GP? */
3532 if (pCtx->dr[7] & X86_DR7_GD)
3533 {
3534 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3535 return iemRaiseDebugException(pIemCpu);
3536 }
3537
3538 /*
3539 * Read the new value from the source register.
3540 */
3541 uint64_t uNewDrX;
3542 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3543 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3544 else
3545 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3546
3547 /*
3548 * Adjust it.
3549 */
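    /* As on reads, DR4/DR5 alias DR6/DR7.  The upper 32 bits must be zero and
       the fixed reserved bits are forced to their architected values. */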
3550 switch (iDrReg)
3551 {
3552 case 0:
3553 case 1:
3554 case 2:
3555 case 3:
3556 /* nothing to adjust */
3557 break;
3558
3559 case 6:
3560 case 4:
3561 if (uNewDrX & UINT64_C(0xffffffff00000000))
3562 {
3563 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3564 return iemRaiseGeneralProtectionFault0(pIemCpu);
3565 }
3566 uNewDrX &= ~RT_BIT_32(12);
3567 uNewDrX |= UINT32_C(0xffff0ff0);
3568 break;
3569
3570 case 7:
3571 case 5:
3572 if (uNewDrX & UINT64_C(0xffffffff00000000))
3573 {
3574 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3575 return iemRaiseGeneralProtectionFault0(pIemCpu);
3576 }
3577 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3578 uNewDrX |= RT_BIT_32(10);
3579 break;
3580
3581 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3582 }
3583
3584 /*
3585 * Do the actual setting.
3586 */
3587 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3588 {
3589 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3590 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3591 }
3592 else
3593 pCtx->dr[iDrReg] = uNewDrX;
3594
3595 iemRegAddToRip(pIemCpu, cbInstr);
3596 return VINF_SUCCESS;
3597}
3598
3599
3600/**
3601 * Implements 'INVLPG m'.
3602 *
3603 * @param GCPtrPage The effective address of the page to invalidate.
3604 * @remarks Updates the RIP.
3605 */
3606IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3607{
3608 /* ring-0 only. */
3609 if (pIemCpu->uCpl != 0)
3610 return iemRaiseGeneralProtectionFault0(pIemCpu);
3611 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3612
3613 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3614 iemRegAddToRip(pIemCpu, cbInstr);
3615
3616 if (rc == VINF_SUCCESS)
3617 return VINF_SUCCESS;
3618 if (rc == VINF_PGM_SYNC_CR3)
3619 return iemSetPassUpStatus(pIemCpu, rc);
3620
3621 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3622 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", rc));
3623 return rc;
3624}
3625
3626
3627/**
3628 * Implements RDTSC.
3629 */
3630IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3631{
3632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3633
3634 /*
3635 * Check preconditions.
3636 */
3637 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3638 return iemRaiseUndefinedOpcode(pIemCpu);
3639
3640 if ( (pCtx->cr4 & X86_CR4_TSD)
3641 && pIemCpu->uCpl != 0)
3642 {
3643 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3644 return iemRaiseGeneralProtectionFault0(pIemCpu);
3645 }
3646
3647 /*
3648 * Do the job.
3649 */
3650 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3651 pCtx->rax = (uint32_t)uTicks;
3652 pCtx->rdx = uTicks >> 32;
3653#ifdef IEM_VERIFICATION_MODE
3654 pIemCpu->fIgnoreRaxRdx = true;
3655#endif
3656
3657 iemRegAddToRip(pIemCpu, cbInstr);
3658 return VINF_SUCCESS;
3659}
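
#if 0
/**
 * Illustrative sketch (not used): how the 64-bit TSC value is split into
 * EDX:EAX by RDTSC above (EAX gets the low half, EDX the high half).
 */
static void iemSketchSplitU64ToEdxEax(uint64_t uValue, uint32_t *puEax, uint32_t *puEdx)
{
    *puEax = (uint32_t)uValue;          /* low 32 bits */
    *puEdx = (uint32_t)(uValue >> 32);  /* high 32 bits */
}
#endif /* illustrative sketch, not used */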
3660
3661
3662/**
3663 * Implements RDMSR.
3664 */
3665IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3666{
3667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3668
3669 /*
3670 * Check preconditions.
3671 */
3672 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3673 return iemRaiseUndefinedOpcode(pIemCpu);
3674 if (pIemCpu->uCpl != 0)
3675 return iemRaiseGeneralProtectionFault0(pIemCpu);
3676
3677 /*
3678 * Do the job.
3679 */
3680 RTUINT64U uValue;
3681 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3682 if (rc != VINF_SUCCESS)
3683 {
3684 Log(("IEM: rdmsr(%#x) -> GP(0)\n", pCtx->ecx));
3685 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3686 return iemRaiseGeneralProtectionFault0(pIemCpu);
3687 }
3688
3689 pCtx->rax = uValue.s.Lo;
3690 pCtx->rdx = uValue.s.Hi;
3691
3692 iemRegAddToRip(pIemCpu, cbInstr);
3693 return VINF_SUCCESS;
3694}
3695
3696
3697/**
3698 * Implements WRMSR.
3699 */
3700IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
3701{
3702 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3703
3704 /*
3705 * Check preconditions.
3706 */
3707 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3708 return iemRaiseUndefinedOpcode(pIemCpu);
3709 if (pIemCpu->uCpl != 0)
3710 return iemRaiseGeneralProtectionFault0(pIemCpu);
3711
3712 /*
3713 * Do the job.
3714 */
3715 RTUINT64U uValue;
3716 uValue.s.Lo = pCtx->eax;
3717 uValue.s.Hi = pCtx->edx;
3718
3719 int rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
3720 if (rc != VINF_SUCCESS)
3721 {
3722 Log(("IEM: wrmsr(%#x,%#x`%08x) -> GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
3723 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3724 return iemRaiseGeneralProtectionFault0(pIemCpu);
3725 }
3726
3727 iemRegAddToRip(pIemCpu, cbInstr);
3728 return VINF_SUCCESS;
3729}
3730
3731
3732/**
3733 * Implements 'IN eAX, port'.
3734 *
3735 * @param u16Port The source port.
3736 * @param cbReg The register size.
3737 */
3738IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3739{
3740 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3741
3742 /*
3743 * CPL check
3744 */
3745 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3746 if (rcStrict != VINF_SUCCESS)
3747 return rcStrict;
3748
3749 /*
3750 * Perform the I/O.
3751 */
3752 uint32_t u32Value;
3753 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3754 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3755 else
3756 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3757 if (IOM_SUCCESS(rcStrict))
3758 {
3759 switch (cbReg)
3760 {
3761 case 1: pCtx->al = (uint8_t)u32Value; break;
3762 case 2: pCtx->ax = (uint16_t)u32Value; break;
3763 case 4: pCtx->rax = u32Value; break;
3764 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3765 }
3766 iemRegAddToRip(pIemCpu, cbInstr);
3767 pIemCpu->cPotentialExits++;
3768 if (rcStrict != VINF_SUCCESS)
3769 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3770 }
3771
3772 return rcStrict;
3773}
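
#if 0
/**
 * Illustrative sketch (not used): how the value read from the port is merged
 * into rAX above.  Only the 32-bit case clears the upper half of RAX; the 8-
 * and 16-bit cases leave the remaining bits untouched, matching the usual x86
 * partial-register write rules.
 */
static uint64_t iemSketchMergeInValue(uint64_t uOldRax, uint32_t u32Value, uint8_t cbReg)
{
    switch (cbReg)
    {
        case 1: return (uOldRax & ~UINT64_C(0xff))   | (uint8_t)u32Value;
        case 2: return (uOldRax & ~UINT64_C(0xffff)) | (uint16_t)u32Value;
        case 4: return u32Value;        /* 32-bit writes zero-extend to 64 bits */
        default: return uOldRax;
    }
}
#endif /* illustrative sketch, not used */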
3774
3775
3776/**
3777 * Implements 'IN eAX, DX'.
3778 *
3779 * @param cbReg The register size.
3780 */
3781IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3782{
3783 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3784}
3785
3786
3787/**
3788 * Implements 'OUT port, eAX'.
3789 *
3790 * @param u16Port The destination port.
3791 * @param cbReg The register size.
3792 */
3793IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3794{
3795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3796
3797 /*
3798 * CPL check
3799 */
3800 if ( (pCtx->cr0 & X86_CR0_PE)
3801 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3802 || pCtx->eflags.Bits.u1VM) )
3803 {
3804 /** @todo I/O port permission bitmap check */
3805 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap checks.\n"));
3806 }
3807
3808 /*
3809 * Perform the I/O.
3810 */
3811 uint32_t u32Value;
3812 switch (cbReg)
3813 {
3814 case 1: u32Value = pCtx->al; break;
3815 case 2: u32Value = pCtx->ax; break;
3816 case 4: u32Value = pCtx->eax; break;
3817 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3818 }
3819 VBOXSTRICTRC rcStrict;
3820 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3821 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3822 else
3823 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3824 if (IOM_SUCCESS(rcStrict))
3825 {
3826 iemRegAddToRip(pIemCpu, cbInstr);
3827 pIemCpu->cPotentialExits++;
3828 if (rcStrict != VINF_SUCCESS)
3829 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3830 }
3831 return rcStrict;
3832}
3833
3834
3835/**
3836 * Implements 'OUT DX, eAX'.
3837 *
3838 * @param cbReg The register size.
3839 */
3840IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3841{
3842 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3843}
3844
3845
3846/**
3847 * Implements 'CLI'.
3848 */
3849IEM_CIMPL_DEF_0(iemCImpl_cli)
3850{
3851 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3852
3853 if (pCtx->cr0 & X86_CR0_PE)
3854 {
3855 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3856 if (!pCtx->eflags.Bits.u1VM)
3857 {
3858 if (pIemCpu->uCpl <= uIopl)
3859 pCtx->eflags.Bits.u1IF = 0;
3860 else if ( pIemCpu->uCpl == 3
3861 && (pCtx->cr4 & X86_CR4_PVI) )
3862 pCtx->eflags.Bits.u1VIF = 0;
3863 else
3864 return iemRaiseGeneralProtectionFault0(pIemCpu);
3865 }
3866 /* V8086 */
3867 else if (uIopl == 3)
3868 pCtx->eflags.Bits.u1IF = 0;
3869 else if ( uIopl < 3
3870 && (pCtx->cr4 & X86_CR4_VME) )
3871 pCtx->eflags.Bits.u1VIF = 0;
3872 else
3873 return iemRaiseGeneralProtectionFault0(pIemCpu);
3874 }
3875 /* real mode */
3876 else
3877 pCtx->eflags.Bits.u1IF = 0;
3878 iemRegAddToRip(pIemCpu, cbInstr);
3879 return VINF_SUCCESS;
3880}
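
#if 0
/**
 * Illustrative sketch (not used): which flag CLI clears, following the same
 * IOPL/PVI/VME rules as iemCImpl_cli above.
 *
 * @returns X86_EFL_IF, X86_EFL_VIF, or 0 if a \#GP(0) would be raised instead.
 * @param   fCr0    Guest CR0.
 * @param   fCr4    Guest CR4.
 * @param   fEfl    Guest EFLAGS.
 * @param   uCpl    The current privilege level.
 */
static uint32_t iemSketchCliEffect(uint32_t fCr0, uint32_t fCr4, uint32_t fEfl, uint8_t uCpl)
{
    if (!(fCr0 & X86_CR0_PE))
        return X86_EFL_IF;                              /* real mode */
    uint8_t const uIopl = (uint8_t)((fEfl >> 12) & 3);  /* IOPL is EFLAGS bits 13:12 */
    if (!(fEfl & X86_EFL_VM))
    {
        if (uCpl <= uIopl)
            return X86_EFL_IF;
        if (uCpl == 3 && (fCr4 & X86_CR4_PVI))
            return X86_EFL_VIF;                         /* protected-mode virtual interrupts */
        return 0;                                       /* #GP(0) */
    }
    if (uIopl == 3)
        return X86_EFL_IF;                              /* V8086 with IOPL=3 */
    if (fCr4 & X86_CR4_VME)
        return X86_EFL_VIF;                             /* virtual-8086 mode extensions */
    return 0;                                           /* #GP(0) */
}
#endif /* illustrative sketch, not used */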
3881
3882
3883/**
3884 * Implements 'STI'.
3885 */
3886IEM_CIMPL_DEF_0(iemCImpl_sti)
3887{
3888 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3889
3890 if (pCtx->cr0 & X86_CR0_PE)
3891 {
3892 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3893 if (!pCtx->eflags.Bits.u1VM)
3894 {
3895 if (pIemCpu->uCpl <= uIopl)
3896 pCtx->eflags.Bits.u1IF = 1;
3897 else if ( pIemCpu->uCpl == 3
3898 && (pCtx->cr4 & X86_CR4_PVI)
3899 && !pCtx->eflags.Bits.u1VIP )
3900 pCtx->eflags.Bits.u1VIF = 1;
3901 else
3902 return iemRaiseGeneralProtectionFault0(pIemCpu);
3903 }
3904 /* V8086 */
3905 else if (uIopl == 3)
3906 pCtx->eflags.Bits.u1IF = 1;
3907 else if ( uIopl < 3
3908 && (pCtx->cr4 & X86_CR4_VME)
3909 && !pCtx->eflags.Bits.u1VIP )
3910 pCtx->eflags.Bits.u1VIF = 1;
3911 else
3912 return iemRaiseGeneralProtectionFault0(pIemCpu);
3913 }
3914 /* real mode */
3915 else
3916 pCtx->eflags.Bits.u1IF = 1;
3917
3918 iemRegAddToRip(pIemCpu, cbInstr);
3919 /** @todo don't do this unconditionally... */
3920 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3921 return VINF_SUCCESS;
3922}
3923
3924
3925/**
3926 * Implements 'HLT'.
3927 */
3928IEM_CIMPL_DEF_0(iemCImpl_hlt)
3929{
3930 if (pIemCpu->uCpl != 0)
3931 return iemRaiseGeneralProtectionFault0(pIemCpu);
3932 iemRegAddToRip(pIemCpu, cbInstr);
3933 return VINF_EM_HALT;
3934}
3935
3936
3937/**
3938 * Implements 'CPUID'.
3939 */
3940IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3941{
3942 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3943
3944 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3945 pCtx->rax &= UINT32_C(0xffffffff);
3946 pCtx->rbx &= UINT32_C(0xffffffff);
3947 pCtx->rcx &= UINT32_C(0xffffffff);
3948 pCtx->rdx &= UINT32_C(0xffffffff);
3949
3950 iemRegAddToRip(pIemCpu, cbInstr);
3951 return VINF_SUCCESS;
3952}
3953
3954
3955/**
3956 * Implements 'AAD'.
3957 *
3958 * @param enmEffOpSize The effective operand size.
3959 */
3960IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3961{
3962 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3963
3964 uint16_t const ax = pCtx->ax;
3965 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3966 pCtx->ax = al;
3967 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3968 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3969 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3970
3971 iemRegAddToRip(pIemCpu, cbInstr);
3972 return VINF_SUCCESS;
3973}
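
#if 0
/**
 * Illustrative sketch (not used): the AAD arithmetic implemented above.
 * With the standard base 10 and AX=0x0307 (AH=3, AL=7) this yields AX=0x0025,
 * i.e. the binary value 37.
 */
static uint16_t iemSketchAad(uint16_t uAx, uint8_t bBase)
{
    uint8_t const al = (uint8_t)((uint8_t)uAx + (uint8_t)(uAx >> 8) * bBase); /* AL := AL + AH * base */
    return al;                                                                /* AH := 0 */
}
#endif /* illustrative sketch, not used */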
3974
3975
3976/**
3977 * Implements 'AAM'.
3978 *
3979 * @param bImm The immediate operand. Cannot be 0.
3980 */
3981IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3982{
3983 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3984 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3985
3986 uint16_t const ax = pCtx->ax;
3987 uint8_t const al = (uint8_t)ax % bImm;
3988 uint8_t const ah = (uint8_t)ax / bImm;
3989 pCtx->ax = (ah << 8) + al;
3990 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3991 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3992 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3993
3994 iemRegAddToRip(pIemCpu, cbInstr);
3995 return VINF_SUCCESS;
3996}
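
#if 0
/**
 * Illustrative sketch (not used): the AAM arithmetic implemented above.
 * With the standard base 10 and AL=37 (0x25) this yields AH=3, AL=7,
 * i.e. AX=0x0307.  A base of zero is excluded (\#DE in the decoder).
 */
static uint16_t iemSketchAam(uint16_t uAx, uint8_t bBase)
{
    uint8_t const al = (uint8_t)uAx % bBase;    /* AL := AL mod base */
    uint8_t const ah = (uint8_t)uAx / bBase;    /* AH := AL div base */
    return (uint16_t)((ah << 8) | al);
}
#endif /* illustrative sketch, not used */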
3997
3998
3999
4000
4001/*
4002 * Instantiate the various string operation combinations.
4003 */
4004#define OP_SIZE 8
4005#define ADDR_SIZE 16
4006#include "IEMAllCImplStrInstr.cpp.h"
4007#define OP_SIZE 8
4008#define ADDR_SIZE 32
4009#include "IEMAllCImplStrInstr.cpp.h"
4010#define OP_SIZE 8
4011#define ADDR_SIZE 64
4012#include "IEMAllCImplStrInstr.cpp.h"
4013
4014#define OP_SIZE 16
4015#define ADDR_SIZE 16
4016#include "IEMAllCImplStrInstr.cpp.h"
4017#define OP_SIZE 16
4018#define ADDR_SIZE 32
4019#include "IEMAllCImplStrInstr.cpp.h"
4020#define OP_SIZE 16
4021#define ADDR_SIZE 64
4022#include "IEMAllCImplStrInstr.cpp.h"
4023
4024#define OP_SIZE 32
4025#define ADDR_SIZE 16
4026#include "IEMAllCImplStrInstr.cpp.h"
4027#define OP_SIZE 32
4028#define ADDR_SIZE 32
4029#include "IEMAllCImplStrInstr.cpp.h"
4030#define OP_SIZE 32
4031#define ADDR_SIZE 64
4032#include "IEMAllCImplStrInstr.cpp.h"
4033
4034#define OP_SIZE 64
4035#define ADDR_SIZE 32
4036#include "IEMAllCImplStrInstr.cpp.h"
4037#define OP_SIZE 64
4038#define ADDR_SIZE 64
4039#include "IEMAllCImplStrInstr.cpp.h"
4040
4041
4042/**
4043 * Implements 'FINIT' and 'FNINIT'.
4044 *
4045 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
4046 * not.
4047 */
4048IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
4049{
4050 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4051
4052 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4053 return iemRaiseDeviceNotAvailable(pIemCpu);
4054
4055 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
4056 if (fCheckXcpts && TODO )
4057 return iemRaiseMathFault(pIemCpu);
4058 */
4059
4060 if (iemFRegIsFxSaveFormat(pIemCpu))
4061 {
4062 pCtx->fpu.FCW = 0x37f;
4063 pCtx->fpu.FSW = 0;
4064 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
4065 pCtx->fpu.FPUDP = 0;
4066 pCtx->fpu.DS = 0; //??
4067 pCtx->fpu.Rsrvd2= 0;
4068 pCtx->fpu.FPUIP = 0;
4069 pCtx->fpu.CS = 0; //??
4070 pCtx->fpu.Rsrvd1= 0;
4071 pCtx->fpu.FOP = 0;
4072 }
4073 else
4074 {
4075 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
4076 pFpu->FCW = 0x37f;
4077 pFpu->FSW = 0;
4078 pFpu->FTW = 0xffff; /* 11 - empty */
4079 pFpu->FPUOO = 0; //??
4080 pFpu->FPUOS = 0; //??
4081 pFpu->FPUIP = 0;
4082 pFpu->CS = 0; //??
4083 pFpu->FOP = 0;
4084 }
4085
4086 iemRegAddToRip(pIemCpu, cbInstr);
4087 return VINF_SUCCESS;
4088}
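
#if 0
/**
 * Illustrative sketch (not used): decoding the FCW reset value 0x37f used by
 * FNINIT above - all six exception mask bits set, precision control 11b
 * (64-bit significand) and rounding control 00b (round to nearest even).
 * The field positions follow the SDM; only the X86_FCW_*M mask bits from
 * x86.h are used here.
 */
static void iemSketchDecodeFcw(uint16_t u16Fcw, uint16_t *pfMasks, uint8_t *pbPc, uint8_t *pbRc)
{
    *pfMasks = u16Fcw & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM);
    *pbPc    = (uint8_t)((u16Fcw >> 8) & 3);    /* precision control, bits 9:8  */
    *pbRc    = (uint8_t)((u16Fcw >> 10) & 3);   /* rounding control, bits 11:10 */
}
#endif /* illustrative sketch, not used */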
4089
4090
4091/**
4092 * Implements 'FXSAVE'.
4093 *
4094 * @param iEffSeg The effective segment.
4095 * @param GCPtrEff The address of the image.
4096 * @param enmEffOpSize The operand size (only REX.W really matters).
4097 */
4098IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4099{
4100 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4101
4102 /*
4103 * Raise exceptions.
4104 */
4105 if (pCtx->cr0 & X86_CR0_EM)
4106 return iemRaiseUndefinedOpcode(pIemCpu);
4107 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4108 return iemRaiseDeviceNotAvailable(pIemCpu);
4109 if (GCPtrEff & 15)
4110 {
4111 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4112 * all/any misalignment sizes; Intel says it's an implementation detail. */
4113 if ( (pCtx->cr0 & X86_CR0_AM)
4114 && pCtx->eflags.Bits.u1AC
4115 && pIemCpu->uCpl == 3)
4116 return iemRaiseAlignmentCheckException(pIemCpu);
4117 return iemRaiseGeneralProtectionFault0(pIemCpu);
4118 }
4119 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4120
4121 /*
4122 * Access the memory.
4123 */
4124 void *pvMem512;
4125 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4126 if (rcStrict != VINF_SUCCESS)
4127 return rcStrict;
4128 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
4129
4130 /*
4131 * Store the registers.
4132 */
4133 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4134 * specific whether MXCSR and XMM0-XMM7 are saved. */
4135
4136 /* common for all formats */
4137 pDst->FCW = pCtx->fpu.FCW;
4138 pDst->FSW = pCtx->fpu.FSW;
4139 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
4140 pDst->FOP = pCtx->fpu.FOP;
4141 pDst->MXCSR = pCtx->fpu.MXCSR;
4142 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
4143 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
4144 {
4145 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
4146 * them for now... */
4147 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4148 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4149 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
4150 pDst->aRegs[i].au32[3] = 0;
4151 }
4152
4153 /* FPU IP, CS, DP and DS. */
4154 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
4155 * state information. :-/
4156 * Storing zeros now to prevent any potential leakage of host info. */
4157 pDst->FPUIP = 0;
4158 pDst->CS = 0;
4159 pDst->Rsrvd1 = 0;
4160 pDst->FPUDP = 0;
4161 pDst->DS = 0;
4162 pDst->Rsrvd2 = 0;
4163
4164 /* XMM registers. */
4165 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4166 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4167 || pIemCpu->uCpl != 0)
4168 {
4169 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4170 for (uint32_t i = 0; i < cXmmRegs; i++)
4171 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
4172 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
4173 * right? */
4174 }
4175
4176 /*
4177 * Commit the memory.
4178 */
4179 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4180 if (rcStrict != VINF_SUCCESS)
4181 return rcStrict;
4182
4183 iemRegAddToRip(pIemCpu, cbInstr);
4184 return VINF_SUCCESS;
4185}
4186
4187
4188/**
4189 * Implements 'FXRSTOR'.
4190 * @param iEffSeg The effective segment register for @a GCPtrEff.
4191 * @param GCPtrEff The address of the image.
4192 * @param enmEffOpSize The operand size (only REX.W really matters).
4193 */
4194IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4195{
4196 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4197
4198 /*
4199 * Raise exceptions.
4200 */
4201 if (pCtx->cr0 & X86_CR0_EM)
4202 return iemRaiseUndefinedOpcode(pIemCpu);
4203 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4204 return iemRaiseDeviceNotAvailable(pIemCpu);
4205 if (GCPtrEff & 15)
4206 {
4207 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4208 * all/any misalignment sizes; Intel says it's an implementation detail. */
4209 if ( (pCtx->cr0 & X86_CR0_AM)
4210 && pCtx->eflags.Bits.u1AC
4211 && pIemCpu->uCpl == 3)
4212 return iemRaiseAlignmentCheckException(pIemCpu);
4213 return iemRaiseGeneralProtectionFault0(pIemCpu);
4214 }
4215 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4216
4217 /*
4218 * Access the memory.
4219 */
4220 void *pvMem512;
4221 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4222 if (rcStrict != VINF_SUCCESS)
4223 return rcStrict;
4224 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4225
4226 /*
4227 * Check the state for stuff which will GP(0).
4228 */
4229 uint32_t const fMXCSR = pSrc->MXCSR;
4230 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4231 if (fMXCSR & ~fMXCSR_MASK)
4232 {
4233 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4234 return iemRaiseGeneralProtectionFault0(pIemCpu);
4235 }
4236
4237 /*
4238 * Load the registers.
4239 */
4240 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4241 * specific whether MXCSR and XMM0-XMM7 are restored. */
4242
4243 /* common for all formats */
4244 pCtx->fpu.FCW = pSrc->FCW;
4245 pCtx->fpu.FSW = pSrc->FSW;
4246 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4247 pCtx->fpu.FOP = pSrc->FOP;
4248 pCtx->fpu.MXCSR = fMXCSR;
4249 /* (MXCSR_MASK is read-only) */
4250 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4251 {
4252 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4253 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4254 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4255 pCtx->fpu.aRegs[i].au32[3] = 0;
4256 }
4257
4258 /* FPU IP, CS, DP and DS. */
4259 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4260 {
4261 pCtx->fpu.FPUIP = pSrc->FPUIP;
4262 pCtx->fpu.CS = pSrc->CS;
4263 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4264 pCtx->fpu.FPUDP = pSrc->FPUDP;
4265 pCtx->fpu.DS = pSrc->DS;
4266 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4267 }
4268 else
4269 {
4270 pCtx->fpu.FPUIP = pSrc->FPUIP;
4271 pCtx->fpu.CS = pSrc->CS;
4272 pCtx->fpu.Rsrvd1 = 0;
4273 pCtx->fpu.FPUDP = pSrc->FPUDP;
4274 pCtx->fpu.DS = pSrc->DS;
4275 pCtx->fpu.Rsrvd2 = 0;
4276 }
4277
4278 /* XMM registers. */
4279 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4280 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4281 || pIemCpu->uCpl != 0)
4282 {
4283 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4284 for (uint32_t i = 0; i < cXmmRegs; i++)
4285 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4286 }
4287
4288 /*
4289 * Commit the memory.
4290 */
4291 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4292 if (rcStrict != VINF_SUCCESS)
4293 return rcStrict;
4294
4295 iemRegAddToRip(pIemCpu, cbInstr);
4296 return VINF_SUCCESS;
4297}
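
#if 0
/**
 * Illustrative sketch (not used): the MXCSR validity check done by FXRSTOR
 * above.  Bits not covered by MXCSR_MASK (defaulting to 0xffbf when the saved
 * mask is zero) must be clear, otherwise the instruction raises \#GP(0).
 */
static bool iemSketchIsMxcsrValid(uint32_t fMxcsr, uint32_t fMxcsrMask)
{
    if (!fMxcsrMask)
        fMxcsrMask = UINT32_C(0xffbf);  /* default mask when none was saved */
    return !(fMxcsr & ~fMxcsrMask);
}
#endif /* illustrative sketch, not used */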
4298
4299
4300/**
4301 * Common routine for fnstenv and fnsave.
4302 *
4303 * @param uPtr Where to store the state.
4304 * @param pCtx The CPU context.
4305 */
4306static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4307{
4308 if (enmEffOpSize == IEMMODE_16BIT)
4309 {
4310 uPtr.pu16[0] = pCtx->fpu.FCW;
4311 uPtr.pu16[1] = pCtx->fpu.FSW;
4312 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4313 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4314 {
4315 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4316 * protected mode or long mode and we save it in real mode? And vice
4317 * versa? And with 32-bit operand size? I think the CPU is storing the
4318 * effective address ((CS << 4) + IP) in the offset register and not
4319 * doing any address calculations here. */
4320 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4321 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4322 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4323 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4324 }
4325 else
4326 {
4327 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4328 uPtr.pu16[4] = pCtx->fpu.CS;
4329 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4330 uPtr.pu16[6] = pCtx->fpu.DS;
4331 }
4332 }
4333 else
4334 {
4335 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4336 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4337 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4338 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4339 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4340 {
4341 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4342 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4343 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4344 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4345 }
4346 else
4347 {
4348 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4349 uPtr.pu16[4*2] = pCtx->fpu.CS;
4350 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4351 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4352 uPtr.pu16[6*2] = pCtx->fpu.DS;
4353 }
4354 }
4355}
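
#if 0
/**
 * Illustrative sketch (not used): the 14-byte 16-bit FNSTENV/FLDENV image as
 * laid out by iemCImplCommonFpuStoreEnv above, real/V86-mode variant.  In
 * protected mode words 3..6 hold FPUIP, CS, FPUDP and DS instead.
 */
typedef struct IEMSKETCHFPUENV16
{
    uint16_t FCW;           /* +0x00: control word */
    uint16_t FSW;           /* +0x02: status word */
    uint16_t FTW;           /* +0x04: full tag word (2 bits per register) */
    uint16_t FPUIPLow;      /* +0x06: instruction pointer bits 15:0 */
    uint16_t FPUIPHiAndFOP; /* +0x08: IP bits 19:16 in bits 15:12, opcode in bits 10:0 */
    uint16_t FPUDPLow;      /* +0x0a: data pointer bits 15:0 */
    uint16_t FPUDPHi;       /* +0x0c: DP bits 19:16 in bits 15:12 */
} IEMSKETCHFPUENV16;
#endif /* illustrative sketch, not used */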
4356
4357
4358/**
4359 * Common routine for fldenv and frstor.
4360 *
4361 * @param uPtr Where to load the state from.
4362 * @param pCtx The CPU context.
4363 */
4364static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4365{
4366 if (enmEffOpSize == IEMMODE_16BIT)
4367 {
4368 pCtx->fpu.FCW = uPtr.pu16[0];
4369 pCtx->fpu.FSW = uPtr.pu16[1];
4370 pCtx->fpu.FTW = uPtr.pu16[2];
4371 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4372 {
4373 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4374 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4375 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4376 pCtx->fpu.CS = 0;
4377 pCtx->fpu.Rsrvd1= 0;
4378 pCtx->fpu.DS = 0;
4379 pCtx->fpu.Rsrvd2= 0;
4380 }
4381 else
4382 {
4383 pCtx->fpu.FPUIP = uPtr.pu16[3];
4384 pCtx->fpu.CS = uPtr.pu16[4];
4385 pCtx->fpu.Rsrvd1= 0;
4386 pCtx->fpu.FPUDP = uPtr.pu16[5];
4387 pCtx->fpu.DS = uPtr.pu16[6];
4388 pCtx->fpu.Rsrvd2= 0;
4389 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4390 }
4391 }
4392 else
4393 {
4394 pCtx->fpu.FCW = uPtr.pu16[0*2];
4395 pCtx->fpu.FSW = uPtr.pu16[1*2];
4396 pCtx->fpu.FTW = uPtr.pu16[2*2];
4397 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4398 {
4399 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4400 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4401 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4402 pCtx->fpu.CS = 0;
4403 pCtx->fpu.Rsrvd1= 0;
4404 pCtx->fpu.DS = 0;
4405 pCtx->fpu.Rsrvd2= 0;
4406 }
4407 else
4408 {
4409 pCtx->fpu.FPUIP = uPtr.pu32[3];
4410 pCtx->fpu.CS = uPtr.pu16[4*2];
4411 pCtx->fpu.Rsrvd1= 0;
4412 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4413 pCtx->fpu.FPUDP = uPtr.pu32[5];
4414 pCtx->fpu.DS = uPtr.pu16[6*2];
4415 pCtx->fpu.Rsrvd2= 0;
4416 }
4417 }
4418
4419 /* Make adjustments. */
4420 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4421 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4422 iemFpuRecalcExceptionStatus(pCtx);
4423 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4424 * exceptions are pending after loading the saved state? */
4425}
4426
4427
4428/**
4429 * Implements 'FNSTENV'.
4430 *
4431 * @param enmEffOpSize The operand size (selects the 14 or 28 byte format).
4432 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4433 * @param GCPtrEffDst The address of the image.
4434 */
4435IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4436{
4437 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4438 RTPTRUNION uPtr;
4439 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4440 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4441 if (rcStrict != VINF_SUCCESS)
4442 return rcStrict;
4443
4444 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4445
4446 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4447 if (rcStrict != VINF_SUCCESS)
4448 return rcStrict;
4449
4450 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4451 iemRegAddToRip(pIemCpu, cbInstr);
4452 return VINF_SUCCESS;
4453}
4454
4455
4456/**
4457 * Implements 'FNSAVE'.
4458 * @param enmEffOpSize The operand size (selects the 94 or 108 byte format).
4459 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4460 * @param GCPtrEffDst The address of the image.
4461 */
4462IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4463{
4464 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4465 RTPTRUNION uPtr;
4466 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4467 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4468 if (rcStrict != VINF_SUCCESS)
4469 return rcStrict;
4470
4471 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4472 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4473 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4474 {
4475 paRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4476 paRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4477 paRegs[i].au16[4] = pCtx->fpu.aRegs[i].au16[4];
4478 }
4479
4480 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4481 if (rcStrict != VINF_SUCCESS)
4482 return rcStrict;
4483
4484 /*
4485 * Re-initialize the FPU.
4486 */
4487 pCtx->fpu.FCW = 0x37f;
4488 pCtx->fpu.FSW = 0;
4489 pCtx->fpu.FTW = 0xffff; /* 11 - empty */
4490 pCtx->fpu.FPUDP = 0;
4491 pCtx->fpu.DS = 0;
4492 pCtx->fpu.Rsrvd2= 0;
4493 pCtx->fpu.FPUIP = 0;
4494 pCtx->fpu.CS = 0;
4495 pCtx->fpu.Rsrvd1= 0;
4496 pCtx->fpu.FOP = 0;
4497
4498
4499 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4500 iemRegAddToRip(pIemCpu, cbInstr);
4501 return VINF_SUCCESS;
4502}
4503
4504
4505
4506/**
4507 * Implements 'FLDENV'.
4508 *
4509 * @param enmEffOpSize The operand size (selects the 14 or 28 byte format).
4510 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4511 * @param GCPtrEffSrc The address of the image.
4512 */
4513IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4514{
4515 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4516 RTCPTRUNION uPtr;
4517 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4518 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4519 if (rcStrict != VINF_SUCCESS)
4520 return rcStrict;
4521
4522 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4523
4524 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4525 if (rcStrict != VINF_SUCCESS)
4526 return rcStrict;
4527
4528 iemRegAddToRip(pIemCpu, cbInstr);
4529 return VINF_SUCCESS;
4530}
4531
4532
4533/**
4534 * Implements 'FRSTOR'.
4535 * @param enmEffOpSize The operand size (selects the 94 or 108 byte format).
4536 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4537 * @param GCPtrEffSrc The address of the image.
4538 */
4539IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4540{
4541 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4542 RTCPTRUNION uPtr;
4543 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4544 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4545 if (rcStrict != VINF_SUCCESS)
4546 return rcStrict;
4547
4548 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4549 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4550 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4551 {
4552 pCtx->fpu.aRegs[i].au32[0] = paRegs[i].au32[0];
4553 pCtx->fpu.aRegs[i].au32[1] = paRegs[i].au32[1];
4554 pCtx->fpu.aRegs[i].au32[2] = paRegs[i].au16[4];
4555 pCtx->fpu.aRegs[i].au32[3] = 0;
4556 }
4557
4558 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4559 if (rcStrict != VINF_SUCCESS)
4560 return rcStrict;
4561
4562 iemRegAddToRip(pIemCpu, cbInstr);
4563 return VINF_SUCCESS;
4564}
4565
4566
4567/**
4568 * Implements 'FLDCW'.
4569 *
4570 * @param u16Fcw The new FCW.
4571 */
4572IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4573{
4574 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4575
4576 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4577 /** @todo Testcase: Try to see what happens when setting undefined bits
4578 * (other than 6 and 7). Currently ignoring them. */
4579 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4580 * according to FSW. (This is what is currently implemented.) */
4581 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4582 iemFpuRecalcExceptionStatus(pCtx);
4583
4584 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4585 iemRegAddToRip(pIemCpu, cbInstr);
4586 return VINF_SUCCESS;
4587}
4588
4589
4590
4591/**
4592 * Implements the underflow case of fxch.
4593 *
4594 * @param iStReg The other stack register.
4595 */
4596IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4597{
4598 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4599
4600 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4601 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4602 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4603
4604 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4605 * registers are read as QNaN and then exchanged. This could be
4606 * wrong... */
4607 if (pCtx->fpu.FCW & X86_FCW_IM)
4608 {
4609 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4610 {
4611 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4612 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4613 else
4614 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4615 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4616 }
4617 else
4618 {
4619 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4620 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4621 }
4622 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4623 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4624 }
4625 else
4626 {
4627 /* raise underflow exception, don't change anything. */
4628 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4629 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4630 }
4631 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4632
4633 iemRegAddToRip(pIemCpu, cbInstr);
4634 return VINF_SUCCESS;
4635}
4636
4637
4638/**
4639 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4640 *
4641 * @param iStReg The other stack register to compare ST(0) with.
4642 */
4643IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4644{
4645 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4646 Assert(iStReg < 8);
4647
4648 /*
4649 * Raise exceptions.
4650 */
4651 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4652 return iemRaiseDeviceNotAvailable(pIemCpu);
4653 uint16_t u16Fsw = pCtx->fpu.FSW;
4654 if (u16Fsw & X86_FSW_ES)
4655 return iemRaiseMathFault(pIemCpu);
4656
4657 /*
4658 * Check if either register access causes a stack fault (FSW.SF + FSW.IE).
4659 */
4660 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4661 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4662 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4663 {
4664 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4665 pCtx->fpu.FSW &= ~X86_FSW_C1;
4666 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4667 if ( !(u16Fsw & X86_FSW_IE)
4668 || (pCtx->fpu.FCW & X86_FCW_IM) )
4669 {
4670 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4671 pCtx->eflags.u |= pCtx->eflags.u & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4672 }
4673 }
4674 else if (pCtx->fpu.FCW & X86_FCW_IM)
4675 {
4676 /* Masked underflow. */
4677 pCtx->fpu.FSW &= ~X86_FSW_C1;
4678 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4679 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4680 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4681 }
4682 else
4683 {
4684 /* Raise underflow - don't touch EFLAGS or TOP. */
4685 pCtx->fpu.FSW &= ~X86_FSW_C1;
4686 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4687 fPop = false;
4688 }
4689
4690 /*
4691 * Pop if necessary.
4692 */
4693 if (fPop)
4694 {
4695 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4696 pCtx->fpu.FSW &= X86_FSW_TOP_MASK;
4697 pCtx->fpu.FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4698 }
4699
4700 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4701 iemRegAddToRip(pIemCpu, cbInstr);
4702 return VINF_SUCCESS;
4703}
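
#if 0
/**
 * Illustrative sketch (not used): the EFLAGS.ZF/PF/CF encoding FCOMI/FUCOMI
 * produce for the four comparison outcomes, which is what the pfnAImpl worker
 * above computes (and what the masked-underflow path forces to 'unordered').
 */
static uint32_t iemSketchFcomiEFlags(int iCmp, bool fUnordered)
{
    if (fUnordered)
        return X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;    /* unordered (at least one NaN) */
    if (iCmp < 0)
        return X86_EFL_CF;                              /* ST(0) < ST(i) */
    if (iCmp > 0)
        return 0;                                       /* ST(0) > ST(i) */
    return X86_EFL_ZF;                                  /* ST(0) == ST(i) */
}
#endif /* illustrative sketch, not used */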
4704
4705/** @} */
4706