VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@41906

Last change on this file since 41906 was 41906, checked in by vboxsync, 12 years ago

CPUM: Combined the visible and hidden selector register data into one structure. Preparing for lazily resolving+caching of hidden registers in raw-mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 145.8 KB
1/* $Id: IEMAllCImpl.cpp.h 41906 2012-06-24 15:44:03Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
46
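/*
 * Illustrative sketch only, not from the VirtualBox sources.  It models the
 * TSS I/O permission bitmap test that the @todo in iemHlpCheckPortIOPermission
 * above refers to: for an access of cbOperand bytes at u16Port, every bit
 * covering the touched ports must be clear and the last bitmap byte must lie
 * within the TSS limit, otherwise the access is denied (#GP(0)).  The function
 * name and the pfnReadTssU8 callback are hypothetical.
 */
#if 0 /* illustrative sketch */
static bool iemHlpSketchIoBitmapAllows(uint16_t u16Port, uint8_t cbOperand,
                                       uint32_t offIoBitmap, uint32_t cbTssLimit,
                                       uint8_t (*pfnReadTssU8)(uint32_t offTss))
{
    /* The last bitmap byte covering the access must be inside the TSS limit. */
    uint32_t const offLastByte = offIoBitmap + (((uint32_t)u16Port + cbOperand - 1) / 8);
    if (offLastByte > cbTssLimit)
        return false;

    /* Every bit covering ports u16Port..u16Port+cbOperand-1 must be zero. */
    for (uint32_t iPort = u16Port; iPort < (uint32_t)u16Port + cbOperand; iPort++)
        if (pfnReadTssU8(offIoBitmap + iPort / 8) & RT_BIT_32(iPort & 7))
            return false;
    return true;
}
#endif /* illustrative sketch */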
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
80
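/*
 * Worked example for the parity helper above: u8Result = 0xA5 = 1010 0101b has
 * four bits set, an even count, so the function returns true (PF would be set).
 * On GCC/Clang the same test can be written as !__builtin_parity(u8Result),
 * which is 1 exactly when the number of set bits is even.
 */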
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
99
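/*
 * Note on the helper above: iemAImpl_test_u8 computes a complete set of
 * arithmetic flags for the result, but only the bits listed in fToUpdate and
 * fUndefined are copied into the guest EFLAGS; all other flag bits keep their
 * previous values.  The fUndefined bits are architecturally undefined for the
 * instruction, so whatever the C helper produced is acceptable there.
 */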
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param pSReg Pointer to the segment register.
106 */
107static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg)
108{
109 /** @todo Testcase: write a testcase checking what happens when loading a NULL
110 * data selector in protected mode. */
111 pSReg->Sel = 0;
112 pSReg->ValidSel = 0;
113 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
114 pSReg->u64Base = 0;
115 pSReg->u32Limit = 0;
116 pSReg->Attr.u = 0;
117}
118
119
120/**
121 * Helper used by iret and retf for adjusting the data segment registers to a new CPL.
122 *
123 * @param uCpl The new CPL.
124 * @param pSReg Pointer to the segment register.
125 */
126static void iemHlpAdjustSelectorForNewCpl(uint8_t uCpl, PCPUMSELREG pSReg)
127{
128 if ( uCpl > pSReg->Attr.n.u2Dpl
129 && pSReg->Attr.n.u1DescType /* code or data, not system */
130 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
131 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
132 iemHlpLoadNullDataSelectorProt(pSReg);
133}
134
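/*
 * Note on the helper above: when control returns to a numerically larger
 * (less privileged) CPL, a data segment register must not keep a selector the
 * new CPL cannot access.  Any code or data segment (u1DescType set) whose DPL
 * is below the new CPL is therefore replaced by a NULL selector, except for
 * conforming code segments, which remain accessible at any CPL.
 */
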
135/** @} */
136
137/** @name C Implementations
138 * @{
139 */
140
141/**
142 * Implements a 16-bit popa.
143 */
144IEM_CIMPL_DEF_0(iemCImpl_popa_16)
145{
146 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
147 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
148 RTGCPTR GCPtrLast = GCPtrStart + 15;
149 VBOXSTRICTRC rcStrict;
150
151 /*
152 * The docs are a bit hard to comprehend here, but it looks like we wrap
153 * around in real mode as long as none of the individual pops crosses the
154 * end of the stack segment. In protected mode we check the whole access
155 * in one go. For efficiency, only do the word-by-word thing if we're in
156 * danger of wrapping around.
157 */
158 /** @todo do popa boundary / wrap-around checks. */
159 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
160 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
161 {
162 /* word-by-word */
163 RTUINT64U TmpRsp;
164 TmpRsp.u = pCtx->rsp;
165 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
166 if (rcStrict == VINF_SUCCESS)
167 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
168 if (rcStrict == VINF_SUCCESS)
169 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
170 if (rcStrict == VINF_SUCCESS)
171 {
172 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
173 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
174 }
175 if (rcStrict == VINF_SUCCESS)
176 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
177 if (rcStrict == VINF_SUCCESS)
178 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
179 if (rcStrict == VINF_SUCCESS)
180 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
181 if (rcStrict == VINF_SUCCESS)
182 {
183 pCtx->rsp = TmpRsp.u;
184 iemRegAddToRip(pIemCpu, cbInstr);
185 }
186 }
187 else
188 {
189 uint16_t const *pa16Mem = NULL;
190 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
191 if (rcStrict == VINF_SUCCESS)
192 {
193 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
194 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
195 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
196 /* skip sp */
197 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
198 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
199 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
200 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
201 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 iemRegAddToRsp(pCtx, 16);
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 }
209 return rcStrict;
210}
211
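/*
 * Note on the mapped fast path above (the 32-bit variant below works the same
 * way): PUSHA stores AX at the highest address and DI at the lowest, while
 * X86_GREG_xAX..X86_GREG_xDI are numbered 0..7, so the popped word for general
 * register r lives at pa16Mem[7 - r]:
 *
 *    pa16Mem[0]=DI  [1]=SI  [2]=BP  [3]=SP (ignored)  [4]=BX  [5]=DX  [6]=CX  [7]=AX
 */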
212
213/**
214 * Implements a 32-bit popa.
215 */
216IEM_CIMPL_DEF_0(iemCImpl_popa_32)
217{
218 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
219 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
220 RTGCPTR GCPtrLast = GCPtrStart + 31;
221 VBOXSTRICTRC rcStrict;
222
223 /*
224 * The docs are a bit hard to comprehend here, but it looks like we wrap
225 * around in real mode as long as none of the individual pops crosses the
226 * end of the stack segment. In protected mode we check the whole access
227 * in one go. For efficiency, only do the word-by-word thing if we're in
228 * danger of wrapping around.
229 */
230 /** @todo do popa boundary / wrap-around checks. */
231 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
232 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
233 {
234 /* word-by-word */
235 RTUINT64U TmpRsp;
236 TmpRsp.u = pCtx->rsp;
237 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
238 if (rcStrict == VINF_SUCCESS)
239 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
240 if (rcStrict == VINF_SUCCESS)
241 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
242 if (rcStrict == VINF_SUCCESS)
243 {
244 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
245 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
246 }
247 if (rcStrict == VINF_SUCCESS)
248 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
249 if (rcStrict == VINF_SUCCESS)
250 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
251 if (rcStrict == VINF_SUCCESS)
252 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
253 if (rcStrict == VINF_SUCCESS)
254 {
255#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
256 pCtx->rdi &= UINT32_MAX;
257 pCtx->rsi &= UINT32_MAX;
258 pCtx->rbp &= UINT32_MAX;
259 pCtx->rbx &= UINT32_MAX;
260 pCtx->rdx &= UINT32_MAX;
261 pCtx->rcx &= UINT32_MAX;
262 pCtx->rax &= UINT32_MAX;
263#endif
264 pCtx->rsp = TmpRsp.u;
265 iemRegAddToRip(pIemCpu, cbInstr);
266 }
267 }
268 else
269 {
270 uint32_t const *pa32Mem;
271 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
272 if (rcStrict == VINF_SUCCESS)
273 {
274 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
275 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
276 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
277 /* skip esp */
278 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
279 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
280 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
281 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
282 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
283 if (rcStrict == VINF_SUCCESS)
284 {
285 iemRegAddToRsp(pCtx, 32);
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 }
290 return rcStrict;
291}
292
293
294/**
295 * Implements a 16-bit pusha.
296 */
297IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
298{
299 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
300 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
301 RTGCPTR GCPtrBottom = GCPtrTop - 15;
302 VBOXSTRICTRC rcStrict;
303
304 /*
305 * The docs are a bit hard to comprehend here, but it looks like we wrap
306 * around in real mode as long as none of the individual pushes crosses the
307 * end of the stack segment. In protected mode we check the whole access
308 * in one go. For efficiency, only do the word-by-word thing if we're in
309 * danger of wrapping around.
310 */
311 /** @todo do pusha boundary / wrap-around checks. */
312 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
313 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
314 {
315 /* word-by-word */
316 RTUINT64U TmpRsp;
317 TmpRsp.u = pCtx->rsp;
318 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
319 if (rcStrict == VINF_SUCCESS)
320 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
321 if (rcStrict == VINF_SUCCESS)
322 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
323 if (rcStrict == VINF_SUCCESS)
324 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
325 if (rcStrict == VINF_SUCCESS)
326 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
333 if (rcStrict == VINF_SUCCESS)
334 {
335 pCtx->rsp = TmpRsp.u;
336 iemRegAddToRip(pIemCpu, cbInstr);
337 }
338 }
339 else
340 {
341 GCPtrBottom--;
342 uint16_t *pa16Mem = NULL;
343 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
344 if (rcStrict == VINF_SUCCESS)
345 {
346 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
347 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
348 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
349 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
350 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
351 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
352 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
353 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
354 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
355 if (rcStrict == VINF_SUCCESS)
356 {
357 iemRegSubFromRsp(pCtx, 16);
358 iemRegAddToRip(pIemCpu, cbInstr);
359 }
360 }
361 }
362 return rcStrict;
363}
364
365
366/**
367 * Implements a 32-bit pusha.
368 */
369IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
370{
371 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
372 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
373 RTGCPTR GCPtrBottom = GCPtrTop - 31;
374 VBOXSTRICTRC rcStrict;
375
376 /*
377 * The docs are a bit hard to comprehend here, but it looks like we wrap
378 * around in real mode as long as none of the individual pushes crosses the
379 * end of the stack segment. In protected mode we check the whole access
380 * in one go. For efficiency, only do the word-by-word thing if we're in
381 * danger of wrapping around.
382 */
383 /** @todo do pusha boundary / wrap-around checks. */
384 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
385 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
386 {
387 /* word-by-word */
388 RTUINT64U TmpRsp;
389 TmpRsp.u = pCtx->rsp;
390 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
391 if (rcStrict == VINF_SUCCESS)
392 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
393 if (rcStrict == VINF_SUCCESS)
394 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
395 if (rcStrict == VINF_SUCCESS)
396 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
397 if (rcStrict == VINF_SUCCESS)
398 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
399 if (rcStrict == VINF_SUCCESS)
400 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
401 if (rcStrict == VINF_SUCCESS)
402 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
403 if (rcStrict == VINF_SUCCESS)
404 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
405 if (rcStrict == VINF_SUCCESS)
406 {
407 pCtx->rsp = TmpRsp.u;
408 iemRegAddToRip(pIemCpu, cbInstr);
409 }
410 }
411 else
412 {
413 GCPtrBottom--;
414 uint32_t *pa32Mem;
415 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
416 if (rcStrict == VINF_SUCCESS)
417 {
418 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
419 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
420 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
421 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
422 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
423 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
424 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
425 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
426 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
427 if (rcStrict == VINF_SUCCESS)
428 {
429 iemRegSubFromRsp(pCtx, 32);
430 iemRegAddToRip(pIemCpu, cbInstr);
431 }
432 }
433 }
434 return rcStrict;
435}
436
437
438/**
439 * Implements pushf.
440 *
441 *
442 * @param enmEffOpSize The effective operand size.
443 */
444IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
445{
446 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
447
448 /*
449 * If we're in V8086 mode some care is required (which is why we're
450 * doing this in a C implementation).
451 */
452 uint32_t fEfl = pCtx->eflags.u;
453 if ( (fEfl & X86_EFL_VM)
454 && X86_EFL_GET_IOPL(fEfl) != 3 )
455 {
456 Assert(pCtx->cr0 & X86_CR0_PE);
457 if ( enmEffOpSize != IEMMODE_16BIT
458 || !(pCtx->cr4 & X86_CR4_VME))
459 return iemRaiseGeneralProtectionFault0(pIemCpu);
460 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
461 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
462 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
463 }
464
465 /*
466 * Ok, clear RF and VM and push the flags.
467 */
468 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
469
470 VBOXSTRICTRC rcStrict;
471 switch (enmEffOpSize)
472 {
473 case IEMMODE_16BIT:
474 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
475 break;
476 case IEMMODE_32BIT:
477 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
478 break;
479 case IEMMODE_64BIT:
480 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
481 break;
482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
483 }
484 if (rcStrict != VINF_SUCCESS)
485 return rcStrict;
486
487 iemRegAddToRip(pIemCpu, cbInstr);
488 return VINF_SUCCESS;
489}
490
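/*
 * Note on the V8086/VME path above: X86_EFL_VIF is bit 19 and X86_EFL_IF is
 * bit 9, so the `>> (19 - 9)` shift copies the virtual interrupt flag into the
 * IF position of the 16-bit image being pushed, after the real IF has been
 * masked out.  A guest at IOPL < 3 thus sees its virtual IF, e.g. VIF=1/IF=0
 * is pushed as IF=1.
 */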
491
492/**
493 * Implements popf.
494 *
495 * @param enmEffOpSize The effective operand size.
496 */
497IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
498{
499 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
500 uint32_t const fEflOld = pCtx->eflags.u;
501 VBOXSTRICTRC rcStrict;
502 uint32_t fEflNew;
503
504 /*
505 * V8086 is special as usual.
506 */
507 if (fEflOld & X86_EFL_VM)
508 {
509 /*
510 * Almost anything goes if IOPL is 3.
511 */
512 if (X86_EFL_GET_IOPL(fEflOld) == 3)
513 {
514 switch (enmEffOpSize)
515 {
516 case IEMMODE_16BIT:
517 {
518 uint16_t u16Value;
519 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
520 if (rcStrict != VINF_SUCCESS)
521 return rcStrict;
522 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
523 break;
524 }
525 case IEMMODE_32BIT:
526 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
527 if (rcStrict != VINF_SUCCESS)
528 return rcStrict;
529 break;
530 IEM_NOT_REACHED_DEFAULT_CASE_RET();
531 }
532
533 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
534 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
535 }
536 /*
537 * Interrupt flag virtualization with CR4.VME=1.
538 */
539 else if ( enmEffOpSize == IEMMODE_16BIT
540 && (pCtx->cr4 & X86_CR4_VME) )
541 {
542 uint16_t u16Value;
543 RTUINT64U TmpRsp;
544 TmpRsp.u = pCtx->rsp;
545 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
546 if (rcStrict != VINF_SUCCESS)
547 return rcStrict;
548
549 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
550 * or before? */
551 if ( ( (u16Value & X86_EFL_IF)
552 && (fEflOld & X86_EFL_VIP))
553 || (u16Value & X86_EFL_TF) )
554 return iemRaiseGeneralProtectionFault0(pIemCpu);
555
556 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
557 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
558 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
559 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
560
561 pCtx->rsp = TmpRsp.u;
562 }
563 else
564 return iemRaiseGeneralProtectionFault0(pIemCpu);
565
566 }
567 /*
568 * Not in V8086 mode.
569 */
570 else
571 {
572 /* Pop the flags. */
573 switch (enmEffOpSize)
574 {
575 case IEMMODE_16BIT:
576 {
577 uint16_t u16Value;
578 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
579 if (rcStrict != VINF_SUCCESS)
580 return rcStrict;
581 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
582 break;
583 }
584 case IEMMODE_32BIT:
585 case IEMMODE_64BIT:
586 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
587 if (rcStrict != VINF_SUCCESS)
588 return rcStrict;
589 break;
590 IEM_NOT_REACHED_DEFAULT_CASE_RET();
591 }
592
593 /* Merge them with the current flags. */
594 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
595 || pIemCpu->uCpl == 0)
596 {
597 fEflNew &= X86_EFL_POPF_BITS;
598 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
599 }
600 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
601 {
602 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
603 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
604 }
605 else
606 {
607 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
608 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
609 }
610 }
611
612 /*
613 * Commit the flags.
614 */
615 Assert(fEflNew & RT_BIT_32(1));
616 pCtx->eflags.u = fEflNew;
617 iemRegAddToRip(pIemCpu, cbInstr);
618
619 return VINF_SUCCESS;
620}
621
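/*
 * Note on the protected-mode merge above: which popped bits actually reach
 * EFLAGS depends on the privilege level:
 *    CPL == 0:          both IOPL and IF may be changed;
 *    0 < CPL <= IOPL:   IF may be changed, IOPL is preserved;
 *    CPL > IOPL:        neither IOPL nor IF is changed.
 * All other bits covered by X86_EFL_POPF_BITS are always taken from the popped
 * value.  (The first branch also short-circuits when the popped IOPL/IF happen
 * to equal the old values.)
 */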
622
623/**
624 * Implements a 16-bit indirect call.
625 *
626 * @param uNewPC The new program counter (RIP) value (loaded from the
627 * operand).
629 */
630IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
631{
632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
633 uint16_t uOldPC = pCtx->ip + cbInstr;
634 if (uNewPC > pCtx->cs.u32Limit)
635 return iemRaiseGeneralProtectionFault0(pIemCpu);
636
637 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
638 if (rcStrict != VINF_SUCCESS)
639 return rcStrict;
640
641 pCtx->rip = uNewPC;
642 return VINF_SUCCESS;
643
644}
645
646
647/**
648 * Implements a 16-bit relative call.
649 *
650 * @param offDisp The displacement offset.
651 */
652IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
653{
654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
655 uint16_t uOldPC = pCtx->ip + cbInstr;
656 uint16_t uNewPC = uOldPC + offDisp;
657 if (uNewPC > pCtx->cs.u32Limit)
658 return iemRaiseGeneralProtectionFault0(pIemCpu);
659
660 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
661 if (rcStrict != VINF_SUCCESS)
662 return rcStrict;
663
664 pCtx->rip = uNewPC;
665 return VINF_SUCCESS;
666}
667
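/*
 * Note on the 16-bit relative call above: uNewPC is computed in uint16_t
 * arithmetic, so IP + displacement wraps around at 64K just like the real
 * instruction, and only the wrapped value is checked against CS.limit.
 */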
668
669/**
670 * Implements a 32-bit indirect call.
671 *
672 * @param uNewPC The new program counter (RIP) value (loaded from the
673 * operand).
675 */
676IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
677{
678 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
679 uint32_t uOldPC = pCtx->eip + cbInstr;
680 if (uNewPC > pCtx->cs.u32Limit)
681 return iemRaiseGeneralProtectionFault0(pIemCpu);
682
683 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
684 if (rcStrict != VINF_SUCCESS)
685 return rcStrict;
686
687 pCtx->rip = uNewPC;
688 return VINF_SUCCESS;
689
690}
691
692
693/**
694 * Implements a 32-bit relative call.
695 *
696 * @param offDisp The displacement offset.
697 */
698IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
699{
700 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
701 uint32_t uOldPC = pCtx->eip + cbInstr;
702 uint32_t uNewPC = uOldPC + offDisp;
703 if (uNewPC > pCtx->cs.u32Limit)
704 return iemRaiseGeneralProtectionFault0(pIemCpu);
705
706 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
707 if (rcStrict != VINF_SUCCESS)
708 return rcStrict;
709
710 pCtx->rip = uNewPC;
711 return VINF_SUCCESS;
712}
713
714
715/**
716 * Implements a 64-bit indirect call.
717 *
718 * @param uNewPC The new program counter (RIP) value (loaded from the
719 * operand).
721 */
722IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
723{
724 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
725 uint64_t uOldPC = pCtx->rip + cbInstr;
726 if (!IEM_IS_CANONICAL(uNewPC))
727 return iemRaiseGeneralProtectionFault0(pIemCpu);
728
729 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
730 if (rcStrict != VINF_SUCCESS)
731 return rcStrict;
732
733 pCtx->rip = uNewPC;
734 return VINF_SUCCESS;
735
736}
737
738
739/**
740 * Implements a 64-bit relative call.
741 *
742 * @param offDisp The displacement offset.
743 */
744IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
745{
746 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
747 uint64_t uOldPC = pCtx->rip + cbInstr;
748 uint64_t uNewPC = uOldPC + offDisp;
749 if (!IEM_IS_CANONICAL(uNewPC))
750 return iemRaiseNotCanonical(pIemCpu);
751
752 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
753 if (rcStrict != VINF_SUCCESS)
754 return rcStrict;
755
756 pCtx->rip = uNewPC;
757 return VINF_SUCCESS;
758}
759
760
761/**
762 * Implements far jumps and calls thru task segments (TSS).
763 *
764 * @param uSel The selector.
765 * @param enmBranch The kind of branching we're performing.
766 * @param enmEffOpSize The effective operand size.
767 * @param pDesc The descriptor corresponding to @a uSel. The type is
768 * task segment.
769 */
770IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
771{
772 /* Call various functions to do the work. */
773 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
774}
775
776
777/**
778 * Implements far jumps and calls thru task gates.
779 *
780 * @param uSel The selector.
781 * @param enmBranch The kind of branching we're performing.
782 * @param enmEffOpSize The effective operand size.
783 * @param pDesc The descriptor corresponding to @a uSel. The type is
784 * task gate.
785 */
786IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
787{
788 /* Call various functions to do the work. */
789 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
790}
791
792
793/**
794 * Implements far jumps and calls thru call gates.
795 *
796 * @param uSel The selector.
797 * @param enmBranch The kind of branching we're performing.
798 * @param enmEffOpSize The effective operand size.
799 * @param pDesc The descriptor corresponding to @a uSel. The type is
800 * call gate.
801 */
802IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
803{
804 /* Call various functions to do the work. */
805 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
806}
807
808
809/**
810 * Implements far jumps and calls thru system selectors.
811 *
812 * @param uSel The selector.
813 * @param enmBranch The kind of branching we're performing.
814 * @param enmEffOpSize The effective operand size.
815 * @param pDesc The descriptor corresponding to @a uSel.
816 */
817IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
818{
819 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
820 Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
821
822 if (IEM_IS_LONG_MODE(pIemCpu))
823 switch (pDesc->Legacy.Gen.u4Type)
824 {
825 case AMD64_SEL_TYPE_SYS_CALL_GATE:
826 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
827
828 default:
829 case AMD64_SEL_TYPE_SYS_LDT:
830 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
831 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
832 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
833 case AMD64_SEL_TYPE_SYS_INT_GATE:
834 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
835 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
836
837 }
838
839 switch (pDesc->Legacy.Gen.u4Type)
840 {
841 case X86_SEL_TYPE_SYS_286_CALL_GATE:
842 case X86_SEL_TYPE_SYS_386_CALL_GATE:
843 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
844
845 case X86_SEL_TYPE_SYS_TASK_GATE:
846 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
847
848 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
849 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
850 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
851
852 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
853 Log(("branch %04x -> busy 286 TSS\n", uSel));
854 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
855
856 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
857 Log(("branch %04x -> busy 386 TSS\n", uSel));
858 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
859
860 default:
861 case X86_SEL_TYPE_SYS_LDT:
862 case X86_SEL_TYPE_SYS_286_INT_GATE:
863 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
864 case X86_SEL_TYPE_SYS_386_INT_GATE:
865 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
866 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
867 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
868 }
869}
870
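/*
 * Summary of the dispatcher above: in long mode only 64-bit call gates are
 * accepted; in legacy protected mode 286/386 call gates, task gates and
 * available TSS descriptors are forwarded to their workers, while busy TSSes,
 * LDT descriptors and interrupt/trap gates raise #GP with the selector as the
 * error code.
 */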
871
872/**
873 * Implements far jumps.
874 *
875 * @param uSel The selector.
876 * @param offSeg The segment offset.
877 * @param enmEffOpSize The effective operand size.
878 */
879IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
880{
881 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
882 NOREF(cbInstr);
883 Assert(offSeg <= UINT32_MAX);
884
885 /*
886 * Real mode and V8086 mode are easy. The only snag seems to be that
887 * CS.limit doesn't change and the limit check is done against the current
888 * limit.
889 */
890 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
891 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
892 {
893 if (offSeg > pCtx->cs.u32Limit)
894 return iemRaiseGeneralProtectionFault0(pIemCpu);
895
896 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
897 pCtx->rip = offSeg;
898 else
899 pCtx->rip = offSeg & UINT16_MAX;
900 pCtx->cs.Sel = uSel;
901 pCtx->cs.ValidSel = uSel;
902 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
903 pCtx->cs.u64Base = (uint32_t)uSel << 4;
904 /** @todo REM reset the accessed bit (see on jmp far16 after disabling
905 * PE. Check with VT-x and AMD-V. */
906#ifdef IEM_VERIFICATION_MODE
907 pCtx->cs.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
908#endif
909 return VINF_SUCCESS;
910 }
911
912 /*
913 * Protected mode. Need to parse the specified descriptor...
914 */
915 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
916 {
917 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
918 return iemRaiseGeneralProtectionFault0(pIemCpu);
919 }
920
921 /* Fetch the descriptor. */
922 IEMSELDESC Desc;
923 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
924 if (rcStrict != VINF_SUCCESS)
925 return rcStrict;
926
927 /* Is it there? */
928 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
929 {
930 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
931 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
932 }
933
934 /*
935 * Deal with it according to its type. We do the standard code selectors
936 * here and dispatch the system selectors to worker functions.
937 */
938 if (!Desc.Legacy.Gen.u1DescType)
939 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
940
941 /* Only code segments. */
942 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
943 {
944 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
945 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
946 }
947
948 /* L vs D. */
949 if ( Desc.Legacy.Gen.u1Long
950 && Desc.Legacy.Gen.u1DefBig
951 && IEM_IS_LONG_MODE(pIemCpu))
952 {
953 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
954 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
955 }
956
957 /* DPL/RPL/CPL check, where conforming segments make a difference. */
958 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
959 {
960 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
961 {
962 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
963 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
965 }
966 }
967 else
968 {
969 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
970 {
971 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
972 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
973 }
974 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
975 {
976 Log(("jmpf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
977 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
978 }
979 }
980
981 /* Chop the high bits if 16-bit (Intel says so). */
982 if (enmEffOpSize == IEMMODE_16BIT)
983 offSeg &= UINT16_MAX;
984
985 /* Limit check. (Should alternatively check for non-canonical addresses
986 here, but that is ruled out by offSeg being 32-bit, right?) */
987 uint64_t u64Base;
988 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
989 if (Desc.Legacy.Gen.u1Granularity)
990 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
991 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
992 u64Base = 0;
993 else
994 {
995 if (offSeg > cbLimit)
996 {
997 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
998 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
999 }
1000 u64Base = X86DESC_BASE(Desc.Legacy);
1001 }
1002
1003 /*
1004 * Ok, everything checked out fine. Now set the accessed bit before
1005 * committing the result into CS, CSHID and RIP.
1006 */
1007 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1008 {
1009 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1010 if (rcStrict != VINF_SUCCESS)
1011 return rcStrict;
1012#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1013 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1014#endif
1015 }
1016
1017 /* commit */
1018 pCtx->rip = offSeg;
1019 pCtx->cs.Sel = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1020 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1021 pCtx->cs.ValidSel = pCtx->cs.Sel;
1022 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1023 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1024 pCtx->cs.u32Limit = cbLimit;
1025 pCtx->cs.u64Base = u64Base;
1026 /** @todo check if the hidden bits are loaded correctly for 64-bit
1027 * mode. */
1028 return VINF_SUCCESS;
1029}
1030
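/*
 * Worked example for the limit expansion used above (and again in callf and
 * retf below): with the granularity bit set, a raw descriptor limit of 0x1F
 * becomes (0x1F << PAGE_SHIFT) | PAGE_OFFSET_MASK = 0x1FFFF, i.e. the limit is
 * scaled from bytes to 4K pages with the low twelve bits forced to one.
 */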
1031
1032/**
1033 * Implements far calls.
1034 *
1035 * This is very similar to iemCImpl_FarJmp.
1036 *
1037 * @param uSel The selector.
1038 * @param offSeg The segment offset.
1039 * @param enmEffOpSize The operand size (in case we need it).
1040 */
1041IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1042{
1043 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1044 VBOXSTRICTRC rcStrict;
1045 uint64_t uNewRsp;
1046 RTPTRUNION uPtrRet;
1047
1048 /*
1049 * Real mode and V8086 mode are easy. The only snag seems to be that
1050 * CS.limit doesn't change and the limit check is done against the current
1051 * limit.
1052 */
1053 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1054 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1055 {
1056 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1057
1058 /* Check stack first - may #SS(0). */
1059 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1060 &uPtrRet.pv, &uNewRsp);
1061 if (rcStrict != VINF_SUCCESS)
1062 return rcStrict;
1063
1064 /* Check the target address range. */
1065 if (offSeg > UINT32_MAX)
1066 return iemRaiseGeneralProtectionFault0(pIemCpu);
1067
1068 /* Everything is fine, push the return address. */
1069 if (enmEffOpSize == IEMMODE_16BIT)
1070 {
1071 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1072 uPtrRet.pu16[1] = pCtx->cs.Sel;
1073 }
1074 else
1075 {
1076 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1077 uPtrRet.pu16[3] = pCtx->cs.Sel;
1078 }
1079 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1080 if (rcStrict != VINF_SUCCESS)
1081 return rcStrict;
1082
1083 /* Branch. */
1084 pCtx->rip = offSeg;
1085 pCtx->cs.Sel = uSel;
1086 pCtx->cs.ValidSel = uSel;
1087 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1088 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1089 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
1090 * after disabling PE.) Check with VT-x and AMD-V. */
1091#ifdef IEM_VERIFICATION_MODE
1092 pCtx->cs.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
1093#endif
1094 return VINF_SUCCESS;
1095 }
1096
1097 /*
1098 * Protected mode. Need to parse the specified descriptor...
1099 */
1100 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1101 {
1102 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1103 return iemRaiseGeneralProtectionFault0(pIemCpu);
1104 }
1105
1106 /* Fetch the descriptor. */
1107 IEMSELDESC Desc;
1108 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1109 if (rcStrict != VINF_SUCCESS)
1110 return rcStrict;
1111
1112 /*
1113 * Deal with it according to its type. We do the standard code selectors
1114 * here and dispatch the system selectors to worker functions.
1115 */
1116 if (!Desc.Legacy.Gen.u1DescType)
1117 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1118
1119 /* Only code segments. */
1120 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1121 {
1122 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1123 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1124 }
1125
1126 /* L vs D. */
1127 if ( Desc.Legacy.Gen.u1Long
1128 && Desc.Legacy.Gen.u1DefBig
1129 && IEM_IS_LONG_MODE(pIemCpu))
1130 {
1131 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1132 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1133 }
1134
1135 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1136 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1137 {
1138 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1139 {
1140 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1141 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1142 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1143 }
1144 }
1145 else
1146 {
1147 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1148 {
1149 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1150 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1151 }
1152 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1153 {
1154 Log(("callf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1155 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1156 }
1157 }
1158
1159 /* Is it there? */
1160 if (!Desc.Legacy.Gen.u1Present)
1161 {
1162 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1163 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1164 }
1165
1166 /* Check stack first - may #SS(0). */
1167 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1168 * 16-bit code cause a two or four byte CS to be pushed? */
1169 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1170 enmEffOpSize == IEMMODE_64BIT ? 8+8
1171 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1172 &uPtrRet.pv, &uNewRsp);
1173 if (rcStrict != VINF_SUCCESS)
1174 return rcStrict;
1175
1176 /* Chop the high bits if 16-bit (Intel says so). */
1177 if (enmEffOpSize == IEMMODE_16BIT)
1178 offSeg &= UINT16_MAX;
1179
1180 /* Limit / canonical check. */
1181 uint64_t u64Base;
1182 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1183 if (Desc.Legacy.Gen.u1Granularity)
1184 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1185
1186 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1187 {
1188 if (!IEM_IS_CANONICAL(offSeg))
1189 {
1190 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1191 return iemRaiseNotCanonical(pIemCpu);
1192 }
1193 u64Base = 0;
1194 }
1195 else
1196 {
1197 if (offSeg > cbLimit)
1198 {
1199 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1200 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1201 }
1202 u64Base = X86DESC_BASE(Desc.Legacy);
1203 }
1204
1205 /*
1206 * Now set the accessed bit before
1207 * writing the return address to the stack and committing the result into
1208 * CS, CSHID and RIP.
1209 */
1210 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1211 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1212 {
1213 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1214 if (rcStrict != VINF_SUCCESS)
1215 return rcStrict;
1216#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1217 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1218#endif
1219 }
1220
1221 /* stack */
1222 if (enmEffOpSize == IEMMODE_16BIT)
1223 {
1224 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1225 uPtrRet.pu16[1] = pCtx->cs.Sel;
1226 }
1227 else if (enmEffOpSize == IEMMODE_32BIT)
1228 {
1229 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1230 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1231 }
1232 else
1233 {
1234 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1235 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1236 }
1237 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1238 if (rcStrict != VINF_SUCCESS)
1239 return rcStrict;
1240
1241 /* commit */
1242 pCtx->rip = offSeg;
1243 pCtx->cs.Sel = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1244 pCtx->cs.Sel |= pIemCpu->uCpl;
1245 pCtx->cs.ValidSel = pCtx->cs.Sel;
1246 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1247 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1248 pCtx->cs.u32Limit = cbLimit;
1249 pCtx->cs.u64Base = u64Base;
1250 /** @todo check if the hidden bits are loaded correctly for 64-bit
1251 * mode. */
1252 return VINF_SUCCESS;
1253}
1254
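/*
 * Stack frame written by the protected-mode far call above, lowest address
 * first: 16-bit pushes IP(2) and CS(2), 32-bit pushes EIP(4) and CS(4), and
 * 64-bit pushes RIP(8) and CS(8); what lands in the upper bytes of the CS
 * slot remains an open question (see the testcase todos in the code).
 */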
1255
1256/**
1257 * Implements retf.
1258 *
1259 * @param enmEffOpSize The effective operand size.
1260 * @param cbPop The number of bytes of arguments to pop from the
1261 * stack.
1262 */
1263IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1264{
1265 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1266 VBOXSTRICTRC rcStrict;
1267 RTCPTRUNION uPtrFrame;
1268 uint64_t uNewRsp;
1269 uint64_t uNewRip;
1270 uint16_t uNewCs;
1271 NOREF(cbInstr);
1272
1273 /*
1274 * Read the stack values first.
1275 */
1276 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1277 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1278 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1279 if (rcStrict != VINF_SUCCESS)
1280 return rcStrict;
1281 if (enmEffOpSize == IEMMODE_16BIT)
1282 {
1283 uNewRip = uPtrFrame.pu16[0];
1284 uNewCs = uPtrFrame.pu16[1];
1285 }
1286 else if (enmEffOpSize == IEMMODE_32BIT)
1287 {
1288 uNewRip = uPtrFrame.pu32[0];
1289 uNewCs = uPtrFrame.pu16[2];
1290 }
1291 else
1292 {
1293 uNewRip = uPtrFrame.pu64[0];
1294 uNewCs = uPtrFrame.pu16[4];
1295 }
1296
1297 /*
1298 * Real mode and V8086 mode are easy.
1299 */
1300 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1301 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1302 {
1303 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1304 /** @todo check how this is supposed to work if sp=0xfffe. */
1305
1306 /* Check the limit of the new EIP. */
1307 /** @todo Intel pseudo code only does the limit check for 16-bit
1308 * operands, AMD does not make any distinction. What is right? */
1309 if (uNewRip > pCtx->cs.u32Limit)
1310 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1311
1312 /* commit the operation. */
1313 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1314 if (rcStrict != VINF_SUCCESS)
1315 return rcStrict;
1316 pCtx->rip = uNewRip;
1317 pCtx->cs.Sel = uNewCs;
1318 pCtx->cs.ValidSel = uNewCs;
1319 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1320 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1321 /** @todo do we load attribs and limit as well? */
1322 if (cbPop)
1323 iemRegAddToRsp(pCtx, cbPop);
1324 return VINF_SUCCESS;
1325 }
1326
1327 /*
1328 * Protected mode is complicated, of course.
1329 */
1330 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1331 {
1332 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1333 return iemRaiseGeneralProtectionFault0(pIemCpu);
1334 }
1335
1336 /* Fetch the descriptor. */
1337 IEMSELDESC DescCs;
1338 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1339 if (rcStrict != VINF_SUCCESS)
1340 return rcStrict;
1341
1342 /* Can only return to a code selector. */
1343 if ( !DescCs.Legacy.Gen.u1DescType
1344 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1345 {
1346 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1347 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1348 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1349 }
1350
1351 /* L vs D. */
1352 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1353 && DescCs.Legacy.Gen.u1DefBig
1354 && IEM_IS_LONG_MODE(pIemCpu))
1355 {
1356 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1357 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1358 }
1359
1360 /* DPL/RPL/CPL checks. */
1361 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1362 {
1363 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1364 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1365 }
1366
1367 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1368 {
1369 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1370 {
1371 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1372 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1373 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1374 }
1375 }
1376 else
1377 {
1378 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1379 {
1380 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1381 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1382 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1383 }
1384 }
1385
1386 /* Is it there? */
1387 if (!DescCs.Legacy.Gen.u1Present)
1388 {
1389 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1390 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1391 }
1392
1393 /*
1394 * Return to outer privilege? (We'll typically have entered via a call gate.)
1395 */
1396 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1397 {
1398 /* Read the return pointer, it comes before the parameters. */
1399 RTCPTRUNION uPtrStack;
1400 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1401 if (rcStrict != VINF_SUCCESS)
1402 return rcStrict;
1403 uint16_t uNewOuterSs;
1404 uint64_t uNewOuterRsp;
1405 if (enmEffOpSize == IEMMODE_16BIT)
1406 {
1407 uNewOuterRsp = uPtrFrame.pu16[0];
1408 uNewOuterSs = uPtrFrame.pu16[1];
1409 }
1410 else if (enmEffOpSize == IEMMODE_32BIT)
1411 {
1412 uNewOuterRsp = uPtrFrame.pu32[0];
1413 uNewOuterSs = uPtrFrame.pu16[2];
1414 }
1415 else
1416 {
1417 uNewOuterRsp = uPtrFrame.pu64[0];
1418 uNewOuterSs = uPtrFrame.pu16[4];
1419 }
1420
1421 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1422 and read the selector. */
1423 IEMSELDESC DescSs;
1424 if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
1425 {
1426 if ( !DescCs.Legacy.Gen.u1Long
1427 || (uNewOuterSs & X86_SEL_RPL) == 3)
1428 {
1429 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1430 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1431 return iemRaiseGeneralProtectionFault0(pIemCpu);
1432 }
1433 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1434 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1435 }
1436 else
1437 {
1438 /* Fetch the descriptor for the new stack segment. */
1439 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1440 if (rcStrict != VINF_SUCCESS)
1441 return rcStrict;
1442 }
1443
1444 /* Check that RPL of stack and code selectors match. */
1445 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1446 {
1447 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1448 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1449 }
1450
1451 /* Must be a writable data segment. */
1452 if ( !DescSs.Legacy.Gen.u1DescType
1453 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1454 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1455 {
1456 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1457 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1458 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1459 }
1460
1461 /* L vs D. (Not mentioned by intel.) */
1462 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1463 && DescSs.Legacy.Gen.u1DefBig
1464 && IEM_IS_LONG_MODE(pIemCpu))
1465 {
1466 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1467 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1468 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1469 }
1470
1471 /* DPL/RPL/CPL checks. */
1472 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1473 {
1474 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1475 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1476 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1477 }
1478
1479 /* Is it there? */
1480 if (!DescSs.Legacy.Gen.u1Present)
1481 {
1482 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1483 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1484 }
1485
1486 /* Calc SS limit.*/
1487 uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy);
1488 if (DescSs.Legacy.Gen.u1Granularity)
1489 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1490
1491
1492 /* Is RIP canonical or within CS.limit? */
1493 uint64_t u64Base;
1494 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1495 if (DescCs.Legacy.Gen.u1Granularity)
1496 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1497
1498 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1499 {
1500 if (!IEM_IS_CANONICAL(uNewRip))
1501 {
1502 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1503 return iemRaiseNotCanonical(pIemCpu);
1504 }
1505 u64Base = 0;
1506 }
1507 else
1508 {
1509 if (uNewRip > cbLimitCs)
1510 {
1511 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1512 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1513 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1514 }
1515 u64Base = X86DESC_BASE(DescCs.Legacy);
1516 }
1517
1518 /*
1519 * Now set the accessed bit before
1520 * writing the return address to the stack and committing the result into
1521 * CS, CSHID and RIP.
1522 */
1523 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1524 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1525 {
1526 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1527 if (rcStrict != VINF_SUCCESS)
1528 return rcStrict;
1529#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1530 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1531#endif
1532 }
1533 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1534 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1535 {
1536 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1537 if (rcStrict != VINF_SUCCESS)
1538 return rcStrict;
1539#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1540 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1541#endif
1542 }
1543
1544 /* commit */
1545 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1546 if (rcStrict != VINF_SUCCESS)
1547 return rcStrict;
1548 if (enmEffOpSize == IEMMODE_16BIT)
1549 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1550 else
1551 pCtx->rip = uNewRip;
1552 pCtx->cs.Sel = uNewCs;
1553 pCtx->cs.ValidSel = uNewCs;
1554 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1555 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1556 pCtx->cs.u32Limit = cbLimitCs;
1557 pCtx->cs.u64Base = u64Base;
1558 pCtx->rsp = uNewRsp;
1559 pCtx->ss.Sel = uNewOuterSs;
1560 pCtx->ss.ValidSel = uNewOuterSs;
1561 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1562 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(DescSs.Legacy);
1563 pCtx->ss.u32Limit = cbLimitSs;
1564 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1565 pCtx->ss.u64Base = 0;
1566 else
1567 pCtx->ss.u64Base = X86DESC_BASE(DescSs.Legacy);
1568
1569 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1570 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds);
1571 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es);
1572 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs);
1573 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs);
1574
1575 /** @todo check if the hidden bits are loaded correctly for 64-bit
1576 * mode. */
1577
1578 if (cbPop)
1579 iemRegAddToRsp(pCtx, cbPop);
1580
1581 /* Done! */
1582 }
1583 /*
1584 * Return to the same privilege level
1585 */
1586 else
1587 {
1588 /* Limit / canonical check. */
1589 uint64_t u64Base;
1590 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1591 if (DescCs.Legacy.Gen.u1Granularity)
1592 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1593
1594 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1595 {
1596 if (!IEM_IS_CANONICAL(uNewRip))
1597 {
1598 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1599 return iemRaiseNotCanonical(pIemCpu);
1600 }
1601 u64Base = 0;
1602 }
1603 else
1604 {
1605 if (uNewRip > cbLimitCs)
1606 {
1607 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1608 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1609 }
1610 u64Base = X86DESC_BASE(DescCs.Legacy);
1611 }
1612
1613 /*
1614 * Now set the accessed bit before
1615 * writing the return address to the stack and committing the result into
1616 * CS, CSHID and RIP.
1617 */
1618 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1619 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1620 {
1621 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1622 if (rcStrict != VINF_SUCCESS)
1623 return rcStrict;
1624#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1625 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1626#endif
1627 }
1628
1629 /* commit */
1630 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1631 if (rcStrict != VINF_SUCCESS)
1632 return rcStrict;
1633 if (enmEffOpSize == IEMMODE_16BIT)
1634 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1635 else
1636 pCtx->rip = uNewRip;
1637 pCtx->cs.Sel = uNewCs;
1638 pCtx->cs.ValidSel = uNewCs;
1639 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1640 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1641 pCtx->cs.u32Limit = cbLimitCs;
1642 pCtx->cs.u64Base = u64Base;
1643 /** @todo check if the hidden bits are loaded correctly for 64-bit
1644 * mode. */
1645 if (cbPop)
1646 iemRegAddToRsp(pCtx, cbPop);
1647 }
1648 return VINF_SUCCESS;
1649}
1650
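/*
 * Stack picture consumed by the outer-privilege retf path above, lowest
 * address first: new (E/R)IP, new CS, the cbPop parameter bytes, then the new
 * (E/R)SP and SS of the outer stack.  The return address pair is popped
 * first; the outer SS:SP pair sits above the parameter bytes and is fetched
 * with iemMemStackPopContinueSpecial().
 */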
1651
1652/**
1653 * Implements retn.
1654 *
1655 * We're doing this in C because of the \#GP that might be raised if the popped
1656 * program counter is out of bounds.
1657 *
1658 * @param enmEffOpSize The effective operand size.
1659 * @param cbPop The number of bytes of arguments to pop from the
1660 * stack.
1661 */
1662IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1663{
1664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1665 NOREF(cbInstr);
1666
1667 /* Fetch the RSP from the stack. */
1668 VBOXSTRICTRC rcStrict;
1669 RTUINT64U NewRip;
1670 RTUINT64U NewRsp;
1671 NewRsp.u = pCtx->rsp;
1672 switch (enmEffOpSize)
1673 {
1674 case IEMMODE_16BIT:
1675 NewRip.u = 0;
1676 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1677 break;
1678 case IEMMODE_32BIT:
1679 NewRip.u = 0;
1680 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1681 break;
1682 case IEMMODE_64BIT:
1683 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1684 break;
1685 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1686 }
1687 if (rcStrict != VINF_SUCCESS)
1688 return rcStrict;
1689
1690 /* Check the new RSP before loading it. */
1691 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1692 * of it. The canonical test is performed here and for call. */
1693 if (enmEffOpSize != IEMMODE_64BIT)
1694 {
1695 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1696 {
1697 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1698 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1699 }
1700 }
1701 else
1702 {
1703 if (!IEM_IS_CANONICAL(NewRip.u))
1704 {
1705 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1706 return iemRaiseNotCanonical(pIemCpu);
1707 }
1708 }
1709
1710 /* Commit it. */
1711 pCtx->rip = NewRip.u;
1712 pCtx->rsp = NewRsp.u;
1713 if (cbPop)
1714 iemRegAddToRsp(pCtx, cbPop);
1715
1716 return VINF_SUCCESS;
1717}
1718
1719
1720/**
1721 * Implements leave.
1722 *
1723 * We're doing this in C because messing with the stack registers is annoying
1724 * since they depend on SS attributes.
1725 *
1726 * @param enmEffOpSize The effective operand size.
1727 */
1728IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1729{
1730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1731
1732 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1733 RTUINT64U NewRsp;
1734 if (pCtx->ss.Attr.n.u1Long)
1735 {
1736 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1737 NewRsp.u = pCtx->rsp;
1738 NewRsp.Words.w0 = pCtx->bp;
1739 }
1740 else if (pCtx->ss.Attr.n.u1DefBig)
1741 NewRsp.u = pCtx->ebp;
1742 else
1743 NewRsp.u = pCtx->rbp;
1744
1745 /* Pop RBP according to the operand size. */
1746 VBOXSTRICTRC rcStrict;
1747 RTUINT64U NewRbp;
1748 switch (enmEffOpSize)
1749 {
1750 case IEMMODE_16BIT:
1751 NewRbp.u = pCtx->rbp;
1752 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1753 break;
1754 case IEMMODE_32BIT:
1755 NewRbp.u = 0;
1756 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1757 break;
1758 case IEMMODE_64BIT:
1759 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1760 break;
1761 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1762 }
1763 if (rcStrict != VINF_SUCCESS)
1764 return rcStrict;
1765
1766
1767 /* Commit it. */
1768 pCtx->rbp = NewRbp.u;
1769 pCtx->rsp = NewRsp.u;
1770 iemRegAddToRip(pIemCpu, cbInstr);
1771
1772 return VINF_SUCCESS;
1773}
1774
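/*
 * Illustrative sketch (not part of the build): how the operand size decides
 * how much of RBP the leave implementation above replaces.  A 16-bit pop only
 * overwrites the low word and keeps the rest of the old value, while the
 * 32-bit pop zero-extends and the 64-bit pop replaces the whole register.
 * mergePoppedBp() is a hypothetical helper for the example only.
 */
#if 0
# include <stdint.h>

static uint64_t mergePoppedBp(uint64_t uOldRbp, uint64_t uPopped, unsigned cbOp)
{
    switch (cbOp)
    {
        case 2:  return (uOldRbp & UINT64_C(0xffffffffffff0000)) | (uint16_t)uPopped;
        case 4:  return (uint32_t)uPopped;  /* high dword cleared */
        default: return uPopped;            /* 8 bytes: full 64-bit value */
    }
}
#endif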
1775
1776/**
1777 * Implements int3 and int XX.
1778 *
1779 * @param u8Int The interrupt vector number.
1780 * @param fIsBpInstr Whether this is the int3 breakpoint instruction.
1781 */
1782IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1783{
1784 Assert(pIemCpu->cXcptRecursions == 0);
1785 return iemRaiseXcptOrInt(pIemCpu,
1786 cbInstr,
1787 u8Int,
1788 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1789 0,
1790 0);
1791}
1792
1793
1794/**
1795 * Implements iret for real mode and V8086 mode.
1796 *
1797 * @param enmEffOpSize The effective operand size.
1798 */
1799IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1800{
1801 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1802 NOREF(cbInstr);
1803
1804 /*
1805 * In V8086 mode, iret raises #GP(0) if CR4.VME isn't enabled.
1806 */
1807 if ( pCtx->eflags.Bits.u1VM
1808 && !(pCtx->cr4 & X86_CR4_VME))
1809 return iemRaiseGeneralProtectionFault0(pIemCpu);
1810
1811 /*
1812 * Do the stack bits, but don't commit RSP before everything checks
1813 * out right.
1814 */
1815 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1816 VBOXSTRICTRC rcStrict;
1817 RTCPTRUNION uFrame;
1818 uint16_t uNewCs;
1819 uint32_t uNewEip;
1820 uint32_t uNewFlags;
1821 uint64_t uNewRsp;
1822 if (enmEffOpSize == IEMMODE_32BIT)
1823 {
1824 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1825 if (rcStrict != VINF_SUCCESS)
1826 return rcStrict;
1827 uNewEip = uFrame.pu32[0];
1828 uNewCs = (uint16_t)uFrame.pu32[1];
1829 uNewFlags = uFrame.pu32[2];
1830 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1831 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1832 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1833 | X86_EFL_ID;
1834 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1835 }
1836 else
1837 {
1838 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1839 if (rcStrict != VINF_SUCCESS)
1840 return rcStrict;
1841 uNewEip = uFrame.pu16[0];
1842 uNewCs = uFrame.pu16[1];
1843 uNewFlags = uFrame.pu16[2];
1844 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1845 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1846 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1847 /** @todo The intel pseudo code does not indicate what happens to
1848 * reserved flags. We just ignore them. */
1849 }
1850 /** @todo Check how this is supposed to work if sp=0xfffe. */
1851
1852 /*
1853 * Check the limit of the new EIP.
1854 */
1855 /** @todo Only the AMD pseudo code checks the limit here; what's
1856 * right? */
1857 if (uNewEip > pCtx->cs.u32Limit)
1858 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1859
1860 /*
1861 * V8086 checks and flag adjustments
1862 */
1863 if (pCtx->eflags.Bits.u1VM)
1864 {
1865 if (pCtx->eflags.Bits.u2IOPL == 3)
1866 {
1867 /* Preserve IOPL and clear RF. */
1868 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1869 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1870 }
1871 else if ( enmEffOpSize == IEMMODE_16BIT
1872 && ( !(uNewFlags & X86_EFL_IF)
1873 || !pCtx->eflags.Bits.u1VIP )
1874 && !(uNewFlags & X86_EFL_TF) )
1875 {
1876 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1877 uNewFlags &= ~X86_EFL_VIF;
1878 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1879 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1880 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1881 }
1882 else
1883 return iemRaiseGeneralProtectionFault0(pIemCpu);
1884 }
1885
1886 /*
1887 * Commit the operation.
1888 */
1889 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1890 if (rcStrict != VINF_SUCCESS)
1891 return rcStrict;
1892 pCtx->rip = uNewEip;
1893 pCtx->cs.Sel = uNewCs;
1894 pCtx->cs.ValidSel = uNewCs;
1895 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1896 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1897 /** @todo do we load attribs and limit as well? */
1898 Assert(uNewFlags & X86_EFL_1);
1899 pCtx->eflags.u = uNewFlags;
1900
1901 return VINF_SUCCESS;
1902}
1903
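/*
 * Illustrative sketch (not part of the build): the flag juggling done by the
 * 16-bit VME path above (IOPL below 3).  The IF bit popped off the stack is
 * copied into VIF (bit 19), RF is cleared, and the guest's real IF and IOPL
 * are preserved.  The extra guards in the code above (TF must be clear, and
 * not both IF and VIP set) are omitted here, and plain bit positions stand in
 * for the X86_EFL_* macros.
 */
#if 0
# include <stdint.h>

static uint32_t vmeMergeIretFlags(uint32_t fPopped, uint32_t fCurrent)
{
    uint32_t const fIF   = UINT32_C(1) << 9;
    uint32_t const fIOPL = UINT32_C(3) << 12;
    uint32_t const fRF   = UINT32_C(1) << 16;
    uint32_t const fVIF  = UINT32_C(1) << 19;

    uint32_t fNew = fPopped & ~fVIF;
    fNew |= (fNew & fIF) << (19 - 9);       /* popped IF -> VIF */
    fNew &= ~(fIF | fIOPL | fRF);           /* clear RF, drop the popped IF/IOPL */
    fNew |= fCurrent & (fIF | fIOPL);       /* keep the real IF and IOPL */
    return fNew;
}
#endif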
1904
1905/**
1906 * Implements iret for protected mode.
1907 *
1908 * @param enmEffOpSize The effective operand size.
1909 */
1910IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
1911{
1912 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1913 NOREF(cbInstr);
1914
1915 /*
1916 * Nested task return.
1917 */
1918 if (pCtx->eflags.Bits.u1NT)
1919 {
1920 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1921 }
1922 /*
1923 * Normal return.
1924 */
1925 else
1926 {
1927 /*
1928 * Do the stack bits, but don't commit RSP before everything checks
1929 * out right.
1930 */
1931 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1932 VBOXSTRICTRC rcStrict;
1933 RTCPTRUNION uFrame;
1934 uint16_t uNewCs;
1935 uint32_t uNewEip;
1936 uint32_t uNewFlags;
1937 uint64_t uNewRsp;
1938 if (enmEffOpSize == IEMMODE_32BIT)
1939 {
1940 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1941 if (rcStrict != VINF_SUCCESS)
1942 return rcStrict;
1943 uNewEip = uFrame.pu32[0];
1944 uNewCs = (uint16_t)uFrame.pu32[1];
1945 uNewFlags = uFrame.pu32[2];
1946 }
1947 else
1948 {
1949 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1950 if (rcStrict != VINF_SUCCESS)
1951 return rcStrict;
1952 uNewEip = uFrame.pu16[0];
1953 uNewCs = uFrame.pu16[1];
1954 uNewFlags = uFrame.pu16[2];
1955 }
1956 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
1957 if (rcStrict != VINF_SUCCESS)
1958 return rcStrict;
1959
1960 /*
1961 * What are we returning to?
1962 */
1963 if ( (uNewFlags & X86_EFL_VM)
1964 && pIemCpu->uCpl == 0)
1965 {
1966 /* V8086 mode! */
1967 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1968 }
1969 else
1970 {
1971 /*
1972 * Protected mode.
1973 */
1974 /* Read the CS descriptor. */
1975 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1976 {
1977 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
1978 return iemRaiseGeneralProtectionFault0(pIemCpu);
1979 }
1980
1981 IEMSELDESC DescCS;
1982 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
1983 if (rcStrict != VINF_SUCCESS)
1984 {
1985 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
1986 return rcStrict;
1987 }
1988
1989 /* Must be a code descriptor. */
1990 if (!DescCS.Legacy.Gen.u1DescType)
1991 {
1992 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1993 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1994 }
1995 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1996 {
1997 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1998 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1999 }
2000
2001 /* Privilege checks. */
2002 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2003 {
2004 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2005 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2006 }
2007 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2008 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2009 {
2010 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2011 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2012 }
2013
2014 /* Present? */
2015 if (!DescCS.Legacy.Gen.u1Present)
2016 {
2017 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2018 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2019 }
2020
2021 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
2022 if (DescCS.Legacy.Gen.u1Granularity)
2023 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2024
2025 /*
2026 * Return to outer level?
2027 */
2028 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2029 {
2030 uint16_t uNewSS;
2031 uint32_t uNewESP;
2032 if (enmEffOpSize == IEMMODE_32BIT)
2033 {
2034 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2035 if (rcStrict != VINF_SUCCESS)
2036 return rcStrict;
2037 uNewESP = uFrame.pu32[0];
2038 uNewSS = (uint16_t)uFrame.pu32[1];
2039 }
2040 else
2041 {
2042 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp); /* SP (2) + SS (2) */
2043 if (rcStrict != VINF_SUCCESS)
2044 return rcStrict;
2045 uNewESP = uFrame.pu16[0];
2046 uNewSS = uFrame.pu16[1];
2047 }
2048 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2049 if (rcStrict != VINF_SUCCESS)
2050 return rcStrict;
2051
2052 /* Read the SS descriptor. */
2053 if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
2054 {
2055 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2056 return iemRaiseGeneralProtectionFault0(pIemCpu);
2057 }
2058
2059 IEMSELDESC DescSS;
2060 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2061 if (rcStrict != VINF_SUCCESS)
2062 {
2063 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2064 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2065 return rcStrict;
2066 }
2067
2068 /* Privilege checks. */
2069 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2070 {
2071 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2072 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2073 }
2074 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2075 {
2076 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2077 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2078 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2079 }
2080
2081 /* Must be a writeable data segment descriptor. */
2082 if (!DescSS.Legacy.Gen.u1DescType)
2083 {
2084 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2085 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2086 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2087 }
2088 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2089 {
2090 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2091 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2092 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2093 }
2094
2095 /* Present? */
2096 if (!DescSS.Legacy.Gen.u1Present)
2097 {
2098 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2099 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2100 }
2101
2102 uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy);
2103 if (DescSS.Legacy.Gen.u1Granularity)
2104 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2105
2106 /* Check EIP. */
2107 if (uNewEip > cbLimitCS)
2108 {
2109 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2110 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2111 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2112 }
2113
2114 /*
2115 * Commit the changes, marking CS and SS accessed first since
2116 * that may fail.
2117 */
2118 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2119 {
2120 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2121 if (rcStrict != VINF_SUCCESS)
2122 return rcStrict;
2123 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2124 }
2125 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2126 {
2127 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2128 if (rcStrict != VINF_SUCCESS)
2129 return rcStrict;
2130 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2131 }
2132
2133 pCtx->rip = uNewEip;
2134 pCtx->cs.Sel = uNewCs;
2135 pCtx->cs.ValidSel = uNewCs;
2136 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2137 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2138 pCtx->cs.u32Limit = cbLimitCS;
2139 pCtx->cs.u64Base = X86DESC_BASE(DescCS.Legacy);
2140 pCtx->rsp = uNewESP;
2141 pCtx->ss.Sel = uNewSS;
2142 pCtx->ss.ValidSel = uNewSS;
2143 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2144 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
2145 pCtx->ss.u32Limit = cbLimitSs;
2146 pCtx->ss.u64Base = X86DESC_BASE(DescSS.Legacy);
2147
2148 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2149 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2150 if (enmEffOpSize != IEMMODE_16BIT)
2151 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2152 if (pIemCpu->uCpl == 0)
2153 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2154 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2155 fEFlagsMask |= X86_EFL_IF;
2156 pCtx->eflags.u &= ~fEFlagsMask;
2157 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2158
2159 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2160 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds);
2161 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es);
2162 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs);
2163 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs);
2164
2165 /* Done! */
2166
2167 }
2168 /*
2169 * Return to the same level.
2170 */
2171 else
2172 {
2173 /* Check EIP. */
2174 if (uNewEip > cbLimitCS)
2175 {
2176 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2177 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2178 }
2179
2180 /*
2181 * Commit the changes, marking CS first since it may fail.
2182 */
2183 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2184 {
2185 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2186 if (rcStrict != VINF_SUCCESS)
2187 return rcStrict;
2188 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2189 }
2190
2191 pCtx->rip = uNewEip;
2192 pCtx->cs.Sel = uNewCs;
2193 pCtx->cs.ValidSel = uNewCs;
2194 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2195 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2196 pCtx->cs.u32Limit = cbLimitCS;
2197 pCtx->cs.u64Base = X86DESC_BASE(DescCS.Legacy);
2198 pCtx->rsp = uNewRsp;
2199
2200 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2201 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2202 if (enmEffOpSize != IEMMODE_16BIT)
2203 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2204 if (pIemCpu->uCpl == 0)
2205 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2206 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2207 fEFlagsMask |= X86_EFL_IF;
2208 pCtx->eflags.u &= ~fEFlagsMask;
2209 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2210 /* Done! */
2211 }
2212 }
2213 }
2214
2215 return VINF_SUCCESS;
2216}
2217
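/*
 * Illustrative sketch (not part of the build): the EFLAGS restore policy used
 * by protected mode iret above.  The arithmetic/status flags are always taken
 * from the popped image; RF, AC and ID only for 32-bit frames; IF only when
 * CPL <= IOPL; and IF, IOPL, VIF and VIP only at CPL 0.  The helper name and
 * the explicit hex masks are assumptions for the example.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

static uint32_t iretProtMergeEFlags(uint32_t fOld, uint32_t fPopped,
                                    unsigned uCpl, unsigned uIopl, bool f16BitFrame)
{
    uint32_t fMask = UINT32_C(0x4dd5);          /* CF PF AF ZF SF TF DF OF NT */
    if (!f16BitFrame)
        fMask |= UINT32_C(0x250000);            /* RF AC ID */
    if (uCpl == 0)
        fMask |= UINT32_C(0x183200);            /* IF IOPL VIF VIP */
    else if (uCpl <= uIopl)
        fMask |= UINT32_C(0x200);               /* IF */
    return (fOld & ~fMask) | (fPopped & fMask);
}
#endif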
2218
2219/**
2220 * Implements iret for long mode.
2221 *
2222 * @param enmEffOpSize The effective operand size.
2223 */
2224IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2225{
2226 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2227 //VBOXSTRICTRC rcStrict;
2228 //uint64_t uNewRsp;
2229
2230 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2231 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2232}
2233
2234
2235/**
2236 * Implements iret.
2237 *
2238 * @param enmEffOpSize The effective operand size.
2239 */
2240IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2241{
2242 /*
2243 * Call a mode specific worker.
2244 */
2245 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2246 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2247 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2248 if (IEM_IS_LONG_MODE(pIemCpu))
2249 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2250
2251 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2252}
2253
2254
2255/**
2256 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2257 *
2258 * @param iSegReg The segment register number (valid).
2259 * @param uSel The new selector value.
2260 */
2261IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2262{
2263 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2264 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2265 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2266
2267 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2268
2269 /*
2270 * Real mode and V8086 mode are easy.
2271 */
2272 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2273 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2274 {
2275 *pSel = uSel;
2276 pHid->u64Base = (uint32_t)uSel << 4;
2277#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2278 /** @todo Does the CPU actually load limits and attributes in the
2279 * real/V8086 mode segment load case? It doesn't for CS in far
2280 * jumps... Affects unreal mode. */
2281 pHid->u32Limit = 0xffff;
2282 pHid->Attr.u = 0;
2283 pHid->Attr.n.u1Present = 1;
2284 pHid->Attr.n.u1DescType = 1;
2285 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2286 ? X86_SEL_TYPE_RW
2287 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2288#endif
2289 iemRegAddToRip(pIemCpu, cbInstr);
2290 return VINF_SUCCESS;
2291 }
2292
2293 /*
2294 * Protected mode.
2295 *
2296 * Check if it's a null segment selector value first, that's OK for DS, ES,
2297 * FS and GS. If not null, then we have to load and parse the descriptor.
2298 */
2299 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
2300 {
2301 if (iSegReg == X86_SREG_SS)
2302 {
2303 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2304 || pIemCpu->uCpl != 0
2305 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2306 {
2307 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2308 return iemRaiseGeneralProtectionFault0(pIemCpu);
2309 }
2310
2311 /* In 64-bit kernel mode, the stack can be 0 because of the way
2312 interrupts are dispatched when in kernel ctx. Just load the
2313 selector value into the register and leave the hidden bits
2314 as is. */
2315 *pSel = uSel;
2316 iemRegAddToRip(pIemCpu, cbInstr);
2317 return VINF_SUCCESS;
2318 }
2319
2320 *pSel = uSel; /* Not RPL, remember :-) */
2321 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2322 && iSegReg != X86_SREG_FS
2323 && iSegReg != X86_SREG_GS)
2324 {
2325 /** @todo figure out what this actually does; it works, but needs a
2326 * testcase! */
2327 pHid->Attr.u = 0;
2328 pHid->Attr.n.u1Present = 1;
2329 pHid->Attr.n.u1Long = 1;
2330 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2331 pHid->Attr.n.u2Dpl = 3;
2332 pHid->u32Limit = 0;
2333 pHid->u64Base = 0;
2334 }
2335 else
2336 {
2337 pHid->Attr.u = 0;
2338 pHid->u32Limit = 0;
2339 pHid->u64Base = 0;
2340 }
2341 iemRegAddToRip(pIemCpu, cbInstr);
2342 return VINF_SUCCESS;
2343 }
2344
2345 /* Fetch the descriptor. */
2346 IEMSELDESC Desc;
2347 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2348 if (rcStrict != VINF_SUCCESS)
2349 return rcStrict;
2350
2351 /* Check GPs first. */
2352 if (!Desc.Legacy.Gen.u1DescType)
2353 {
2354 Log(("load sreg %d, %#x - system selector (type %#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2355 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2356 }
2357 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2358 {
2359 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2360 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2361 {
2362 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2363 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2364 }
2371 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2372 {
2373 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2374 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2375 }
2376 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2377 {
2378 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2379 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2380 }
2381 }
2382 else
2383 {
2384 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2385 {
2386 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2387 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2388 }
2389 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2390 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2391 {
2392#if 0 /* this is what intel says. */
2393 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2394 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2395 {
2396 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2397 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2398 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2399 }
2400#else /* this is what makes more sense. */
2401 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2402 {
2403 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2404 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2405 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2406 }
2407 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2408 {
2409 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2410 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2411 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2412 }
2413#endif
2414 }
2415 }
2416
2417 /* Is it there? */
2418 if (!Desc.Legacy.Gen.u1Present)
2419 {
2420 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2421 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2422 }
2423
2424 /* The base and limit. */
2425 uint64_t u64Base;
2426 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
2427 if (Desc.Legacy.Gen.u1Granularity)
2428 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2429
2430 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2431 && iSegReg < X86_SREG_FS)
2432 u64Base = 0;
2433 else
2434 u64Base = X86DESC_BASE(Desc.Legacy);
2435
2436 /*
2437 * Ok, everything checked out fine. Now set the accessed bit before
2438 * committing the result into the registers.
2439 */
2440 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2441 {
2442 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2443 if (rcStrict != VINF_SUCCESS)
2444 return rcStrict;
2445 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2446 }
2447
2448 /* commit */
2449 *pSel = uSel;
2450 pHid->Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2451 pHid->u32Limit = cbLimit;
2452 pHid->u64Base = u64Base;
2453
2454 /** @todo check if the hidden bits are loaded correctly for 64-bit
2455 * mode. */
2456
2457 iemRegAddToRip(pIemCpu, cbInstr);
2458 return VINF_SUCCESS;
2459}
2460
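/*
 * Illustrative sketch (not part of the build): the limit scaling applied when
 * a descriptor has the granularity bit set, as done in the segment loading
 * code above.  A page-granular raw limit of N covers bytes 0..N*4096+4095,
 * hence the shift by 12 and the OR with 0xfff.  The helper name is made up
 * for the example.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

static uint32_t expandSegLimit(uint32_t u20RawLimit, bool fGranular)
{
    if (!fGranular)
        return u20RawLimit;                         /* byte granular, used as-is */
    return (u20RawLimit << 12) | UINT32_C(0xfff);   /* page granular */
}
#endif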
2461
2462/**
2463 * Implements 'mov SReg, r/m'.
2464 *
2465 * @param iSegReg The segment register number (valid).
2466 * @param uSel The new selector value.
2467 */
2468IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2469{
2470 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2471 if (rcStrict == VINF_SUCCESS)
2472 {
2473 if (iSegReg == X86_SREG_SS)
2474 {
2475 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2476 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2477 }
2478 }
2479 return rcStrict;
2480}
2481
2482
2483/**
2484 * Implements 'pop SReg'.
2485 *
2486 * @param iSegReg The segment register number (valid).
2487 * @param enmEffOpSize The effective operand size (valid).
2488 */
2489IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2490{
2491 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2492 VBOXSTRICTRC rcStrict;
2493
2494 /*
2495 * Read the selector off the stack and join paths with mov ss, reg.
2496 */
2497 RTUINT64U TmpRsp;
2498 TmpRsp.u = pCtx->rsp;
2499 switch (enmEffOpSize)
2500 {
2501 case IEMMODE_16BIT:
2502 {
2503 uint16_t uSel;
2504 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2505 if (rcStrict == VINF_SUCCESS)
2506 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2507 break;
2508 }
2509
2510 case IEMMODE_32BIT:
2511 {
2512 uint32_t u32Value;
2513 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2514 if (rcStrict == VINF_SUCCESS)
2515 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2516 break;
2517 }
2518
2519 case IEMMODE_64BIT:
2520 {
2521 uint64_t u64Value;
2522 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2523 if (rcStrict == VINF_SUCCESS)
2524 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2525 break;
2526 }
2527 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2528 }
2529
2530 /*
2531 * Commit the stack on success.
2532 */
2533 if (rcStrict == VINF_SUCCESS)
2534 {
2535 pCtx->rsp = TmpRsp.u;
2536 if (iSegReg == X86_SREG_SS)
2537 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2538 }
2539 return rcStrict;
2540}
2541
2542
2543/**
2544 * Implements lgs, lfs, les, lds & lss.
2545 */
2546IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2547 uint16_t, uSel,
2548 uint64_t, offSeg,
2549 uint8_t, iSegReg,
2550 uint8_t, iGReg,
2551 IEMMODE, enmEffOpSize)
2552{
2553 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2554 VBOXSTRICTRC rcStrict;
2555
2556 /*
2557 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2558 */
2559 /** @todo verify and test that mov, pop and lXs all perform the segment
2560 * register loading in the exact same way. */
2561 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2562 if (rcStrict == VINF_SUCCESS)
2563 {
2564 switch (enmEffOpSize)
2565 {
2566 case IEMMODE_16BIT:
2567 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2568 break;
2569 case IEMMODE_32BIT:
2570 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2571 break;
2572 case IEMMODE_64BIT:
2573 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2574 break;
2575 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2576 }
2577 }
2578
2579 return rcStrict;
2580}
2581
2582
2583/**
2584 * Implements lgdt.
2585 *
2586 * @param iEffSeg The segment of the new gdtr contents.
2587 * @param GCPtrEffSrc The address of the new gdtr contents.
2588 * @param enmEffOpSize The effective operand size.
2589 */
2590IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2591{
2592 if (pIemCpu->uCpl != 0)
2593 return iemRaiseGeneralProtectionFault0(pIemCpu);
2594 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2595
2596 /*
2597 * Fetch the limit and base address.
2598 */
2599 uint16_t cbLimit;
2600 RTGCPTR GCPtrBase;
2601 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2602 if (rcStrict == VINF_SUCCESS)
2603 {
2604 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2605 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2606 else
2607 {
2608 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2609 pCtx->gdtr.cbGdt = cbLimit;
2610 pCtx->gdtr.pGdt = GCPtrBase;
2611 }
2612 if (rcStrict == VINF_SUCCESS)
2613 iemRegAddToRip(pIemCpu, cbInstr);
2614 }
2615 return rcStrict;
2616}
2617
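/*
 * Illustrative sketch (not part of the build): the memory operand layout that
 * lgdt/lidt read via iemMemFetchDataXdtr - a 16-bit limit followed by the base
 * address.  With a 16-bit operand size outside long mode only the low 24 bits
 * of the base are used; in long mode the base is a full 64-bit value.  The
 * struct and helper below are assumptions for the example (little endian).
 */
#if 0
# include <stdint.h>
# include <string.h>

typedef struct EXAMPLEXDTR
{
    uint16_t cbLimit;
    uint64_t uBase;
} EXAMPLEXDTR;

static EXAMPLEXDTR decodeXdtrOperand(const uint8_t *pbMem, unsigned cbOpSize, int fLongMode)
{
    EXAMPLEXDTR Res;
    uint64_t    uBase = 0;
    memcpy(&Res.cbLimit, pbMem, sizeof(uint16_t));
    memcpy(&uBase, pbMem + 2, fLongMode ? 8 : 4);   /* base follows the limit */
    if (cbOpSize == 2 && !fLongMode)
        uBase &= UINT32_C(0x00ffffff);              /* 16-bit operand: 24-bit base */
    Res.uBase = uBase;
    return Res;
}
#endif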
2618
2619/**
2620 * Implements lidt.
2621 *
2622 * @param iEffSeg The segment of the new idtr contents.
2623 * @param GCPtrEffSrc The address of the new idtr contents.
2624 * @param enmEffOpSize The effective operand size.
2625 */
2626IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2627{
2628 if (pIemCpu->uCpl != 0)
2629 return iemRaiseGeneralProtectionFault0(pIemCpu);
2630 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2631
2632 /*
2633 * Fetch the limit and base address.
2634 */
2635 uint16_t cbLimit;
2636 RTGCPTR GCPtrBase;
2637 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2638 if (rcStrict == VINF_SUCCESS)
2639 {
2640 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2641 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2642 else
2643 {
2644 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2645 pCtx->idtr.cbIdt = cbLimit;
2646 pCtx->idtr.pIdt = GCPtrBase;
2647 }
2648 if (rcStrict == VINF_SUCCESS)
2649 iemRegAddToRip(pIemCpu, cbInstr);
2650 }
2651 return rcStrict;
2652}
2653
2654
2655/**
2656 * Implements lldt.
2657 *
2658 * @param uNewLdt The new LDT selector value.
2659 */
2660IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2661{
2662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2663
2664 /*
2665 * Check preconditions.
2666 */
2667 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2668 {
2669 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2670 return iemRaiseUndefinedOpcode(pIemCpu);
2671 }
2672 if (pIemCpu->uCpl != 0)
2673 {
2674 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2675 return iemRaiseGeneralProtectionFault0(pIemCpu);
2676 }
2677 if (uNewLdt & X86_SEL_LDT)
2678 {
2679 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2680 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2681 }
2682
2683 /*
2684 * Now, loading a NULL selector is easy.
2685 */
2686 if ((uNewLdt & X86_SEL_MASK) == 0)
2687 {
2688 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2689 /** @todo check if the actual value is loaded or if it's always 0. */
2690 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2691 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
2692 else
2693 pCtx->ldtr.Sel = 0;
2694 pCtx->ldtr.ValidSel = 0;
2695 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2696 pCtx->ldtr.Attr.u = 0;
2697 pCtx->ldtr.u64Base = 0;
2698 pCtx->ldtr.u32Limit = 0;
2699
2700 iemRegAddToRip(pIemCpu, cbInstr);
2701 return VINF_SUCCESS;
2702 }
2703
2704 /*
2705 * Read the descriptor.
2706 */
2707 IEMSELDESC Desc;
2708 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2709 if (rcStrict != VINF_SUCCESS)
2710 return rcStrict;
2711
2712 /* Check GPs first. */
2713 if (Desc.Legacy.Gen.u1DescType)
2714 {
2715 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2716 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2717 }
2718 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2719 {
2720 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2721 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2722 }
2723 uint64_t u64Base;
2724 if (!IEM_IS_LONG_MODE(pIemCpu))
2725 u64Base = X86DESC_BASE(Desc.Legacy);
2726 else
2727 {
2728 if (Desc.Long.Gen.u5Zeros)
2729 {
2730 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2731 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2732 }
2733
2734 u64Base = X86DESC64_BASE(Desc.Long);
2735 if (!IEM_IS_CANONICAL(u64Base))
2736 {
2737 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2738 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2739 }
2740 }
2741
2742 /* NP */
2743 if (!Desc.Legacy.Gen.u1Present)
2744 {
2745 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2746 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2747 }
2748
2749 /*
2750 * It checks out alright, update the registers.
2751 */
2752/** @todo check if the actual value is loaded or if the RPL is dropped */
2753 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2754 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
2755 else
2756 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK;
2757 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK;
2758 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2759 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2760 pCtx->ldtr.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2761 pCtx->ldtr.u64Base = u64Base;
2762
2763 iemRegAddToRip(pIemCpu, cbInstr);
2764 return VINF_SUCCESS;
2765}
2766
2767
2768/**
2769 * Implements ltr.
2770 *
2771 * @param uNewTr The new task register (TR) selector value.
2772 */
2773IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2774{
2775 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2776
2777 /*
2778 * Check preconditions.
2779 */
2780 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2781 {
2782 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2783 return iemRaiseUndefinedOpcode(pIemCpu);
2784 }
2785 if (pIemCpu->uCpl != 0)
2786 {
2787 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2788 return iemRaiseGeneralProtectionFault0(pIemCpu);
2789 }
2790 if (uNewTr & X86_SEL_LDT)
2791 {
2792 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2793 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2794 }
2795 if ((uNewTr & X86_SEL_MASK) == 0)
2796 {
2797 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2798 return iemRaiseGeneralProtectionFault0(pIemCpu);
2799 }
2800
2801 /*
2802 * Read the descriptor.
2803 */
2804 IEMSELDESC Desc;
2805 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2806 if (rcStrict != VINF_SUCCESS)
2807 return rcStrict;
2808
2809 /* Check GPs first. */
2810 if (Desc.Legacy.Gen.u1DescType)
2811 {
2812 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2813 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2814 }
2815 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2816 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2817 || IEM_IS_LONG_MODE(pIemCpu)) )
2818 {
2819 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2820 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2821 }
2822 uint64_t u64Base;
2823 if (!IEM_IS_LONG_MODE(pIemCpu))
2824 u64Base = X86DESC_BASE(Desc.Legacy);
2825 else
2826 {
2827 if (Desc.Long.Gen.u5Zeros)
2828 {
2829 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2830 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2831 }
2832
2833 u64Base = X86DESC64_BASE(Desc.Long);
2834 if (!IEM_IS_CANONICAL(u64Base))
2835 {
2836 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2837 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2838 }
2839 }
2840
2841 /* NP */
2842 if (!Desc.Legacy.Gen.u1Present)
2843 {
2844 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2845 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2846 }
2847
2848 /*
2849 * Set it busy.
2850 * Note! Intel says this should lock down the whole descriptor, but we'll
2851 * restrict ourselves to 32-bit for now due to lack of inline
2852 * assembly and such.
2853 */
2854 void *pvDesc;
2855 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
2856 if (rcStrict != VINF_SUCCESS)
2857 return rcStrict;
2858 switch ((uintptr_t)pvDesc & 3)
2859 {
2860 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2861 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2862 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2863 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2864 }
2865 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2866 if (rcStrict != VINF_SUCCESS)
2867 return rcStrict;
2868 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2869
2870 /*
2871 * It checks out alright, update the registers.
2872 */
2873/** @todo check if the actual value is loaded or if the RPL is dropped */
2874 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2875 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
2876 else
2877 pCtx->tr.Sel = uNewTr & X86_SEL_MASK;
2878 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK;
2879 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2880 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2881 pCtx->tr.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2882 pCtx->tr.u64Base = u64Base;
2883
2884 iemRegAddToRip(pIemCpu, cbInstr);
2885 return VINF_SUCCESS;
2886}
2887
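/*
 * Illustrative sketch (not part of the build): the alignment juggling used
 * above when setting the TSS busy bit.  The busy bit is bit 41 of the 8-byte
 * descriptor; to use an aligned atomic bit-set, the mapping address is rounded
 * up to the next 4-byte boundary and the bit index reduced by 8 for every byte
 * skipped.  The struct and helper are made up for the example.
 */
#if 0
# include <stdint.h>

typedef struct ALIGNEDBITREF
{
    unsigned offByte;   /* bytes to add to the mapping address */
    unsigned iBit;      /* bit index relative to that address  */
} ALIGNEDBITREF;

static ALIGNEDBITREF tssBusyBitRef(uintptr_t uPtr)
{
    ALIGNEDBITREF Ref;
    unsigned const offMisalign = (unsigned)(uPtr & 3);
    Ref.offByte = offMisalign ? 4 - offMisalign : 0;    /* 0,3,2,1 for misalignment 0,1,2,3 */
    Ref.iBit    = 40 + 1 - 8 * Ref.offByte;             /* busy bit = descriptor bit 41 */
    return Ref;
}
#endif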
2888
2889/**
2890 * Implements mov GReg,CRx.
2891 *
2892 * @param iGReg The general register to store the CRx value in.
2893 * @param iCrReg The CRx register to read (valid).
2894 */
2895IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2896{
2897 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2898 if (pIemCpu->uCpl != 0)
2899 return iemRaiseGeneralProtectionFault0(pIemCpu);
2900 Assert(!pCtx->eflags.Bits.u1VM);
2901
2902 /* read it */
2903 uint64_t crX;
2904 switch (iCrReg)
2905 {
2906 case 0: crX = pCtx->cr0; break;
2907 case 2: crX = pCtx->cr2; break;
2908 case 3: crX = pCtx->cr3; break;
2909 case 4: crX = pCtx->cr4; break;
2910 case 8:
2911 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2912 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2913 else
2914 crX = 0xff;
2915 break;
2916 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2917 }
2918
2919 /* store it */
2920 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2921 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2922 else
2923 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2924
2925 iemRegAddToRip(pIemCpu, cbInstr);
2926 return VINF_SUCCESS;
2927}
2928
2929
2930/**
2931 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2932 *
2933 * @param iCrReg The CRx register to write (valid).
2934 * @param uNewCrX The new value.
2935 */
2936IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2937{
2938 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2939 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2940 VBOXSTRICTRC rcStrict;
2941 int rc;
2942
2943 /*
2944 * Try to store it.
2945 * Unfortunately, CPUM only does a tiny bit of the work.
2946 */
2947 switch (iCrReg)
2948 {
2949 case 0:
2950 {
2951 /*
2952 * Perform checks.
2953 */
2954 uint64_t const uOldCrX = pCtx->cr0;
2955 uNewCrX |= X86_CR0_ET; /* hardcoded */
2956
2957 /* Check for reserved bits. */
2958 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2959 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2960 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2961 if (uNewCrX & ~(uint64_t)fValid)
2962 {
2963 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2964 return iemRaiseGeneralProtectionFault0(pIemCpu);
2965 }
2966
2967 /* Check for invalid combinations. */
2968 if ( (uNewCrX & X86_CR0_PG)
2969 && !(uNewCrX & X86_CR0_PE) )
2970 {
2971 Log(("Trying to set CR0.PG without CR0.PE\n"));
2972 return iemRaiseGeneralProtectionFault0(pIemCpu);
2973 }
2974
2975 if ( !(uNewCrX & X86_CR0_CD)
2976 && (uNewCrX & X86_CR0_NW) )
2977 {
2978 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2979 return iemRaiseGeneralProtectionFault0(pIemCpu);
2980 }
2981
2982 /* Long mode consistency checks. */
2983 if ( (uNewCrX & X86_CR0_PG)
2984 && !(uOldCrX & X86_CR0_PG)
2985 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2986 {
2987 if (!(pCtx->cr4 & X86_CR4_PAE))
2988 {
2989 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
2990 return iemRaiseGeneralProtectionFault0(pIemCpu);
2991 }
2992 if (pCtx->cs.Attr.n.u1Long)
2993 {
2994 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
2995 return iemRaiseGeneralProtectionFault0(pIemCpu);
2996 }
2997 }
2998
2999 /** @todo check reserved PDPTR bits as AMD states. */
3000
3001 /*
3002 * Change CR0.
3003 */
3004 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3005 {
3006 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
3007 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
3008 }
3009 else
3010 pCtx->cr0 = uNewCrX;
3011 Assert(pCtx->cr0 == uNewCrX);
3012
3013 /*
3014 * Change EFER.LMA if entering or leaving long mode.
3015 */
3016 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3017 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3018 {
3019 uint64_t NewEFER = pCtx->msrEFER;
3020 if (uNewCrX & X86_CR0_PG)
3021 NewEFER |= MSR_K6_EFER_LME;
3022 else
3023 NewEFER &= ~MSR_K6_EFER_LME;
3024
3025 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3026 CPUMSetGuestEFER(pVCpu, NewEFER);
3027 else
3028 pCtx->msrEFER = NewEFER;
3029 Assert(pCtx->msrEFER == NewEFER);
3030 }
3031
3032 /*
3033 * Inform PGM.
3034 */
3035 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3036 {
3037 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3038 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3039 {
3040 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3041 AssertRCReturn(rc, rc);
3042 /* ignore informational status codes */
3043 }
3044 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3045 /** @todo Status code management. */
3046 }
3047 else
3048 rcStrict = VINF_SUCCESS;
3049 break;
3050 }
3051
3052 /*
3053 * CR2 can be changed without any restrictions.
3054 */
3055 case 2:
3056 pCtx->cr2 = uNewCrX;
3057 rcStrict = VINF_SUCCESS;
3058 break;
3059
3060 /*
3061 * CR3 is relatively simple, although AMD and Intel have different
3062 * accounts of how setting reserved bits is handled. We take intel's
3063 * word for the lower bits and AMD's for the high bits (63:52).
3064 */
3065 /** @todo Testcase: Setting reserved bits in CR3, especially before
3066 * enabling paging. */
3067 case 3:
3068 {
3069 /* check / mask the value. */
3070 if (uNewCrX & UINT64_C(0xfff0000000000000))
3071 {
3072 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3073 return iemRaiseGeneralProtectionFault0(pIemCpu);
3074 }
3075
3076 uint64_t fValid;
3077 if ( (pCtx->cr4 & X86_CR4_PAE)
3078 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3079 fValid = UINT64_C(0x000ffffffffff014);
3080 else if (pCtx->cr4 & X86_CR4_PAE)
3081 fValid = UINT64_C(0xfffffff4);
3082 else
3083 fValid = UINT64_C(0xfffff014);
3084 if (uNewCrX & ~fValid)
3085 {
3086 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3087 uNewCrX, uNewCrX & ~fValid));
3088 uNewCrX &= fValid;
3089 }
3090
3091 /** @todo If we're in PAE mode we should check the PDPTRs for
3092 * invalid bits. */
3093
3094 /* Make the change. */
3095 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3096 {
3097 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3098 AssertRCSuccessReturn(rc, rc);
3099 }
3100 else
3101 pCtx->cr3 = uNewCrX;
3102
3103 /* Inform PGM. */
3104 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3105 {
3106 if (pCtx->cr0 & X86_CR0_PG)
3107 {
3108 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3109 AssertRCReturn(rc, rc);
3110 /* ignore informational status codes */
3111 /** @todo status code management */
3112 }
3113 }
3114 rcStrict = VINF_SUCCESS;
3115 break;
3116 }
3117
3118 /*
3119 * CR4 is a bit more tedious as there are bits which cannot be cleared
3120 * under some circumstances and such.
3121 */
3122 case 4:
3123 {
3124 uint64_t const uOldCrX = pCtx->cr4;
3125
3126 /* reserved bits */
3127 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3128 | X86_CR4_TSD | X86_CR4_DE
3129 | X86_CR4_PSE | X86_CR4_PAE
3130 | X86_CR4_MCE | X86_CR4_PGE
3131 | X86_CR4_PCE | X86_CR4_OSFSXR
3132 | X86_CR4_OSXMMEEXCPT;
3133 //if (xxx)
3134 // fValid |= X86_CR4_VMXE;
3135 //if (xxx)
3136 // fValid |= X86_CR4_OSXSAVE;
3137 if (uNewCrX & ~(uint64_t)fValid)
3138 {
3139 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3140 return iemRaiseGeneralProtectionFault0(pIemCpu);
3141 }
3142
3143 /* long mode checks. */
3144 if ( (uOldCrX & X86_CR4_PAE)
3145 && !(uNewCrX & X86_CR4_PAE)
3146 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3147 {
3148 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
3149 return iemRaiseGeneralProtectionFault0(pIemCpu);
3150 }
3151
3152
3153 /*
3154 * Change it.
3155 */
3156 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3157 {
3158 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3159 AssertRCSuccessReturn(rc, rc);
3160 }
3161 else
3162 pCtx->cr4 = uNewCrX;
3163 Assert(pCtx->cr4 == uNewCrX);
3164
3165 /*
3166 * Notify SELM and PGM.
3167 */
3168 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3169 {
3170 /* SELM - VME may change things wrt the TSS shadowing. */
3171 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3172 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3173
3174 /* PGM - flushing and mode. */
3175 if ( (uNewCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3176 != (uOldCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE)) )
3177 {
3178 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3179 AssertRCReturn(rc, rc);
3180 /* ignore informational status codes */
3181 }
3182 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3183 /** @todo Status code management. */
3184 }
3185 else
3186 rcStrict = VINF_SUCCESS;
3187 break;
3188 }
3189
3190 /*
3191 * CR8 maps to the APIC TPR.
3192 */
3193 case 8:
3194 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3195 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
3196 else
3197 rcStrict = VINF_SUCCESS;
3198 break;
3199
3200 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3201 }
3202
3203 /*
3204 * Advance the RIP on success.
3205 */
3206 /** @todo Status code management. */
3207 if (rcStrict == VINF_SUCCESS)
3208 iemRegAddToRip(pIemCpu, cbInstr);
3209 return rcStrict;
3210
3211}
3212
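/*
 * Illustrative sketch (not part of the build): the CR3 mask selection used by
 * the CR3 case above.  Which bits of a CR3 load are kept depends on the
 * paging mode; the other bits are silently cleared rather than faulting.
 * Plain bools replace the CR4/EFER tests and the helper name is made up.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

static uint64_t maskCr3Load(uint64_t uNewCr3, bool fPae, bool fLongModeActive)
{
    uint64_t fValid;
    if (fPae && fLongModeActive)
        fValid = UINT64_C(0x000ffffffffff014);      /* 52-bit address + low flag bits */
    else if (fPae)
        fValid = UINT64_C(0x00000000fffffff4);      /* 32-byte aligned PDPT address   */
    else
        fValid = UINT64_C(0x00000000fffff014);      /* page-aligned 32-bit address    */
    return uNewCr3 & fValid;                        /* reserved bits silently cleared */
}
#endif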
3213
3214/**
3215 * Implements mov CRx,GReg.
3216 *
3217 * @param iCrReg The CRx register to write (valid).
3218 * @param iGReg The general register to load the CRx value from.
3219 */
3220IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3221{
3222 if (pIemCpu->uCpl != 0)
3223 return iemRaiseGeneralProtectionFault0(pIemCpu);
3224 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3225
3226 /*
3227 * Read the new value from the source register and call common worker.
3228 */
3229 uint64_t uNewCrX;
3230 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3231 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3232 else
3233 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3234 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3235}
3236
3237
3238/**
3239 * Implements 'LMSW r/m16'
3240 *
3241 * @param u16NewMsw The new value.
3242 */
3243IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3244{
3245 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3246
3247 if (pIemCpu->uCpl != 0)
3248 return iemRaiseGeneralProtectionFault0(pIemCpu);
3249 Assert(!pCtx->eflags.Bits.u1VM);
3250
3251 /*
3252 * Compose the new CR0 value and call common worker.
3253 */
3254 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3255 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3256 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3257}
3258
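/*
 * Illustrative sketch (not part of the build): the MSW merge performed by
 * lmsw above.  Only the PE, MP, EM and TS bits of the operand are used, and
 * because PE is not masked out of the old CR0 value first, lmsw can set PE
 * but never clear it.  The helper name is made up for the example.
 */
#if 0
# include <stdint.h>

static uint64_t lmswMergeCr0(uint64_t uOldCr0, uint16_t u16NewMsw)
{
    uint64_t const fPE = 1, fMP = 2, fEM = 4, fTS = 8;  /* CR0 bits 0..3 */
    uint64_t uNew = uOldCr0 & ~(fMP | fEM | fTS);       /* PE deliberately kept */
    uNew |= u16NewMsw & (fPE | fMP | fEM | fTS);
    return uNew;
}
#endif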
3259
3260/**
3261 * Implements 'CLTS'.
3262 */
3263IEM_CIMPL_DEF_0(iemCImpl_clts)
3264{
3265 if (pIemCpu->uCpl != 0)
3266 return iemRaiseGeneralProtectionFault0(pIemCpu);
3267
3268 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3269 uint64_t uNewCr0 = pCtx->cr0;
3270 uNewCr0 &= ~X86_CR0_TS;
3271 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3272}
3273
3274
3275/**
3276 * Implements mov GReg,DRx.
3277 *
3278 * @param iGReg The general register to store the DRx value in.
3279 * @param iDrReg The DRx register to read (0-7).
3280 */
3281IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3282{
3283 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3284
3285 /*
3286 * Check preconditions.
3287 */
3288
3289 /* Raise GPs. */
3290 if (pIemCpu->uCpl != 0)
3291 return iemRaiseGeneralProtectionFault0(pIemCpu);
3292 Assert(!pCtx->eflags.Bits.u1VM);
3293
3294 if ( (iDrReg == 4 || iDrReg == 5)
3295 && (pCtx->cr4 & X86_CR4_DE) )
3296 {
3297 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3298 return iemRaiseGeneralProtectionFault0(pIemCpu);
3299 }
3300
3301 /* Raise #DB if general access detect is enabled. */
3302 if (pCtx->dr[7] & X86_DR7_GD)
3303 {
3304 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3305 return iemRaiseDebugException(pIemCpu);
3306 }
3307
3308 /*
3309 * Read the debug register and store it in the specified general register.
3310 */
3311 uint64_t drX;
3312 switch (iDrReg)
3313 {
3314 case 0: drX = pCtx->dr[0]; break;
3315 case 1: drX = pCtx->dr[1]; break;
3316 case 2: drX = pCtx->dr[2]; break;
3317 case 3: drX = pCtx->dr[3]; break;
3318 case 6:
3319 case 4:
3320 drX = pCtx->dr[6];
3321 drX &= ~RT_BIT_32(12);
3322 drX |= UINT32_C(0xffff0ff0);
3323 break;
3324 case 7:
3325 case 5:
3326 drX = pCtx->dr[7];
3327 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3328 drX |= RT_BIT_32(10);
3329 break;
3330 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3331 }
3332
3333 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3334 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3335 else
3336 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3337
3338 iemRegAddToRip(pIemCpu, cbInstr);
3339 return VINF_SUCCESS;
3340}
3341
3342
3343/**
3344 * Implements mov DRx,GReg.
3345 *
3346 * @param iDrReg The DRx register to write (valid).
3347 * @param iGReg The general register to load the DRx value from.
3348 */
3349IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3350{
3351 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3352
3353 /*
3354 * Check preconditions.
3355 */
3356 if (pIemCpu->uCpl != 0)
3357 return iemRaiseGeneralProtectionFault0(pIemCpu);
3358 Assert(!pCtx->eflags.Bits.u1VM);
3359
3360 if ( (iDrReg == 4 || iDrReg == 5)
3361 && (pCtx->cr4 & X86_CR4_DE) )
3362 {
3363 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3364 return iemRaiseGeneralProtectionFault0(pIemCpu);
3365 }
3366
3367 /* Raise #DB if general access detect is enabled. */
3368 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
3369 * \#GP? */
3370 if (pCtx->dr[7] & X86_DR7_GD)
3371 {
3372 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3373 return iemRaiseDebugException(pIemCpu);
3374 }
3375
3376 /*
3377 * Read the new value from the source register.
3378 */
3379 uint64_t uNewDrX;
3380 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3381 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3382 else
3383 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3384
3385 /*
3386 * Adjust it.
3387 */
3388 switch (iDrReg)
3389 {
3390 case 0:
3391 case 1:
3392 case 2:
3393 case 3:
3394 /* nothing to adjust */
3395 break;
3396
3397 case 6:
3398 case 4:
3399 if (uNewDrX & UINT64_C(0xffffffff00000000))
3400 {
3401 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3402 return iemRaiseGeneralProtectionFault0(pIemCpu);
3403 }
3404 uNewDrX &= ~RT_BIT_32(12);
3405 uNewDrX |= UINT32_C(0xffff0ff0);
3406 break;
3407
3408 case 7:
3409 case 5:
3410 if (uNewDrX & UINT64_C(0xffffffff00000000))
3411 {
3412 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3413 return iemRaiseGeneralProtectionFault0(pIemCpu);
3414 }
3415 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3416 uNewDrX |= RT_BIT_32(10);
3417 break;
3418
3419 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3420 }
3421
3422 /*
3423 * Do the actual setting.
3424 */
3425 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3426 {
3427 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3428 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3429 }
3430 else
3431 pCtx->dr[iDrReg] = uNewDrX;
3432
3433 iemRegAddToRip(pIemCpu, cbInstr);
3434 return VINF_SUCCESS;
3435}
3436
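/*
 * Illustrative sketch (not part of the build): the fixed-bit handling shared
 * by the two DRx movers above.  DR6 has bit 12 forced to zero and bits
 * 15:13 and 31:16 forced to one; DR7 has bits 11, 12, 14 and 15 forced to
 * zero and bit 10 forced to one.  The helper names are made up.
 */
#if 0
# include <stdint.h>

static uint64_t normalizeDr6(uint64_t uDr6)
{
    uDr6 &= ~(UINT64_C(1) << 12);           /* bit 12 is always 0 */
    uDr6 |= UINT64_C(0xffff0ff0);           /* fixed one bits      */
    return uDr6;
}

static uint64_t normalizeDr7(uint64_t uDr7)
{
    uDr7 &= ~((UINT64_C(1) << 11) | (UINT64_C(1) << 12) | (UINT64_C(1) << 14) | (UINT64_C(1) << 15));
    uDr7 |= UINT64_C(1) << 10;              /* bit 10 is always 1  */
    return uDr7;
}
#endif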
3437
3438/**
3439 * Implements 'INVLPG m'.
3440 *
3441 * @param GCPtrPage The effective address of the page to invalidate.
3442 * @remarks Updates the RIP.
3443 */
3444 IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3445{
3446 /* ring-0 only. */
3447 if (pIemCpu->uCpl != 0)
3448 return iemRaiseGeneralProtectionFault0(pIemCpu);
3449 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3450
3451 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3452 iemRegAddToRip(pIemCpu, cbInstr);
3453
3454 if ( rc == VINF_SUCCESS
3455 || rc == VINF_PGM_SYNC_CR3)
3456 return VINF_SUCCESS;
3457 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", rc));
3458 return rc;
3459}
3460
3461
3462/**
3463 * Implements RDTSC.
3464 */
3465IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3466{
3467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3468
3469 /*
3470 * Check preconditions.
3471 */
3472 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3473 return iemRaiseUndefinedOpcode(pIemCpu);
3474
3475 if ( (pCtx->cr4 & X86_CR4_TSD)
3476 && pIemCpu->uCpl != 0)
3477 {
3478 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3479 return iemRaiseGeneralProtectionFault0(pIemCpu);
3480 }
3481
3482 /*
3483 * Do the job.
3484 */
3485 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3486 pCtx->rax = (uint32_t)uTicks;
3487 pCtx->rdx = uTicks >> 32;
3488#ifdef IEM_VERIFICATION_MODE
3489 pIemCpu->fIgnoreRaxRdx = true;
3490#endif
3491
3492 iemRegAddToRip(pIemCpu, cbInstr);
3493 return VINF_SUCCESS;
3494}
3495
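/*
 * Illustrative sketch (not part of the build): the EDX:EAX split performed by
 * rdtsc (and rdmsr) above - the low 32 bits go to RAX and the high 32 bits to
 * RDX, with the upper halves of both registers cleared.  The helper name is
 * made up for the example.
 */
#if 0
# include <stdint.h>

static void splitU64ToEdxEax(uint64_t uValue, uint64_t *puRax, uint64_t *puRdx)
{
    *puRax = (uint32_t)uValue;          /* EAX = low dword, upper bits cleared  */
    *puRdx = uValue >> 32;              /* EDX = high dword, upper bits cleared */
}
#endif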
3496
3497/**
3498 * Implements RDMSR.
3499 */
3500IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3501{
3502 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3503
3504 /*
3505 * Check preconditions.
3506 */
3507 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3508 return iemRaiseUndefinedOpcode(pIemCpu);
3509 if (pIemCpu->uCpl != 0)
3510 return iemRaiseGeneralProtectionFault0(pIemCpu);
3511
3512 /*
3513 * Do the job.
3514 */
3515 RTUINT64U uValue;
3516 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3517 if (rc != VINF_SUCCESS)
3518 {
3519 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3520 return iemRaiseGeneralProtectionFault0(pIemCpu);
3521 }
3522
3523 pCtx->rax = uValue.au32[0];
3524 pCtx->rdx = uValue.au32[1];
3525
3526 iemRegAddToRip(pIemCpu, cbInstr);
3527 return VINF_SUCCESS;
3528}
3529
3530
3531/**
3532 * Implements 'IN eAX, port'.
3533 *
3534 * @param u16Port The source port.
3535 * @param cbReg The register size.
3536 */
3537IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3538{
3539 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3540
3541 /*
3542 * CPL check
3543 */
3544 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3545 if (rcStrict != VINF_SUCCESS)
3546 return rcStrict;
3547
3548 /*
3549 * Perform the I/O.
3550 */
3551 uint32_t u32Value;
3552 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3553 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3554 else
3555 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3556 if (IOM_SUCCESS(rcStrict))
3557 {
3558 switch (cbReg)
3559 {
3560 case 1: pCtx->al = (uint8_t)u32Value; break;
3561 case 2: pCtx->ax = (uint16_t)u32Value; break;
3562 case 4: pCtx->rax = u32Value; break;
3563 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3564 }
3565 iemRegAddToRip(pIemCpu, cbInstr);
3566 pIemCpu->cPotentialExits++;
3567 }
3568 /** @todo massage rcStrict. */
3569 return rcStrict;
3570}
3571
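/*
 * Illustrative sketch (not part of the build): the coarse I/O permission rule
 * applied by iemHlpCheckPortIOPermission and the in/out implementations here.
 * In protected mode, CPL > IOPL or V8086 mode means the TSS I/O permission
 * bitmap has to decide (not implemented above); otherwise the access is
 * allowed.  The enum and helper are assumptions for the example.
 */
#if 0
# include <stdbool.h>

typedef enum { IOPERM_ALLOWED, IOPERM_CHECK_BITMAP } IOPERMRESULT;

static IOPERMRESULT checkIoPermission(bool fProtectedMode, unsigned uCpl, unsigned uIopl, bool fV86)
{
    if (fProtectedMode && (uCpl > uIopl || fV86))
        return IOPERM_CHECK_BITMAP;     /* consult the TSS I/O bitmap, #GP if denied */
    return IOPERM_ALLOWED;              /* real mode, or CPL <= IOPL */
}
#endif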
3572
3573/**
3574 * Implements 'IN eAX, DX'.
3575 *
3576 * @param cbReg The register size.
3577 */
3578IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3579{
3580 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3581}
3582
3583
3584/**
3585 * Implements 'OUT port, eAX'.
3586 *
3587 * @param u16Port The destination port.
3588 * @param cbReg The register size.
3589 */
3590IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3591{
3592 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3593
3594 /*
3595 * CPL check
3596 */
3597 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3598 if (rcStrict != VINF_SUCCESS)
3599 return rcStrict;
3604
3605 /*
3606 * Perform the I/O.
3607 */
3608 uint32_t u32Value;
3609 switch (cbReg)
3610 {
3611 case 1: u32Value = pCtx->al; break;
3612 case 2: u32Value = pCtx->ax; break;
3613 case 4: u32Value = pCtx->eax; break;
3614 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3615 }
3616 VBOXSTRICTRC rc;
3617 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3618 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3619 else
3620 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3621 if (IOM_SUCCESS(rc))
3622 {
3623 iemRegAddToRip(pIemCpu, cbInstr);
3624 pIemCpu->cPotentialExits++;
3625 /** @todo massage rc. */
3626 }
3627 return rc;
3628}
3629
3630
3631/**
3632 * Implements 'OUT DX, eAX'.
3633 *
3634 * @param cbReg The register size.
3635 */
3636IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3637{
3638 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3639}
3640
3641
3642/**
3643 * Implements 'CLI'.
3644 */
3645IEM_CIMPL_DEF_0(iemCImpl_cli)
3646{
3647 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3648
3649 if (pCtx->cr0 & X86_CR0_PE)
3650 {
3651 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3652 if (!pCtx->eflags.Bits.u1VM)
3653 {
3654 if (pIemCpu->uCpl <= uIopl)
3655 pCtx->eflags.Bits.u1IF = 0;
3656 else if ( pIemCpu->uCpl == 3
3657 && (pCtx->cr4 & X86_CR4_PVI) )
3658 pCtx->eflags.Bits.u1VIF = 0;
3659 else
3660 return iemRaiseGeneralProtectionFault0(pIemCpu);
3661 }
3662 /* V8086 */
3663 else if (uIopl == 3)
3664 pCtx->eflags.Bits.u1IF = 0;
3665 else if ( uIopl < 3
3666 && (pCtx->cr4 & X86_CR4_VME) )
3667 pCtx->eflags.Bits.u1VIF = 0;
3668 else
3669 return iemRaiseGeneralProtectionFault0(pIemCpu);
3670 }
3671 /* real mode */
3672 else
3673 pCtx->eflags.Bits.u1IF = 0;
3674 iemRegAddToRip(pIemCpu, cbInstr);
3675 return VINF_SUCCESS;
3676}
3677
3678
3679/**
3680 * Implements 'STI'.
3681 */
3682IEM_CIMPL_DEF_0(iemCImpl_sti)
3683{
3684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3685
3686 if (pCtx->cr0 & X86_CR0_PE)
3687 {
3688 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3689 if (!pCtx->eflags.Bits.u1VM)
3690 {
3691 if (pIemCpu->uCpl <= uIopl)
3692 pCtx->eflags.Bits.u1IF = 1;
3693 else if ( pIemCpu->uCpl == 3
3694 && (pCtx->cr4 & X86_CR4_PVI)
3695 && !pCtx->eflags.Bits.u1VIP )
3696 pCtx->eflags.Bits.u1VIF = 1;
3697 else
3698 return iemRaiseGeneralProtectionFault0(pIemCpu);
3699 }
3700 /* V8086 */
3701 else if (uIopl == 3)
3702 pCtx->eflags.Bits.u1IF = 1;
3703 else if ( uIopl < 3
3704 && (pCtx->cr4 & X86_CR4_VME)
3705 && !pCtx->eflags.Bits.u1VIP )
3706 pCtx->eflags.Bits.u1VIF = 1;
3707 else
3708 return iemRaiseGeneralProtectionFault0(pIemCpu);
3709 }
3710 /* real mode */
3711 else
3712 pCtx->eflags.Bits.u1IF = 1;
3713
3714 iemRegAddToRip(pIemCpu, cbInstr);
3715 /** @todo don't do this unconditionally... */
3716 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3717 return VINF_SUCCESS;
3718}
3719
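#if 0 /* illustrative sketch, not built */
/*
 * The CLI/STI gating above boils down to a three-way decision: touch IF,
 * touch VIF, or raise #GP(0).  A minimal standalone sketch of that decision,
 * plain C99 with hypothetical inputs (no IEM state assumed); the VIP test
 * only applies to STI:
 */
# include <stdbool.h>
# include <stdint.h>

typedef enum SKETCHIFACTION { SKETCH_TOUCH_IF, SKETCH_TOUCH_VIF, SKETCH_RAISE_GP0 } SKETCHIFACTION;

static SKETCHIFACTION sketchCliStiAction(bool fProtMode, bool fV86, uint8_t uCpl, uint8_t uIopl,
                                         bool fCr4Pvi, bool fCr4Vme, bool fSti, bool fVip)
{
    if (!fProtMode)
        return SKETCH_TOUCH_IF;                     /* real mode: always allowed */
    if (!fV86)
    {
        if (uCpl <= uIopl)
            return SKETCH_TOUCH_IF;                 /* privileged enough to change IF */
        if (uCpl == 3 && fCr4Pvi && (!fSti || !fVip))
            return SKETCH_TOUCH_VIF;                /* protected-mode virtual interrupts */
        return SKETCH_RAISE_GP0;
    }
    /* V8086 mode */
    if (uIopl == 3)
        return SKETCH_TOUCH_IF;
    if (uIopl < 3 && fCr4Vme && (!fSti || !fVip))
        return SKETCH_TOUCH_VIF;                    /* virtual-8086 mode extensions */
    return SKETCH_RAISE_GP0;
}
#endif /* not used - illustrative sketch */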
3720
3721/**
3722 * Implements 'HLT'.
3723 */
3724IEM_CIMPL_DEF_0(iemCImpl_hlt)
3725{
3726 if (pIemCpu->uCpl != 0)
3727 return iemRaiseGeneralProtectionFault0(pIemCpu);
3728 iemRegAddToRip(pIemCpu, cbInstr);
3729 return VINF_EM_HALT;
3730}
3731
3732
3733/**
3734 * Implements 'CPUID'.
3735 */
3736IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3737{
3738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3739
3740 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3741 pCtx->rax &= UINT32_C(0xffffffff);
3742 pCtx->rbx &= UINT32_C(0xffffffff);
3743 pCtx->rcx &= UINT32_C(0xffffffff);
3744 pCtx->rdx &= UINT32_C(0xffffffff);
3745
3746 iemRegAddToRip(pIemCpu, cbInstr);
3747 return VINF_SUCCESS;
3748}
3749
3750
3751/**
3752 * Implements 'AAD'.
3753 *
3754 * @param bImm            The immediate operand.
3755 */
3756IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3757{
3758 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3759
3760 uint16_t const ax = pCtx->ax;
3761 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3762 pCtx->ax = al;
3763 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3764 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3765 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3766
3767 iemRegAddToRip(pIemCpu, cbInstr);
3768 return VINF_SUCCESS;
3769}
3770
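#if 0 /* illustrative sketch, not built */
/*
 * The AAD arithmetic above in isolation: AL = AL + AH * imm8, AH = 0, with
 * SF/ZF/PF reflecting the new AL.  A minimal standalone sketch, plain C99,
 * flags omitted and no IEM state assumed:
 */
# include <stdint.h>

static uint16_t sketchAad(uint16_t uAx, uint8_t bImm)
{
    uint8_t const bAl = (uint8_t)(uAx & 0xff);
    uint8_t const bAh = (uint8_t)(uAx >> 8);
    return (uint8_t)(bAl + bAh * bImm);    /* result lands in AL, AH becomes zero */
}
#endif /* not used - illustrative sketch */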
3771
3772/**
3773 * Implements 'AAM'.
3774 *
3775 * @param bImm The immediate operand. Cannot be 0.
3776 */
3777IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3778{
3779 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3780 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3781
3782 uint16_t const ax = pCtx->ax;
3783 uint8_t const al = (uint8_t)ax % bImm;
3784 uint8_t const ah = (uint8_t)ax / bImm;
3785 pCtx->ax = (ah << 8) + al;
3786 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3787 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3788 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3789
3790 iemRegAddToRip(pIemCpu, cbInstr);
3791 return VINF_SUCCESS;
3792}
3793
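#if 0 /* illustrative sketch, not built */
/*
 * The AAM arithmetic above in isolation: AH = AL / imm8, AL = AL % imm8, with
 * SF/ZF/PF reflecting the new AL.  A minimal standalone sketch, plain C99,
 * flags omitted; as above, imm8 == 0 is a #DE handled by the decoder:
 */
# include <stdint.h>

static uint16_t sketchAam(uint16_t uAx, uint8_t bImm)
{
    uint8_t const bAl = (uint8_t)(uAx & 0xff);   /* caller guarantees bImm != 0 */
    return (uint16_t)(((bAl / bImm) << 8) | (bAl % bImm));
}
#endif /* not used - illustrative sketch */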
3794
3795
3796
3797/*
3798 * Instantiate the various string operation combinations.
3799 */
3800#define OP_SIZE 8
3801#define ADDR_SIZE 16
3802#include "IEMAllCImplStrInstr.cpp.h"
3803#define OP_SIZE 8
3804#define ADDR_SIZE 32
3805#include "IEMAllCImplStrInstr.cpp.h"
3806#define OP_SIZE 8
3807#define ADDR_SIZE 64
3808#include "IEMAllCImplStrInstr.cpp.h"
3809
3810#define OP_SIZE 16
3811#define ADDR_SIZE 16
3812#include "IEMAllCImplStrInstr.cpp.h"
3813#define OP_SIZE 16
3814#define ADDR_SIZE 32
3815#include "IEMAllCImplStrInstr.cpp.h"
3816#define OP_SIZE 16
3817#define ADDR_SIZE 64
3818#include "IEMAllCImplStrInstr.cpp.h"
3819
3820#define OP_SIZE 32
3821#define ADDR_SIZE 16
3822#include "IEMAllCImplStrInstr.cpp.h"
3823#define OP_SIZE 32
3824#define ADDR_SIZE 32
3825#include "IEMAllCImplStrInstr.cpp.h"
3826#define OP_SIZE 32
3827#define ADDR_SIZE 64
3828#include "IEMAllCImplStrInstr.cpp.h"
3829
3830#define OP_SIZE 64
3831#define ADDR_SIZE 32
3832#include "IEMAllCImplStrInstr.cpp.h"
3833#define OP_SIZE 64
3834#define ADDR_SIZE 64
3835#include "IEMAllCImplStrInstr.cpp.h"
3836
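#if 0 /* illustrative sketch, not built */
/*
 * The block above is the classic multiple-inclusion "template" pattern:
 * IEMAllCImplStrInstr.cpp.h expands its string-instruction workers once per
 * OP_SIZE / ADDR_SIZE combination and presumably #undef's both macros again
 * at its end, which is why they can simply be redefined before each include
 * without an intervening #undef.  A toy sketch of one such instantiation,
 * plain C99; the SKETCH_* names are made up for illustration only:
 */
# include <stdint.h>

# define SKETCH_CAT2(a_Op, a_Addr)  sketchStrOp_o##a_Op##_a##a_Addr
# define SKETCH_CAT(a_Op, a_Addr)   SKETCH_CAT2(a_Op, a_Addr)

/* consumer side: pick the sizes ... */
# define SKETCH_OP_SIZE    8
# define SKETCH_ADDR_SIZE  16

/* ... and this is what one expansion of the included file would provide: */
static uint32_t SKETCH_CAT(SKETCH_OP_SIZE, SKETCH_ADDR_SIZE)(void)   /* -> sketchStrOp_o8_a16() */
{
    /* body parameterized on the two sizes; the real workers implement MOVS/STOS/etc. */
    return (uint32_t)SKETCH_OP_SIZE << 16 | SKETCH_ADDR_SIZE;
}

/* the included file ends by undefining its parameters so the consumer can
   redefine them and include it again for the next combination: */
# undef SKETCH_OP_SIZE
# undef SKETCH_ADDR_SIZE
#endif /* not used - illustrative sketch */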
3837
3838/**
3839 * Implements 'FINIT' and 'FNINIT'.
3840 *
3841 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3842 * not.
3843 */
3844IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3845{
3846 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3847
3848 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3849 return iemRaiseDeviceNotAvailable(pIemCpu);
3850
3851 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
3852 if (fCheckXcpts && TODO )
3853 return iemRaiseMathFault(pIemCpu);
3854 */
3855
3856 if (iemFRegIsFxSaveFormat(pIemCpu))
3857 {
3858 pCtx->fpu.FCW = 0x37f;
3859 pCtx->fpu.FSW = 0;
3860 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
3861 pCtx->fpu.FPUDP = 0;
3862 pCtx->fpu.DS = 0; //??
3863 pCtx->fpu.FPUIP = 0;
3864 pCtx->fpu.CS = 0; //??
3865 pCtx->fpu.FOP = 0;
3866 }
3867 else
3868 {
3869 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3870 pFpu->FCW = 0x37f;
3871 pFpu->FSW = 0;
3872 pFpu->FTW = 0xffff; /* 11 - empty */
3873 pFpu->FPUOO = 0; //??
3874 pFpu->FPUOS = 0; //??
3875 pFpu->FPUIP = 0;
3876 pFpu->CS = 0; //??
3877 pFpu->FOP = 0;
3878 }
3879
3880 iemRegAddToRip(pIemCpu, cbInstr);
3881 return VINF_SUCCESS;
3882}
3883
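#if 0 /* illustrative sketch, not built */
/*
 * The FINIT value 0x37f used above, spelled out: all six exception masks set,
 * precision control = double extended (64-bit significand), rounding control
 * = nearest.  A standalone sketch composing the value from generic constants;
 * plain C99, SKETCH_* names made up here rather than the X86_FCW_* ones:
 */
# include <assert.h>
# include <stdint.h>

# define SKETCH_FCW_MASK_ALL    UINT16_C(0x003f)  /* IM|DM|ZM|OM|UM|PM */
# define SKETCH_FCW_RSVD_BIT6   UINT16_C(0x0040)  /* reserved bit that is set in the init value */
# define SKETCH_FCW_PC_64BIT    UINT16_C(0x0300)  /* precision control = extended */
# define SKETCH_FCW_RC_NEAREST  UINT16_C(0x0000)  /* round to nearest (even) */

static uint16_t sketchFInitFcw(void)
{
    uint16_t const uFcw = SKETCH_FCW_MASK_ALL | SKETCH_FCW_RSVD_BIT6
                        | SKETCH_FCW_PC_64BIT | SKETCH_FCW_RC_NEAREST;
    assert(uFcw == UINT16_C(0x037f));
    return uFcw;
}
#endif /* not used - illustrative sketch */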
3884
3885/**
3886 * Implements 'FXSAVE'.
3887 *
3888 * @param iEffSeg The effective segment.
3889 * @param GCPtrEff The address of the image.
3890 * @param enmEffOpSize The operand size (only REX.W really matters).
3891 */
3892IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3893{
3894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3895
3896 /*
3897 * Raise exceptions.
3898 */
3899 if (pCtx->cr0 & X86_CR0_EM)
3900 return iemRaiseUndefinedOpcode(pIemCpu);
3901 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3902 return iemRaiseDeviceNotAvailable(pIemCpu);
3903 if (GCPtrEff & 15)
3904 {
3905        /** @todo CPU/VM detection possible! \#AC might not be signalled for
3906         * all/any misalignment sizes; Intel says it's an implementation detail. */
3907 if ( (pCtx->cr0 & X86_CR0_AM)
3908 && pCtx->eflags.Bits.u1AC
3909 && pIemCpu->uCpl == 3)
3910 return iemRaiseAlignmentCheckException(pIemCpu);
3911 return iemRaiseGeneralProtectionFault0(pIemCpu);
3912 }
3913 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3914
3915 /*
3916 * Access the memory.
3917 */
3918 void *pvMem512;
3919 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3920 if (rcStrict != VINF_SUCCESS)
3921 return rcStrict;
3922 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
3923
3924 /*
3925 * Store the registers.
3926 */
3927    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
3928     * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
3929
3930 /* common for all formats */
3931 pDst->FCW = pCtx->fpu.FCW;
3932 pDst->FSW = pCtx->fpu.FSW;
3933 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
3934 pDst->FOP = pCtx->fpu.FOP;
3935 pDst->MXCSR = pCtx->fpu.MXCSR;
3936 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
3937 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
3938 {
3939 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
3940 * them for now... */
3941 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
3942 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
3943 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
3944 pDst->aRegs[i].au32[3] = 0;
3945 }
3946
3947 /* FPU IP, CS, DP and DS. */
3948 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
3949 * state information. :-/
3950 * Storing zeros now to prevent any potential leakage of host info. */
3951 pDst->FPUIP = 0;
3952 pDst->CS = 0;
3953 pDst->Rsrvd1 = 0;
3954 pDst->FPUDP = 0;
3955 pDst->DS = 0;
3956 pDst->Rsrvd2 = 0;
3957
3958 /* XMM registers. */
3959 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
3960 || pIemCpu->enmCpuMode != IEMMODE_64BIT
3961 || pIemCpu->uCpl != 0)
3962 {
3963 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
3964 for (uint32_t i = 0; i < cXmmRegs; i++)
3965 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
3966 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
3967 * right? */
3968 }
3969
3970 /*
3971 * Commit the memory.
3972 */
3973 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3974 if (rcStrict != VINF_SUCCESS)
3975 return rcStrict;
3976
3977 iemRegAddToRip(pIemCpu, cbInstr);
3978 return VINF_SUCCESS;
3979}
3980
3981
3982/**
3983 * Implements 'FXRSTOR'.
 * @param iEffSeg The effective segment.
3984 * @param GCPtrEff The address of the image.
3985 * @param GCPtrEff The address of the image.
3986 * @param enmEffOpSize The operand size (only REX.W really matters).
3987 */
3988IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3989{
3990 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3991
3992 /*
3993 * Raise exceptions.
3994 */
3995 if (pCtx->cr0 & X86_CR0_EM)
3996 return iemRaiseUndefinedOpcode(pIemCpu);
3997 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3998 return iemRaiseDeviceNotAvailable(pIemCpu);
3999 if (GCPtrEff & 15)
4000 {
4001        /** @todo CPU/VM detection possible! \#AC might not be signalled for
4002         * all/any misalignment sizes; Intel says it's an implementation detail. */
4003 if ( (pCtx->cr0 & X86_CR0_AM)
4004 && pCtx->eflags.Bits.u1AC
4005 && pIemCpu->uCpl == 3)
4006 return iemRaiseAlignmentCheckException(pIemCpu);
4007 return iemRaiseGeneralProtectionFault0(pIemCpu);
4008 }
4009 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4010
4011 /*
4012 * Access the memory.
4013 */
4014 void *pvMem512;
4015 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4016 if (rcStrict != VINF_SUCCESS)
4017 return rcStrict;
4018 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4019
4020 /*
4021 * Check the state for stuff which will GP(0).
4022 */
4023 uint32_t const fMXCSR = pSrc->MXCSR;
4024 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4025 if (fMXCSR & ~fMXCSR_MASK)
4026 {
4027 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4028 return iemRaiseGeneralProtectionFault0(pIemCpu);
4029 }
4030
4031 /*
4032 * Load the registers.
4033 */
4034    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
4035     * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
4036
4037 /* common for all formats */
4038 pCtx->fpu.FCW = pSrc->FCW;
4039 pCtx->fpu.FSW = pSrc->FSW;
4040 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4041 pCtx->fpu.FOP = pSrc->FOP;
4042 pCtx->fpu.MXCSR = fMXCSR;
4043 /* (MXCSR_MASK is read-only) */
4044 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4045 {
4046 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4047 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4048 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4049 pCtx->fpu.aRegs[i].au32[3] = 0;
4050 }
4051
4052 /* FPU IP, CS, DP and DS. */
4053 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4054 {
4055 pCtx->fpu.FPUIP = pSrc->FPUIP;
4056 pCtx->fpu.CS = pSrc->CS;
4057 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4058 pCtx->fpu.FPUDP = pSrc->FPUDP;
4059 pCtx->fpu.DS = pSrc->DS;
4060 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4061 }
4062 else
4063 {
4064 pCtx->fpu.FPUIP = pSrc->FPUIP;
4065 pCtx->fpu.CS = pSrc->CS;
4066 pCtx->fpu.Rsrvd1 = 0;
4067 pCtx->fpu.FPUDP = pSrc->FPUDP;
4068 pCtx->fpu.DS = pSrc->DS;
4069 pCtx->fpu.Rsrvd2 = 0;
4070 }
4071
4072 /* XMM registers. */
4073 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4074 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4075 || pIemCpu->uCpl != 0)
4076 {
4077 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4078 for (uint32_t i = 0; i < cXmmRegs; i++)
4079 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4080 }
4081
4082 /*
4083 * Commit the memory.
4084 */
4085 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4086 if (rcStrict != VINF_SUCCESS)
4087 return rcStrict;
4088
4089 iemRegAddToRip(pIemCpu, cbInstr);
4090 return VINF_SUCCESS;
4091}
4092
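#if 0 /* illustrative sketch, not built */
/*
 * The MXCSR check in FXRSTOR above in a nutshell: any bit set in the incoming
 * MXCSR that is not set in MXCSR_MASK (defaulting to 0xffbf when the saved
 * mask is zero) must raise #GP(0).  A standalone sketch of the same
 * predicate, plain C99, no IEM state assumed:
 */
# include <stdbool.h>
# include <stdint.h>

static bool sketchMxcsrLoadFaults(uint32_t fMxcsrNew, uint32_t fMxcsrMaskSaved)
{
    uint32_t const fMask = fMxcsrMaskSaved ? fMxcsrMaskSaved : UINT32_C(0xffbf);
    return (fMxcsrNew & ~fMask) != 0;   /* true -> #GP(0) */
}
#endif /* not used - illustrative sketch */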
4093
4094/**
4095 * Common routine for fnstenv and fnsave.
4096 *
4097 * @param uPtr Where to store the state.
4098 * @param pCtx The CPU context.
4099 */
4100static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4101{
4102 if (enmEffOpSize == IEMMODE_16BIT)
4103 {
4104 uPtr.pu16[0] = pCtx->fpu.FCW;
4105 uPtr.pu16[1] = pCtx->fpu.FSW;
4106 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4107 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4108 {
4109 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4110 * protected mode or long mode and we save it in real mode? And vice
4111             * versa? And with 32-bit operand size? I think the CPU is storing the
4112 * effective address ((CS << 4) + IP) in the offset register and not
4113 * doing any address calculations here. */
4114 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4115 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4116 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4117 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4118 }
4119 else
4120 {
4121 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4122 uPtr.pu16[4] = pCtx->fpu.CS;
4123 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4124 uPtr.pu16[6] = pCtx->fpu.DS;
4125 }
4126 }
4127 else
4128 {
4129 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4130 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4131 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4132 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4133 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4134 {
4135 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4136 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4137 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4138 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4139 }
4140 else
4141 {
4142 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4143 uPtr.pu16[4*2] = pCtx->fpu.CS;
4144 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4145 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4146 uPtr.pu16[6*2] = pCtx->fpu.DS;
4147 }
4148 }
4149}
4150
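#if 0 /* illustrative sketch, not built */
/*
 * The 16-bit real-mode environment packing above: the 20-bit linear FPU
 * instruction pointer is split into a 16-bit offset word plus a word holding
 * bits 19:16 in its top nibble alongside the 11-bit opcode.  A standalone
 * sketch of pack/unpack, plain C99, no IEM state assumed:
 */
# include <stdint.h>

static void sketchPackRealModeFpuIp(uint32_t uFpuIp, uint16_t uFop, uint16_t *pu16Off, uint16_t *pu16Hi)
{
    *pu16Off = (uint16_t)uFpuIp;                                    /* bits 15:0 */
    *pu16Hi  = (uint16_t)(((uFpuIp >> 4) & UINT16_C(0xf000))        /* bits 19:16 into 15:12 */
                          | (uFop & UINT16_C(0x07ff)));             /* 11-bit opcode */
}

static uint32_t sketchUnpackRealModeFpuIp(uint16_t u16Off, uint16_t u16Hi)
{
    return u16Off | ((uint32_t)(u16Hi & UINT16_C(0xf000)) << 4);    /* bits 19:16 restored */
}
#endif /* not used - illustrative sketch */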
4151
4152/**
4153 * Common routine for fldenv and frstor.
4154 *
4155 * @param uPtr Where to load the state from.
4156 * @param pCtx The CPU context.
4157 */
4158static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4159{
4160 if (enmEffOpSize == IEMMODE_16BIT)
4161 {
4162 pCtx->fpu.FCW = uPtr.pu16[0];
4163 pCtx->fpu.FSW = uPtr.pu16[1];
4164 pCtx->fpu.FTW = uPtr.pu16[2];
4165 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4166 {
4167 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4168 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4169 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4170 pCtx->fpu.CS = 0;
4171 pCtx->fpu.DS = 0;
4172 }
4173 else
4174 {
4175 pCtx->fpu.FPUIP = uPtr.pu16[3];
4176 pCtx->fpu.CS = uPtr.pu16[4];
4177 pCtx->fpu.FPUDP = uPtr.pu16[5];
4178 pCtx->fpu.DS = uPtr.pu16[6];
4179 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4180 }
4181 }
4182 else
4183 {
4184 pCtx->fpu.FCW = uPtr.pu16[0*2];
4185 pCtx->fpu.FSW = uPtr.pu16[1*2];
4186 pCtx->fpu.FTW = uPtr.pu16[2*2];
4187 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4188 {
4189 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4190 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4191 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4192 pCtx->fpu.CS = 0;
4193 pCtx->fpu.DS = 0;
4194 }
4195 else
4196 {
4197 pCtx->fpu.FPUIP = uPtr.pu32[3];
4198 pCtx->fpu.CS = uPtr.pu16[4*2];
4199 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4200 pCtx->fpu.FPUDP = uPtr.pu32[5];
4201 pCtx->fpu.DS = uPtr.pu16[6*2];
4202 }
4203 }
4204
4205 /* Make adjustments. */
4206 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4207 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4208 iemFpuRecalcExceptionStatus(pCtx);
4209 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4210 * exceptions are pending after loading the saved state? */
4211}
4212
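#if 0 /* illustrative sketch, not built */
/*
 * The environment format uses the full 16-bit tag word (two bits per
 * register, 11b = empty) while the state kept in the context is the abridged
 * form (one bit per register, 1 = occupied), hence the iemFpuCalcFullFtw and
 * iemFpuCompressFtw calls above.  A standalone sketch of the conversions;
 * the expansion here simply tags every occupied register as valid (00b),
 * whereas the real calculation also classifies zero/special from the
 * register contents:
 */
# include <stdint.h>

static uint8_t sketchCompressFtw(uint16_t u16FullFtw)
{
    uint8_t u8Ftw = 0;
    for (unsigned iReg = 0; iReg < 8; iReg++)
        if (((u16FullFtw >> (iReg * 2)) & 3) != 3)      /* anything but 11b is occupied */
            u8Ftw |= (uint8_t)(1 << iReg);
    return u8Ftw;
}

static uint16_t sketchExpandFtw(uint8_t u8Ftw)
{
    uint16_t u16FullFtw = 0;
    for (unsigned iReg = 0; iReg < 8; iReg++)
        if (!(u8Ftw & (1 << iReg)))
            u16FullFtw |= (uint16_t)(3 << (iReg * 2));  /* empty; occupied left as 00b (valid) */
    return u16FullFtw;
}
#endif /* not used - illustrative sketch */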
4213
4214/**
4215 * Implements 'FNSTENV'.
4216 *
4217 * @param enmEffOpSize The operand size (only REX.W really matters).
4218 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4219 * @param GCPtrEffDst The address of the image.
4220 */
4221IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4222{
4223 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4224 RTPTRUNION uPtr;
4225 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4226 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4227 if (rcStrict != VINF_SUCCESS)
4228 return rcStrict;
4229
4230 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4231
4232 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4233 if (rcStrict != VINF_SUCCESS)
4234 return rcStrict;
4235
4236 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4237 iemRegAddToRip(pIemCpu, cbInstr);
4238 return VINF_SUCCESS;
4239}
4240
4241
4242/**
4243 * Implements 'FLDENV'.
4244 *
4245 * @param enmEffOpSize The operand size (only REX.W really matters).
4246 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4247 * @param GCPtrEffSrc The address of the image.
4248 */
4249IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4250{
4251 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4252 RTCPTRUNION uPtr;
4253 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4254 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4255 if (rcStrict != VINF_SUCCESS)
4256 return rcStrict;
4257
4258 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4259
4260 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4261 if (rcStrict != VINF_SUCCESS)
4262 return rcStrict;
4263
4264 iemRegAddToRip(pIemCpu, cbInstr);
4265 return VINF_SUCCESS;
4266}
4267
4268
4269/**
4270 * Implements 'FLDCW'.
4271 *
4272 * @param u16Fcw The new FCW.
4273 */
4274IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4275{
4276 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4277
4278 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4279    /** @todo Testcase: Try to see what happens when trying to set undefined bits
4280     * (other than 6 and 7). Currently ignoring them. */
4281    /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4282     * according to FSW. (This is what is currently implemented.) */
4283 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4284 iemFpuRecalcExceptionStatus(pCtx);
4285
4286 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4287 iemRegAddToRip(pIemCpu, cbInstr);
4288 return VINF_SUCCESS;
4289}
4290
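#if 0 /* illustrative sketch, not built */
/*
 * What the iemFpuRecalcExceptionStatus call above boils down to conceptually:
 * FSW.ES is set iff at least one of the six exception flags in FSW is not
 * masked by the corresponding bit in FCW.  A standalone sketch of that
 * predicate, plain C99; the real helper lives elsewhere and may also
 * maintain the FSW.B bit:
 */
# include <stdint.h>

static uint16_t sketchRecalcEs(uint16_t uFcw, uint16_t uFsw)
{
    uint16_t const fUnmasked = (uint16_t)(uFsw & ~uFcw & UINT16_C(0x003f)); /* IE..PE vs IM..PM */
    if (fUnmasked)
        uFsw |= UINT16_C(0x0080);   /* ES */
    else
        uFsw &= ~UINT16_C(0x0080);
    return uFsw;
}
#endif /* not used - illustrative sketch */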
4291
4292
4293/**
4294 * Implements the underflow case of fxch.
4295 *
4296 * @param iStReg The other stack register.
4297 */
4298IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4299{
4300 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4301
4302 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4303 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4304 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4305
4306 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4307 * registers are read as QNaN and then exchanged. This could be
4308 * wrong... */
4309 if (pCtx->fpu.FCW & X86_FCW_IM)
4310 {
4311 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4312 {
4313 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4314 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4315 else
4316 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4317 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4318 }
4319 else
4320 {
4321 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4322 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4323 }
4324 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4325 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4326 }
4327 else
4328 {
4329 /* raise underflow exception, don't change anything. */
4330 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4331 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4332 }
4333 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4334
4335 iemRegAddToRip(pIemCpu, cbInstr);
4336 return VINF_SUCCESS;
4337}
4338
4339
4340/**
4341 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4342 *
4343 * @param iStReg The other stack register.
 * @param pfnAImpl The comparison worker to invoke.
 * @param fPop Whether to pop ST(0) afterwards (FCOMIP/FUCOMIP).
4344 */
4345IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4346{
4347 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4348 Assert(iStReg < 8);
4349
4350 /*
4351 * Raise exceptions.
4352 */
4353 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4354 return iemRaiseDeviceNotAvailable(pIemCpu);
4355 uint16_t u16Fsw = pCtx->fpu.FSW;
4356 if (u16Fsw & X86_FSW_ES)
4357 return iemRaiseMathFault(pIemCpu);
4358
4359 /*
4360 * Check if any of the register accesses cause #SF + #IA.
4361 */
4362 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4363 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4364 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4365 {
4366 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4367 pCtx->fpu.FSW &= ~X86_FSW_C1;
4368 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4369 if ( !(u16Fsw & X86_FSW_IE)
4370 || (pCtx->fpu.FCW & X86_FCW_IM) )
4371 {
4372 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4373            pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4374 }
4375 }
4376 else if (pCtx->fpu.FCW & X86_FCW_IM)
4377 {
4378 /* Masked underflow. */
4379 pCtx->fpu.FSW &= ~X86_FSW_C1;
4380 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4381 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4382 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4383 }
4384 else
4385 {
4386 /* Raise underflow - don't touch EFLAGS or TOP. */
4387 pCtx->fpu.FSW &= ~X86_FSW_C1;
4388 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4389 fPop = false;
4390 }
4391
4392 /*
4393 * Pop if necessary.
4394 */
4395 if (fPop)
4396 {
4397 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4398        pCtx->fpu.FSW &= ~X86_FSW_TOP_MASK;
4399        pCtx->fpu.FSW |= ((iReg1 + 1) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4400 }
4401
4402 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4403 iemRegAddToRip(pIemCpu, cbInstr);
4404 return VINF_SUCCESS;
4405}
4406
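#if 0 /* illustrative sketch, not built */
/*
 * The EFLAGS produced by the pfnAImpl comparison worker above follow the
 * architected FCOMI/FUCOMI mapping: ZF/PF/CF = 000 for greater, 001 for
 * less, 100 for equal and 111 for unordered, with OF/SF/AF cleared.  A
 * standalone sketch of that mapping, plain C99; the SKETCH_* names are made
 * up, the flag bit values are the architectural ones:
 */
# include <stdint.h>

# define SKETCH_EFL_CF  UINT32_C(0x0001)
# define SKETCH_EFL_PF  UINT32_C(0x0004)
# define SKETCH_EFL_ZF  UINT32_C(0x0040)

typedef enum SKETCHFPCMP { SKETCH_FP_GREATER, SKETCH_FP_LESS, SKETCH_FP_EQUAL, SKETCH_FP_UNORDERED } SKETCHFPCMP;

static uint32_t sketchFcomiFlags(SKETCHFPCMP enmResult)
{
    switch (enmResult)
    {
        case SKETCH_FP_GREATER: return 0;
        case SKETCH_FP_LESS:    return SKETCH_EFL_CF;
        case SKETCH_FP_EQUAL:   return SKETCH_EFL_ZF;
        default:                return SKETCH_EFL_ZF | SKETCH_EFL_PF | SKETCH_EFL_CF; /* unordered */
    }
}
#endif /* not used - illustrative sketch */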
4407/** @} */
4408