VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 41829

Last change on this file since 41829 was 41783, checked in by vboxsync, 12 years ago

Doxygen, comment typos.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 144.6 KB
1/* $Id: IEMAllCImpl.cpp.h 41783 2012-06-16 19:24:15Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
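
/*
 * Illustrative sketch only, not part of the original file: the core of the
 * TSS I/O permission bitmap check the @todo above refers to.  Each port maps
 * to one bit (starting at the offset given by the 16-bit field at TSS+0x66),
 * and the access is only allowed if every bit covered by the operand is
 * clear.  The bitmap bytes are assumed to have been read from guest memory
 * already; bits beyond the TSS limit count as set.
 */
#if 0
static bool iemSketchIsIoPortAccessAllowed(uint8_t const *pabIoBitmap, uint32_t cbIoBitmap,
                                           uint16_t u16Port, uint8_t cbOperand)
{
    for (uint32_t i = 0; i < cbOperand; i++)
    {
        uint32_t const iBit  = (uint32_t)u16Port + i;
        uint32_t const iByte = iBit / 8;
        if (iByte >= cbIoBitmap)                    /* outside the bitmap -> #GP */
            return false;
        if (pabIoBitmap[iByte] & RT_BIT(iBit & 7))  /* bit set -> #GP */
            return false;
    }
    return true;
}
#endif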
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
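
/*
 * Illustrative sketch only, not part of the original file: the same parity
 * calculation folded down with XOR instead of counting the bits one at a
 * time.  PF is set when the number of set bits in the low byte is even.
 */
#if 0
static bool iemSketchCalcParityFlag(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;      /* bit 0 now holds the XOR of all eight bits */
    return !(u8Result & 1);
}
#endif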
80
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param puSel The selector register.
106 * @param pHid The hidden register part.
107 */
108static void iemHlpLoadNullDataSelectorProt(PRTSEL puSel, PCPUMSELREGHID pHid)
109{
110 /** @todo Testcase: write a testcase checking what happens when loading a NULL
111 * data selector in protected mode. */
112 pHid->u64Base = 0;
113 pHid->u32Limit = 0;
114 pHid->Attr.u = 0;
115 *puSel = 0;
116}
117
118
119/**
120 * Helper used by iret.
121 *
122 * @param uCpl The new CPL.
123 * @param puSel The selector register.
124 * @param pHid The corresponding hidden register.
125 */
126static void iemHlpAdjustSelectorForNewCpl(uint8_t uCpl, PRTSEL puSel, PCPUMSELREGHID pHid)
127{
128 if ( uCpl > pHid->Attr.n.u2Dpl
129 && pHid->Attr.n.u1DescType /* code or data, not system */
130 && (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
131 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
132 iemHlpLoadNullDataSelectorProt(puSel, pHid);
133}
134
135/** @} */
136
137/** @name C Implementations
138 * @{
139 */
140
141/**
142 * Implements a 16-bit popa.
143 */
144IEM_CIMPL_DEF_0(iemCImpl_popa_16)
145{
146 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
147 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
148 RTGCPTR GCPtrLast = GCPtrStart + 15;
149 VBOXSTRICTRC rcStrict;
150
151 /*
152 * The docs are a bit hard to comprehend here, but it looks like we wrap
153 * around in real mode as long as none of the individual "popa" crosses the
154 * end of the stack segment. In protected mode we check the whole access
155 * in one go. For efficiency, only do the word-by-word thing if we're in
156 * danger of wrapping around.
157 */
158 /** @todo do popa boundary / wrap-around checks. */
159 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
160 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
161 {
162 /* word-by-word */
163 RTUINT64U TmpRsp;
164 TmpRsp.u = pCtx->rsp;
165 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
166 if (rcStrict == VINF_SUCCESS)
167 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
168 if (rcStrict == VINF_SUCCESS)
169 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
170 if (rcStrict == VINF_SUCCESS)
171 {
172 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
173 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
174 }
175 if (rcStrict == VINF_SUCCESS)
176 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
177 if (rcStrict == VINF_SUCCESS)
178 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
179 if (rcStrict == VINF_SUCCESS)
180 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
181 if (rcStrict == VINF_SUCCESS)
182 {
183 pCtx->rsp = TmpRsp.u;
184 iemRegAddToRip(pIemCpu, cbInstr);
185 }
186 }
187 else
188 {
189 uint16_t const *pa16Mem = NULL;
190 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
191 if (rcStrict == VINF_SUCCESS)
192 {
193 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
194 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
195 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
196 /* skip sp */
197 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
198 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
199 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
200 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
201 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 iemRegAddToRsp(pCtx, 16);
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 }
209 return rcStrict;
210}
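
/*
 * Illustrative sketch only, not part of the original file: the 16-bit
 * pusha/popa stack frame as a struct, lowest address first.  PUSHA stores
 * AX, CX, DX, BX, SP, BP, SI, DI at decreasing addresses, and with
 * X86_GREG_xAX=0 .. X86_GREG_xDI=7 the word for register r therefore sits at
 * index 7 - r, which is what the code above and below indexes with.
 */
#if 0
typedef struct IEMSKETCHPUSHAFRAME16
{
    uint16_t di, si, bp, sp, bx, dx, cx, ax;    /* pa16Mem[0] .. pa16Mem[7] */
} IEMSKETCHPUSHAFRAME16;
#endif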
211
212
213/**
214 * Implements a 32-bit popa.
215 */
216IEM_CIMPL_DEF_0(iemCImpl_popa_32)
217{
218 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
219 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
220 RTGCPTR GCPtrLast = GCPtrStart + 31;
221 VBOXSTRICTRC rcStrict;
222
223 /*
224 * The docs are a bit hard to comprehend here, but it looks like we wrap
225 * around in real mode as long as none of the individual "popa" crosses the
226 * end of the stack segment. In protected mode we check the whole access
227 * in one go. For efficiency, only do the word-by-word thing if we're in
228 * danger of wrapping around.
229 */
230 /** @todo do popa boundary / wrap-around checks. */
231 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
232 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
233 {
234 /* word-by-word */
235 RTUINT64U TmpRsp;
236 TmpRsp.u = pCtx->rsp;
237 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
238 if (rcStrict == VINF_SUCCESS)
239 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
240 if (rcStrict == VINF_SUCCESS)
241 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
242 if (rcStrict == VINF_SUCCESS)
243 {
244 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
245 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
246 }
247 if (rcStrict == VINF_SUCCESS)
248 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
249 if (rcStrict == VINF_SUCCESS)
250 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
251 if (rcStrict == VINF_SUCCESS)
252 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
253 if (rcStrict == VINF_SUCCESS)
254 {
255#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
256 pCtx->rdi &= UINT32_MAX;
257 pCtx->rsi &= UINT32_MAX;
258 pCtx->rbp &= UINT32_MAX;
259 pCtx->rbx &= UINT32_MAX;
260 pCtx->rdx &= UINT32_MAX;
261 pCtx->rcx &= UINT32_MAX;
262 pCtx->rax &= UINT32_MAX;
263#endif
264 pCtx->rsp = TmpRsp.u;
265 iemRegAddToRip(pIemCpu, cbInstr);
266 }
267 }
268 else
269 {
270 uint32_t const *pa32Mem;
271 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
272 if (rcStrict == VINF_SUCCESS)
273 {
274 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
275 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
276 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
277 /* skip esp */
278 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
279 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
280 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
281 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
282 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
283 if (rcStrict == VINF_SUCCESS)
284 {
285 iemRegAddToRsp(pCtx, 32);
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 }
290 return rcStrict;
291}
292
293
294/**
295 * Implements a 16-bit pusha.
296 */
297IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
298{
299 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
300 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
301 RTGCPTR GCPtrBottom = GCPtrTop - 15;
302 VBOXSTRICTRC rcStrict;
303
304 /*
305 * The docs are a bit hard to comprehend here, but it looks like we wrap
306 * around in real mode as long as none of the individual "pusha" crosses the
307 * end of the stack segment. In protected mode we check the whole access
308 * in one go. For efficiency, only do the word-by-word thing if we're in
309 * danger of wrapping around.
310 */
311 /** @todo do pusha boundary / wrap-around checks. */
312 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
313 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
314 {
315 /* word-by-word */
316 RTUINT64U TmpRsp;
317 TmpRsp.u = pCtx->rsp;
318 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
319 if (rcStrict == VINF_SUCCESS)
320 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
321 if (rcStrict == VINF_SUCCESS)
322 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
323 if (rcStrict == VINF_SUCCESS)
324 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
325 if (rcStrict == VINF_SUCCESS)
326 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
333 if (rcStrict == VINF_SUCCESS)
334 {
335 pCtx->rsp = TmpRsp.u;
336 iemRegAddToRip(pIemCpu, cbInstr);
337 }
338 }
339 else
340 {
341 GCPtrBottom--;
342 uint16_t *pa16Mem = NULL;
343 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
344 if (rcStrict == VINF_SUCCESS)
345 {
346 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
347 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
348 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
349 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
350 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
351 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
352 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
353 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
354 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
355 if (rcStrict == VINF_SUCCESS)
356 {
357 iemRegSubFromRsp(pCtx, 16);
358 iemRegAddToRip(pIemCpu, cbInstr);
359 }
360 }
361 }
362 return rcStrict;
363}
364
365
366/**
367 * Implements a 32-bit pusha.
368 */
369IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
370{
371 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
372 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
373 RTGCPTR GCPtrBottom = GCPtrTop - 31;
374 VBOXSTRICTRC rcStrict;
375
376 /*
377 * The docs are a bit hard to comprehend here, but it looks like we wrap
378 * around in real mode as long as none of the individual "pusha" crosses the
379 * end of the stack segment. In protected mode we check the whole access
380 * in one go. For efficiency, only do the word-by-word thing if we're in
381 * danger of wrapping around.
382 */
383 /** @todo do pusha boundary / wrap-around checks. */
384 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
385 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
386 {
387 /* word-by-word */
388 RTUINT64U TmpRsp;
389 TmpRsp.u = pCtx->rsp;
390 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
391 if (rcStrict == VINF_SUCCESS)
392 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
393 if (rcStrict == VINF_SUCCESS)
394 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
395 if (rcStrict == VINF_SUCCESS)
396 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
397 if (rcStrict == VINF_SUCCESS)
398 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
399 if (rcStrict == VINF_SUCCESS)
400 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
401 if (rcStrict == VINF_SUCCESS)
402 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
403 if (rcStrict == VINF_SUCCESS)
404 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
405 if (rcStrict == VINF_SUCCESS)
406 {
407 pCtx->rsp = TmpRsp.u;
408 iemRegAddToRip(pIemCpu, cbInstr);
409 }
410 }
411 else
412 {
413 GCPtrBottom--;
414 uint32_t *pa32Mem;
415 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
416 if (rcStrict == VINF_SUCCESS)
417 {
418 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
419 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
420 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
421 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
422 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
423 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
424 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
425 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
426 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
427 if (rcStrict == VINF_SUCCESS)
428 {
429 iemRegSubFromRsp(pCtx, 32);
430 iemRegAddToRip(pIemCpu, cbInstr);
431 }
432 }
433 }
434 return rcStrict;
435}
436
437
438/**
439 * Implements pushf.
440 *
441 *
442 * @param enmEffOpSize The effective operand size.
443 */
444IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
445{
446 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
447
448 /*
449 * If we're in V8086 mode some care is required (which is why we're
450 * doing this in a C implementation).
451 */
452 uint32_t fEfl = pCtx->eflags.u;
453 if ( (fEfl & X86_EFL_VM)
454 && X86_EFL_GET_IOPL(fEfl) != 3 )
455 {
456 Assert(pCtx->cr0 & X86_CR0_PE);
457 if ( enmEffOpSize != IEMMODE_16BIT
458 || !(pCtx->cr4 & X86_CR4_VME))
459 return iemRaiseGeneralProtectionFault0(pIemCpu);
460 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
461 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
462 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
463 }
464
465 /*
466 * Ok, clear RF and VM and push the flags.
467 */
468 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
469
470 VBOXSTRICTRC rcStrict;
471 switch (enmEffOpSize)
472 {
473 case IEMMODE_16BIT:
474 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
475 break;
476 case IEMMODE_32BIT:
477 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
478 break;
479 case IEMMODE_64BIT:
480 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
481 break;
482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
483 }
484 if (rcStrict != VINF_SUCCESS)
485 return rcStrict;
486
487 iemRegAddToRip(pIemCpu, cbInstr);
488 return VINF_SUCCESS;
489}
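
/*
 * Illustrative sketch only, not part of the original file: what the V8086
 * VME path above pushes.  IF is EFLAGS bit 9 and VIF bit 19, hence the
 * >> (19 - 9) to substitute the virtual interrupt flag for IF in the 16-bit
 * image; RF and VM live above bit 15 and are dropped by the truncation.
 */
#if 0
static uint16_t iemSketchPushfVmeImage(uint32_t fEfl)
{
    fEfl &= ~X86_EFL_IF;                        /* drop the real IF */
    fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);   /* ... and put VIF in its place */
    return (uint16_t)fEfl;
}
#endif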
490
491
492/**
493 * Implements popf.
494 *
495 * @param enmEffOpSize The effective operand size.
496 */
497IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
498{
499 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
500 uint32_t const fEflOld = pCtx->eflags.u;
501 VBOXSTRICTRC rcStrict;
502 uint32_t fEflNew;
503
504 /*
505 * V8086 is special as usual.
506 */
507 if (fEflOld & X86_EFL_VM)
508 {
509 /*
510 * Almost anything goes if IOPL is 3.
511 */
512 if (X86_EFL_GET_IOPL(fEflOld) == 3)
513 {
514 switch (enmEffOpSize)
515 {
516 case IEMMODE_16BIT:
517 {
518 uint16_t u16Value;
519 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
520 if (rcStrict != VINF_SUCCESS)
521 return rcStrict;
522 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
523 break;
524 }
525 case IEMMODE_32BIT:
526 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
527 if (rcStrict != VINF_SUCCESS)
528 return rcStrict;
529 break;
530 IEM_NOT_REACHED_DEFAULT_CASE_RET();
531 }
532
533 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
534 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
535 }
536 /*
537 * Interrupt flag virtualization with CR4.VME=1.
538 */
539 else if ( enmEffOpSize == IEMMODE_16BIT
540 && (pCtx->cr4 & X86_CR4_VME) )
541 {
542 uint16_t u16Value;
543 RTUINT64U TmpRsp;
544 TmpRsp.u = pCtx->rsp;
545 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
546 if (rcStrict != VINF_SUCCESS)
547 return rcStrict;
548
549 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
550 * or before? */
551 if ( ( (u16Value & X86_EFL_IF)
552 && (fEflOld & X86_EFL_VIP))
553 || (u16Value & X86_EFL_TF) )
554 return iemRaiseGeneralProtectionFault0(pIemCpu);
555
556 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
557 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
558 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
559 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
560
561 pCtx->rsp = TmpRsp.u;
562 }
563 else
564 return iemRaiseGeneralProtectionFault0(pIemCpu);
565
566 }
567 /*
568 * Not in V8086 mode.
569 */
570 else
571 {
572 /* Pop the flags. */
573 switch (enmEffOpSize)
574 {
575 case IEMMODE_16BIT:
576 {
577 uint16_t u16Value;
578 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
579 if (rcStrict != VINF_SUCCESS)
580 return rcStrict;
581 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
582 break;
583 }
584 case IEMMODE_32BIT:
585 case IEMMODE_64BIT:
586 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
587 if (rcStrict != VINF_SUCCESS)
588 return rcStrict;
589 break;
590 IEM_NOT_REACHED_DEFAULT_CASE_RET();
591 }
592
593 /* Merge them with the current flags. */
594 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
595 || pIemCpu->uCpl == 0)
596 {
597 fEflNew &= X86_EFL_POPF_BITS;
598 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
599 }
600 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
601 {
602 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
603 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
604 }
605 else
606 {
607 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
608 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
609 }
610 }
611
612 /*
613 * Commit the flags.
614 */
615 Assert(fEflNew & RT_BIT_32(1));
616 pCtx->eflags.u = fEflNew;
617 iemRegAddToRip(pIemCpu, cbInstr);
618
619 return VINF_SUCCESS;
620}
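
/*
 * Illustrative sketch only, not part of the original file: the protected
 * mode POPF merge rules above condensed into a single writable mask.  This
 * should be value equivalent to the three branches: only ring-0 may change
 * IOPL, and IF may only be changed when CPL <= IOPL.
 */
#if 0
static uint32_t iemSketchPopfMerge(uint32_t fEflOld, uint32_t fEflPopped, uint8_t uCpl)
{
    uint32_t fWritable = X86_EFL_POPF_BITS;
    if (uCpl > 0)
        fWritable &= ~X86_EFL_IOPL;
    if (uCpl > X86_EFL_GET_IOPL(fEflOld))
        fWritable &= ~X86_EFL_IF;
    return (fEflPopped & fWritable) | (fEflOld & ~fWritable);
}
#endif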
621
622
623/**
624 * Implements a 16-bit indirect call.
625 *
626 * @param uNewPC The new program counter (RIP) value (loaded from the
627 * operand).
629 */
630IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
631{
632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
633 uint16_t uOldPC = pCtx->ip + cbInstr;
634 if (uNewPC > pCtx->csHid.u32Limit)
635 return iemRaiseGeneralProtectionFault0(pIemCpu);
636
637 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
638 if (rcStrict != VINF_SUCCESS)
639 return rcStrict;
640
641 pCtx->rip = uNewPC;
642 return VINF_SUCCESS;
643
644}
645
646
647/**
648 * Implements a 16-bit relative call.
649 *
650 * @param offDisp The displacement offset.
651 */
652IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
653{
654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
655 uint16_t uOldPC = pCtx->ip + cbInstr;
656 uint16_t uNewPC = uOldPC + offDisp;
657 if (uNewPC > pCtx->csHid.u32Limit)
658 return iemRaiseGeneralProtectionFault0(pIemCpu);
659
660 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
661 if (rcStrict != VINF_SUCCESS)
662 return rcStrict;
663
664 pCtx->rip = uNewPC;
665 return VINF_SUCCESS;
666}
667
668
669/**
670 * Implements a 32-bit indirect call.
671 *
672 * @param uNewPC The new program counter (RIP) value (loaded from the
673 * operand).
675 */
676IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
677{
678 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
679 uint32_t uOldPC = pCtx->eip + cbInstr;
680 if (uNewPC > pCtx->csHid.u32Limit)
681 return iemRaiseGeneralProtectionFault0(pIemCpu);
682
683 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
684 if (rcStrict != VINF_SUCCESS)
685 return rcStrict;
686
687 pCtx->rip = uNewPC;
688 return VINF_SUCCESS;
689
690}
691
692
693/**
694 * Implements a 32-bit relative call.
695 *
696 * @param offDisp The displacement offset.
697 */
698IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
699{
700 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
701 uint32_t uOldPC = pCtx->eip + cbInstr;
702 uint32_t uNewPC = uOldPC + offDisp;
703 if (uNewPC > pCtx->csHid.u32Limit)
704 return iemRaiseGeneralProtectionFault0(pIemCpu);
705
706 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
707 if (rcStrict != VINF_SUCCESS)
708 return rcStrict;
709
710 pCtx->rip = uNewPC;
711 return VINF_SUCCESS;
712}
713
714
715/**
716 * Implements a 64-bit indirect call.
717 *
718 * @param uNewPC The new program counter (RIP) value (loaded from the
719 * operand).
721 */
722IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
723{
724 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
725 uint64_t uOldPC = pCtx->rip + cbInstr;
726 if (!IEM_IS_CANONICAL(uNewPC))
727 return iemRaiseGeneralProtectionFault0(pIemCpu);
728
729 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
730 if (rcStrict != VINF_SUCCESS)
731 return rcStrict;
732
733 pCtx->rip = uNewPC;
734 return VINF_SUCCESS;
735
736}
737
738
739/**
740 * Implements a 64-bit relative call.
741 *
742 * @param offDisp The displacement offset.
743 */
744IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
745{
746 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
747 uint64_t uOldPC = pCtx->rip + cbInstr;
748 uint64_t uNewPC = uOldPC + offDisp;
749 if (!IEM_IS_CANONICAL(uNewPC))
750 return iemRaiseNotCanonical(pIemCpu);
751
752 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
753 if (rcStrict != VINF_SUCCESS)
754 return rcStrict;
755
756 pCtx->rip = uNewPC;
757 return VINF_SUCCESS;
758}
759
760
761/**
762 * Implements far jumps and calls thru task segments (TSS).
763 *
764 * @param uSel The selector.
765 * @param enmBranch The kind of branching we're performing.
766 * @param enmEffOpSize The effective operand size.
767 * @param pDesc The descriptor corresponding to @a uSel. The type is
768 * task segment.
769 */
770IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
771{
772 /* Call various functions to do the work. */
773 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
774}
775
776
777/**
778 * Implements far jumps and calls thru task gates.
779 *
780 * @param uSel The selector.
781 * @param enmBranch The kind of branching we're performing.
782 * @param enmEffOpSize The effective operand size.
783 * @param pDesc The descriptor corresponding to @a uSel. The type is
784 * task gate.
785 */
786IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
787{
788 /* Call various functions to do the work. */
789 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
790}
791
792
793/**
794 * Implements far jumps and calls thru call gates.
795 *
796 * @param uSel The selector.
797 * @param enmBranch The kind of branching we're performing.
798 * @param enmEffOpSize The effective operand size.
799 * @param pDesc The descriptor corresponding to @a uSel. The type is
800 * call gate.
801 */
802IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
803{
804 /* Call various functions to do the work. */
805 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
806}
807
808
809/**
810 * Implements far jumps and calls thru system selectors.
811 *
812 * @param uSel The selector.
813 * @param enmBranch The kind of branching we're performing.
814 * @param enmEffOpSize The effective operand size.
815 * @param pDesc The descriptor corresponding to @a uSel.
816 */
817IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
818{
819 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
820 Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
821
822 if (IEM_IS_LONG_MODE(pIemCpu))
823 switch (pDesc->Legacy.Gen.u4Type)
824 {
825 case AMD64_SEL_TYPE_SYS_CALL_GATE:
826 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
827
828 default:
829 case AMD64_SEL_TYPE_SYS_LDT:
830 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
831 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
832 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
833 case AMD64_SEL_TYPE_SYS_INT_GATE:
834 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
835 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
836
837 }
838
839 switch (pDesc->Legacy.Gen.u4Type)
840 {
841 case X86_SEL_TYPE_SYS_286_CALL_GATE:
842 case X86_SEL_TYPE_SYS_386_CALL_GATE:
843 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
844
845 case X86_SEL_TYPE_SYS_TASK_GATE:
846 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
847
848 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
849 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
850 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
851
852 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
853 Log(("branch %04x -> busy 286 TSS\n", uSel));
854 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
855
856 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
857 Log(("branch %04x -> busy 386 TSS\n", uSel));
858 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
859
860 default:
861 case X86_SEL_TYPE_SYS_LDT:
862 case X86_SEL_TYPE_SYS_286_INT_GATE:
863 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
864 case X86_SEL_TYPE_SYS_386_INT_GATE:
865 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
866 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
867 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
868 }
869}
870
871
872/**
873 * Implements far jumps.
874 *
875 * @param uSel The selector.
876 * @param offSeg The segment offset.
877 * @param enmEffOpSize The effective operand size.
878 */
879IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
880{
881 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
882 NOREF(cbInstr);
883 Assert(offSeg <= UINT32_MAX);
884
885 /*
886 * Real mode and V8086 mode are easy. The only snag seems to be that
887 * CS.limit doesn't change and the limit check is done against the current
888 * limit.
889 */
890 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
891 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
892 {
893 if (offSeg > pCtx->csHid.u32Limit)
894 return iemRaiseGeneralProtectionFault0(pIemCpu);
895
896 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
897 pCtx->rip = offSeg;
898 else
899 pCtx->rip = offSeg & UINT16_MAX;
900 pCtx->cs = uSel;
901 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
902 /** @todo REM reset the accessed bit (see on jmp far16 after disabling
903 * PE. Check with VT-x and AMD-V. */
904#ifdef IEM_VERIFICATION_MODE
905 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
906#endif
907 return VINF_SUCCESS;
908 }
909
910 /*
911 * Protected mode. Need to parse the specified descriptor...
912 */
913 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
914 {
915 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
916 return iemRaiseGeneralProtectionFault0(pIemCpu);
917 }
918
919 /* Fetch the descriptor. */
920 IEMSELDESC Desc;
921 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
922 if (rcStrict != VINF_SUCCESS)
923 return rcStrict;
924
925 /* Is it there? */
926 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
927 {
928 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
929 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
930 }
931
932 /*
933 * Deal with it according to its type. We do the standard code selectors
934 * here and dispatch the system selectors to worker functions.
935 */
936 if (!Desc.Legacy.Gen.u1DescType)
937 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
938
939 /* Only code segments. */
940 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
941 {
942 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
943 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
944 }
945
946 /* L vs D. */
947 if ( Desc.Legacy.Gen.u1Long
948 && Desc.Legacy.Gen.u1DefBig
949 && IEM_IS_LONG_MODE(pIemCpu))
950 {
951 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
952 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
953 }
954
955 /* DPL/RPL/CPL check, where conforming segments make a difference. */
956 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
957 {
958 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
959 {
960 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
961 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
962 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
963 }
964 }
965 else
966 {
967 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
968 {
969 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
970 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
971 }
972 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
973 {
974 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
975 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
976 }
977 }
978
979 /* Chop the high bits if 16-bit (Intel says so). */
980 if (enmEffOpSize == IEMMODE_16BIT)
981 offSeg &= UINT16_MAX;
982
983 /* Limit check. (Should alternatively check for non-canonical addresses
984 here, but that is ruled out by offSeg being 32-bit, right?) */
985 uint64_t u64Base;
986 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
987 if (Desc.Legacy.Gen.u1Granularity)
988 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
989 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
990 u64Base = 0;
991 else
992 {
993 if (offSeg > cbLimit)
994 {
995 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
996 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
997 }
998 u64Base = X86DESC_BASE(Desc.Legacy);
999 }
1000
1001 /*
1002 * Ok, everything checked out fine. Now set the accessed bit before
1003 * committing the result into CS, CSHID and RIP.
1004 */
1005 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1006 {
1007 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1008 if (rcStrict != VINF_SUCCESS)
1009 return rcStrict;
1010#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1011 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1012#endif
1013 }
1014
1015 /* commit */
1016 pCtx->rip = offSeg;
1017 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1018 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1019 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1020 pCtx->csHid.u32Limit = cbLimit;
1021 pCtx->csHid.u64Base = u64Base;
1022 /** @todo check if the hidden bits are loaded correctly for 64-bit
1023 * mode. */
1024 return VINF_SUCCESS;
1025}
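
/*
 * Illustrative sketch only, not part of the original file: how the 20-bit
 * descriptor limit is expanded when the granularity bit is set, as done for
 * CS above and for SS/CS in the far call and return code below.
 */
#if 0
static uint32_t iemSketchExpandSegLimit(uint32_t u20Limit, bool fGranularity)
{
    if (fGranularity)
        return (u20Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK; /* 4 KiB units, low 12 bits set */
    return u20Limit;                                        /* byte units */
}
#endif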
1026
1027
1028/**
1029 * Implements far calls.
1030 *
1031 * This is very similar to iemCImpl_FarJmp.
1032 *
1033 * @param uSel The selector.
1034 * @param offSeg The segment offset.
1035 * @param enmEffOpSize The operand size (in case we need it).
1036 */
1037IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1038{
1039 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1040 VBOXSTRICTRC rcStrict;
1041 uint64_t uNewRsp;
1042 RTPTRUNION uPtrRet;
1043
1044 /*
1045 * Real mode and V8086 mode are easy. The only snag seems to be that
1046 * CS.limit doesn't change and the limit check is done against the current
1047 * limit.
1048 */
1049 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1050 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1051 {
1052 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1053
1054 /* Check stack first - may #SS(0). */
1055 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1056 &uPtrRet.pv, &uNewRsp);
1057 if (rcStrict != VINF_SUCCESS)
1058 return rcStrict;
1059
1060 /* Check the target address range. */
1061 if (offSeg > UINT32_MAX)
1062 return iemRaiseGeneralProtectionFault0(pIemCpu);
1063
1064 /* Everything is fine, push the return address. */
1065 if (enmEffOpSize == IEMMODE_16BIT)
1066 {
1067 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1068 uPtrRet.pu16[1] = pCtx->cs;
1069 }
1070 else
1071 {
1072 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1073 uPtrRet.pu16[3] = pCtx->cs;
1074 }
1075 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1076 if (rcStrict != VINF_SUCCESS)
1077 return rcStrict;
1078
1079 /* Branch. */
1080 pCtx->rip = offSeg;
1081 pCtx->cs = uSel;
1082 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
1083 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
1084 * after disabling PE.) Check with VT-x and AMD-V. */
1085#ifdef IEM_VERIFICATION_MODE
1086 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
1087#endif
1088 return VINF_SUCCESS;
1089 }
1090
1091 /*
1092 * Protected mode. Need to parse the specified descriptor...
1093 */
1094 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1095 {
1096 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1097 return iemRaiseGeneralProtectionFault0(pIemCpu);
1098 }
1099
1100 /* Fetch the descriptor. */
1101 IEMSELDESC Desc;
1102 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1103 if (rcStrict != VINF_SUCCESS)
1104 return rcStrict;
1105
1106 /*
1107 * Deal with it according to its type. We do the standard code selectors
1108 * here and dispatch the system selectors to worker functions.
1109 */
1110 if (!Desc.Legacy.Gen.u1DescType)
1111 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1112
1113 /* Only code segments. */
1114 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1115 {
1116 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1117 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1118 }
1119
1120 /* L vs D. */
1121 if ( Desc.Legacy.Gen.u1Long
1122 && Desc.Legacy.Gen.u1DefBig
1123 && IEM_IS_LONG_MODE(pIemCpu))
1124 {
1125 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1126 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1127 }
1128
1129 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1130 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1131 {
1132 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1133 {
1134 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1135 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1136 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1137 }
1138 }
1139 else
1140 {
1141 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1142 {
1143 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1144 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1145 }
1146 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1147 {
1148 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1149 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1150 }
1151 }
1152
1153 /* Is it there? */
1154 if (!Desc.Legacy.Gen.u1Present)
1155 {
1156 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1157 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1158 }
1159
1160 /* Check stack first - may #SS(0). */
1161 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1162 * 16-bit code cause a two or four byte CS to be pushed? */
1163 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1164 enmEffOpSize == IEMMODE_64BIT ? 8+8
1165 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1166 &uPtrRet.pv, &uNewRsp);
1167 if (rcStrict != VINF_SUCCESS)
1168 return rcStrict;
1169
1170 /* Chop the high bits if 16-bit (Intel says so). */
1171 if (enmEffOpSize == IEMMODE_16BIT)
1172 offSeg &= UINT16_MAX;
1173
1174 /* Limit / canonical check. */
1175 uint64_t u64Base;
1176 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1177 if (Desc.Legacy.Gen.u1Granularity)
1178 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1179
1180 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1181 {
1182 if (!IEM_IS_CANONICAL(offSeg))
1183 {
1184 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1185 return iemRaiseNotCanonical(pIemCpu);
1186 }
1187 u64Base = 0;
1188 }
1189 else
1190 {
1191 if (offSeg > cbLimit)
1192 {
1193 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1194 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1195 }
1196 u64Base = X86DESC_BASE(Desc.Legacy);
1197 }
1198
1199 /*
1200 * Now set the accessed bit before
1201 * writing the return address to the stack and committing the result into
1202 * CS, CSHID and RIP.
1203 */
1204 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1205 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1206 {
1207 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1208 if (rcStrict != VINF_SUCCESS)
1209 return rcStrict;
1210#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1211 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1212#endif
1213 }
1214
1215 /* stack */
1216 if (enmEffOpSize == IEMMODE_16BIT)
1217 {
1218 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1219 uPtrRet.pu16[1] = pCtx->cs;
1220 }
1221 else if (enmEffOpSize == IEMMODE_32BIT)
1222 {
1223 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1224 uPtrRet.pu32[1] = pCtx->cs; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1225 }
1226 else
1227 {
1228 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1229 uPtrRet.pu64[1] = pCtx->cs; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1230 }
1231 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1232 if (rcStrict != VINF_SUCCESS)
1233 return rcStrict;
1234
1235 /* commit */
1236 pCtx->rip = offSeg;
1237 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1238 pCtx->cs |= pIemCpu->uCpl;
1239 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1240 pCtx->csHid.u32Limit = cbLimit;
1241 pCtx->csHid.u64Base = u64Base;
1242 /** @todo check if the hidden bits are loaded correctly for 64-bit
1243 * mode. */
1244 return VINF_SUCCESS;
1245}
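
/*
 * Illustrative sketch only, not part of the original file: the return frames
 * the protected mode far call above pushes, lowest address first.  What ends
 * up in the bytes above the 16-bit selector is covered by the @todo remarks
 * in the code.
 */
#if 0
typedef struct IEMSKETCHFARRET16 { uint16_t ip;  uint16_t cs; } IEMSKETCHFARRET16;
typedef struct IEMSKETCHFARRET32 { uint32_t eip; uint32_t cs; } IEMSKETCHFARRET32;
typedef struct IEMSKETCHFARRET64 { uint64_t rip; uint64_t cs; } IEMSKETCHFARRET64;
#endif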
1246
1247
1248/**
1249 * Implements retf.
1250 *
1251 * @param enmEffOpSize The effective operand size.
1252 * @param cbPop The amount of arguments to pop from the stack
1253 * (bytes).
1254 */
1255IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1256{
1257 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1258 VBOXSTRICTRC rcStrict;
1259 RTCPTRUNION uPtrFrame;
1260 uint64_t uNewRsp;
1261 uint64_t uNewRip;
1262 uint16_t uNewCs;
1263 NOREF(cbInstr);
1264
1265 /*
1266 * Read the stack values first.
1267 */
1268 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1269 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1270 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1271 if (rcStrict != VINF_SUCCESS)
1272 return rcStrict;
1273 if (enmEffOpSize == IEMMODE_16BIT)
1274 {
1275 uNewRip = uPtrFrame.pu16[0];
1276 uNewCs = uPtrFrame.pu16[1];
1277 }
1278 else if (enmEffOpSize == IEMMODE_32BIT)
1279 {
1280 uNewRip = uPtrFrame.pu32[0];
1281 uNewCs = uPtrFrame.pu16[2];
1282 }
1283 else
1284 {
1285 uNewRip = uPtrFrame.pu64[0];
1286 uNewCs = uPtrFrame.pu16[4];
1287 }
1288
1289 /*
1290 * Real mode and V8086 mode are easy.
1291 */
1292 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1293 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1294 {
1295 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1296 /** @todo check how this is supposed to work if sp=0xfffe. */
1297
1298 /* Check the limit of the new EIP. */
1299 /** @todo Intel pseudo code only does the limit check for 16-bit
1300 * operands, AMD does not make any distinction. What is right? */
1301 if (uNewRip > pCtx->csHid.u32Limit)
1302 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1303
1304 /* commit the operation. */
1305 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1306 if (rcStrict != VINF_SUCCESS)
1307 return rcStrict;
1308 pCtx->rip = uNewRip;
1309 pCtx->cs = uNewCs;
1310 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1311 /** @todo do we load attribs and limit as well? */
1312 if (cbPop)
1313 iemRegAddToRsp(pCtx, cbPop);
1314 return VINF_SUCCESS;
1315 }
1316
1317 /*
1318 * Protected mode is complicated, of course.
1319 */
1320 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1321 {
1322 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1323 return iemRaiseGeneralProtectionFault0(pIemCpu);
1324 }
1325
1326 /* Fetch the descriptor. */
1327 IEMSELDESC DescCs;
1328 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1329 if (rcStrict != VINF_SUCCESS)
1330 return rcStrict;
1331
1332 /* Can only return to a code selector. */
1333 if ( !DescCs.Legacy.Gen.u1DescType
1334 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1335 {
1336 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1337 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1338 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1339 }
1340
1341 /* L vs D. */
1342 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1343 && DescCs.Legacy.Gen.u1DefBig
1344 && IEM_IS_LONG_MODE(pIemCpu))
1345 {
1346 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1347 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1348 }
1349
1350 /* DPL/RPL/CPL checks. */
1351 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1352 {
1353 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1354 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1355 }
1356
1357 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1358 {
1359 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1360 {
1361 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1362 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1363 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1364 }
1365 }
1366 else
1367 {
1368 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1369 {
1370 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1371 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1372 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1373 }
1374 }
1375
1376 /* Is it there? */
1377 if (!DescCs.Legacy.Gen.u1Present)
1378 {
1379 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1380 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1381 }
1382
1383 /*
1384 * Return to outer privilege? (We'll typically have entered via a call gate.)
1385 */
1386 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1387 {
1388 /* Read the return pointer, it comes before the parameters. */
1389 RTCPTRUNION uPtrStack;
1390 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1391 if (rcStrict != VINF_SUCCESS)
1392 return rcStrict;
1393 uint16_t uNewOuterSs;
1394 uint64_t uNewOuterRsp;
1395 if (enmEffOpSize == IEMMODE_16BIT)
1396 {
1397 uNewOuterRsp = uPtrFrame.pu16[0];
1398 uNewOuterSs = uPtrFrame.pu16[1];
1399 }
1400 else if (enmEffOpSize == IEMMODE_32BIT)
1401 {
1402 uNewOuterRsp = uPtrFrame.pu32[0];
1403 uNewOuterSs = uPtrFrame.pu16[2];
1404 }
1405 else
1406 {
1407 uNewOuterRsp = uPtrFrame.pu64[0];
1408 uNewOuterSs = uPtrFrame.pu16[4];
1409 }
1410
1411 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1412 and read the selector. */
1413 IEMSELDESC DescSs;
1414 if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
1415 {
1416 if ( !DescCs.Legacy.Gen.u1Long
1417 || (uNewOuterSs & X86_SEL_RPL) == 3)
1418 {
1419 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1420 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1421 return iemRaiseGeneralProtectionFault0(pIemCpu);
1422 }
1423 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1424 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1425 }
1426 else
1427 {
1428 /* Fetch the descriptor for the new stack segment. */
1429 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1430 if (rcStrict != VINF_SUCCESS)
1431 return rcStrict;
1432 }
1433
1434 /* Check that RPL of stack and code selectors match. */
1435 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1436 {
1437 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1438 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1439 }
1440
1441 /* Must be a writable data segment. */
1442 if ( !DescSs.Legacy.Gen.u1DescType
1443 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1444 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1445 {
1446 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1447 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1448 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1449 }
1450
1451 /* L vs D. (Not mentioned by intel.) */
1452 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1453 && DescSs.Legacy.Gen.u1DefBig
1454 && IEM_IS_LONG_MODE(pIemCpu))
1455 {
1456 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1457 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1458 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1459 }
1460
1461 /* DPL/RPL/CPL checks. */
1462 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1463 {
1464 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1465 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1466 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1467 }
1468
1469 /* Is it there? */
1470 if (!DescSs.Legacy.Gen.u1Present)
1471 {
1472 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1473 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1474 }
1475
1476 /* Calc SS limit.*/
1477 uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy);
1478 if (DescSs.Legacy.Gen.u1Granularity)
1479 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1480
1481
1482 /* Is RIP canonical or within CS.limit? */
1483 uint64_t u64Base;
1484 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1485 if (DescCs.Legacy.Gen.u1Granularity)
1486 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1487
1488 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1489 {
1490 if (!IEM_IS_CANONICAL(uNewRip))
1491 {
1492 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1493 return iemRaiseNotCanonical(pIemCpu);
1494 }
1495 u64Base = 0;
1496 }
1497 else
1498 {
1499 if (uNewRip > cbLimitCs)
1500 {
1501 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1502 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1503 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1504 }
1505 u64Base = X86DESC_BASE(DescCs.Legacy);
1506 }
1507
1508 /*
1509 * Now set the accessed bit before
1510 * committing the result into
1511 * CS, CSHID and RIP.
1512 */
1513 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1514 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1515 {
1516 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1517 if (rcStrict != VINF_SUCCESS)
1518 return rcStrict;
1519#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1520 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1521#endif
1522 }
1523 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1524 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1525 {
1526 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1527 if (rcStrict != VINF_SUCCESS)
1528 return rcStrict;
1529#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1530 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1531#endif
1532 }
1533
1534 /* commit */
1535 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1536 if (rcStrict != VINF_SUCCESS)
1537 return rcStrict;
1538 if (enmEffOpSize == IEMMODE_16BIT)
1539 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1540 else
1541 pCtx->rip = uNewRip;
1542 pCtx->cs = uNewCs;
1543 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1544 pCtx->csHid.u32Limit = cbLimitCs;
1545 pCtx->csHid.u64Base = u64Base;
1546 pCtx->rsp = uNewRsp;
1547 pCtx->ss = uNewCs;
1548 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSs.Legacy);
1549 pCtx->ssHid.u32Limit = cbLimitSs;
1550 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1551 pCtx->ssHid.u64Base = 0;
1552 else
1553 pCtx->ssHid.u64Base = X86DESC_BASE(DescSs.Legacy);
1554
1555 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1556 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
1557 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
1558 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
1559 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
1560
1561 /** @todo check if the hidden bits are loaded correctly for 64-bit
1562 * mode. */
1563
1564 if (cbPop)
1565 iemRegAddToRsp(pCtx, cbPop);
1566
1567 /* Done! */
1568 }
1569 /*
1570 * Return to the same privilege level
1571 */
1572 else
1573 {
1574 /* Limit / canonical check. */
1575 uint64_t u64Base;
1576 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1577 if (DescCs.Legacy.Gen.u1Granularity)
1578 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1579
1580 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1581 {
1582 if (!IEM_IS_CANONICAL(uNewRip))
1583 {
1584 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1585 return iemRaiseNotCanonical(pIemCpu);
1586 }
1587 u64Base = 0;
1588 }
1589 else
1590 {
1591 if (uNewRip > cbLimitCs)
1592 {
1593 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1594 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1595 }
1596 u64Base = X86DESC_BASE(DescCs.Legacy);
1597 }
1598
1599 /*
1600 * Now set the accessed bit before
1601 * committing the result into
1602 * CS, CSHID and RIP.
1603 */
1604 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1605 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1606 {
1607 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1608 if (rcStrict != VINF_SUCCESS)
1609 return rcStrict;
1610#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1611 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1612#endif
1613 }
1614
1615 /* commit */
1616 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1617 if (rcStrict != VINF_SUCCESS)
1618 return rcStrict;
1619 if (enmEffOpSize == IEMMODE_16BIT)
1620 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1621 else
1622 pCtx->rip = uNewRip;
1623 pCtx->cs = uNewCs;
1624 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1625 pCtx->csHid.u32Limit = cbLimitCs;
1626 pCtx->csHid.u64Base = u64Base;
1627 /** @todo check if the hidden bits are loaded correctly for 64-bit
1628 * mode. */
1629 if (cbPop)
1630 iemRegAddToRsp(pCtx, cbPop);
1631 }
1632 return VINF_SUCCESS;
1633}
1634
1635
1636/**
1637 * Implements retn.
1638 *
1639 * We're doing this in C because of the \#GP that might be raised if the popped
1640 * program counter is out of bounds.
1641 *
1642 * @param enmEffOpSize The effective operand size.
1643 * @param cbPop The amount of arguments to pop from the stack
1644 * (bytes).
1645 */
1646IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1647{
1648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1649 NOREF(cbInstr);
1650
1651 /* Fetch the RSP from the stack. */
1652 VBOXSTRICTRC rcStrict;
1653 RTUINT64U NewRip;
1654 RTUINT64U NewRsp;
1655 NewRsp.u = pCtx->rsp;
1656 switch (enmEffOpSize)
1657 {
1658 case IEMMODE_16BIT:
1659 NewRip.u = 0;
1660 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1661 break;
1662 case IEMMODE_32BIT:
1663 NewRip.u = 0;
1664 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1665 break;
1666 case IEMMODE_64BIT:
1667 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1668 break;
1669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1670 }
1671 if (rcStrict != VINF_SUCCESS)
1672 return rcStrict;
1673
1674 /* Check the new RSP before loading it. */
1675 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1676 * of it. The canonical test is performed here and for call. */
1677 if (enmEffOpSize != IEMMODE_64BIT)
1678 {
1679 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
1680 {
1681 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
1682 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1683 }
1684 }
1685 else
1686 {
1687 if (!IEM_IS_CANONICAL(NewRip.u))
1688 {
1689 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1690 return iemRaiseNotCanonical(pIemCpu);
1691 }
1692 }
1693
1694 /* Commit it. */
1695 pCtx->rip = NewRip.u;
1696 pCtx->rsp = NewRsp.u;
1697 if (cbPop)
1698 iemRegAddToRsp(pCtx, cbPop);
1699
1700 return VINF_SUCCESS;
1701}
1702
1703
1704/**
1705 * Implements leave.
1706 *
1707 * We're doing this in C because messing with the stack registers is annoying
1708 * since they depend on SS attributes.
1709 *
1710 * @param enmEffOpSize The effective operand size.
1711 */
1712IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1713{
1714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1715
1716 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1717 RTUINT64U NewRsp;
1718 if (pCtx->ssHid.Attr.n.u1Long)
1719 {
1720 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1721 NewRsp.u = pCtx->rsp;
1722 NewRsp.Words.w0 = pCtx->bp;
1723 }
1724 else if (pCtx->ssHid.Attr.n.u1DefBig)
1725 NewRsp.u = pCtx->ebp;
1726 else
1727 NewRsp.u = pCtx->rbp;
1728
1729 /* Pop RBP according to the operand size. */
1730 VBOXSTRICTRC rcStrict;
1731 RTUINT64U NewRbp;
1732 switch (enmEffOpSize)
1733 {
1734 case IEMMODE_16BIT:
1735 NewRbp.u = pCtx->rbp;
1736 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1737 break;
1738 case IEMMODE_32BIT:
1739 NewRbp.u = 0;
1740 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1741 break;
1742 case IEMMODE_64BIT:
1743 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1744 break;
1745 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1746 }
1747 if (rcStrict != VINF_SUCCESS)
1748 return rcStrict;
1749
1750
1751 /* Commit it. */
1752 pCtx->rbp = NewRbp.u;
1753 pCtx->rsp = NewRsp.u;
1754 iemRegAddToRip(pIemCpu, cbInstr);
1755
1756 return VINF_SUCCESS;
1757}
1758
1759
1760/**
1761 * Implements int3 and int XX.
1762 *
1763 * @param u8Int The interrupt vector number.
1764 * @param fIsBpInstr Is it the breakpoint instruction.
1765 */
1766IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1767{
1768 Assert(pIemCpu->cXcptRecursions == 0);
1769 return iemRaiseXcptOrInt(pIemCpu,
1770 cbInstr,
1771 u8Int,
1772 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1773 0,
1774 0);
1775}
1776
1777
1778/**
1779 * Implements iret for real mode and V8086 mode.
1780 *
1781 * @param enmEffOpSize The effective operand size.
1782 */
1783IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1784{
1785 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1786 NOREF(cbInstr);
1787
1788 /*
1789 * iret throws an exception if VME isn't enabled.
1790 */
1791 if ( pCtx->eflags.Bits.u1VM
1792 && !(pCtx->cr4 & X86_CR4_VME))
1793 return iemRaiseGeneralProtectionFault0(pIemCpu);
1794
1795 /*
1796 * Do the stack bits, but don't commit RSP before everything checks
1797 * out right.
1798 */
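 /* The frame popped below is, from lower to higher addresses: (E)IP, CS,
    (E)FLAGS - 6 bytes with a 16-bit and 12 bytes with a 32-bit operand size. */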
1799 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1800 VBOXSTRICTRC rcStrict;
1801 RTCPTRUNION uFrame;
1802 uint16_t uNewCs;
1803 uint32_t uNewEip;
1804 uint32_t uNewFlags;
1805 uint64_t uNewRsp;
1806 if (enmEffOpSize == IEMMODE_32BIT)
1807 {
1808 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1809 if (rcStrict != VINF_SUCCESS)
1810 return rcStrict;
1811 uNewEip = uFrame.pu32[0];
1812 uNewCs = (uint16_t)uFrame.pu32[1];
1813 uNewFlags = uFrame.pu32[2];
1814 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1815 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1816 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1817 | X86_EFL_ID;
1818 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1819 }
1820 else
1821 {
1822 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1823 if (rcStrict != VINF_SUCCESS)
1824 return rcStrict;
1825 uNewEip = uFrame.pu16[0];
1826 uNewCs = uFrame.pu16[1];
1827 uNewFlags = uFrame.pu16[2];
1828 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1829 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1830 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1831 /** @todo The intel pseudo code does not indicate what happens to
1832 * reserved flags. We just ignore them. */
1833 }
1834 /** @todo Check how this is supposed to work if sp=0xfffe. */
1835
1836 /*
1837 * Check the limit of the new EIP.
1838 */
1839 /** @todo Only the AMD pseudo code checks the limit here; what's
1840 * right? */
1841 if (uNewEip > pCtx->csHid.u32Limit)
1842 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1843
1844 /*
1845 * V8086 checks and flag adjustments
1846 */
1847 if (pCtx->eflags.Bits.u1VM)
1848 {
1849 if (pCtx->eflags.Bits.u2IOPL == 3)
1850 {
1851 /* Preserve IOPL and clear RF. */
1852 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1853 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1854 }
1855 else if ( enmEffOpSize == IEMMODE_16BIT
1856 && ( !(uNewFlags & X86_EFL_IF)
1857 || !pCtx->eflags.Bits.u1VIP )
1858 && !(uNewFlags & X86_EFL_TF) )
1859 {
1860 /* Move IF to VIF, clear RF and preserve IF and IOPL. */
1861 uNewFlags &= ~X86_EFL_VIF;
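 /* IF is EFLAGS bit 9 and VIF is bit 19, so the shift below moves the
    popped IF value into the VIF position. */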
1862 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1863 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1864 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1865 }
1866 else
1867 return iemRaiseGeneralProtectionFault0(pIemCpu);
1868 }
1869
1870 /*
1871 * Commit the operation.
1872 */
1873 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1874 if (rcStrict != VINF_SUCCESS)
1875 return rcStrict;
1876 pCtx->rip = uNewEip;
1877 pCtx->cs = uNewCs;
1878 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1879 /** @todo do we load attribs and limit as well? */
1880 Assert(uNewFlags & X86_EFL_1);
1881 pCtx->eflags.u = uNewFlags;
1882
1883 return VINF_SUCCESS;
1884}
1885
1886
1887/**
1888 * Implements iret for protected mode.
1889 *
1890 * @param enmEffOpSize The effective operand size.
1891 */
1892IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
1893{
1894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1895 NOREF(cbInstr);
1896
1897 /*
1898 * Nested task return.
1899 */
1900 if (pCtx->eflags.Bits.u1NT)
1901 {
1902 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1903 }
1904 /*
1905 * Normal return.
1906 */
1907 else
1908 {
1909 /*
1910 * Do the stack bits, but don't commit RSP before everything checks
1911 * out right.
1912 */
1913 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1914 VBOXSTRICTRC rcStrict;
1915 RTCPTRUNION uFrame;
1916 uint16_t uNewCs;
1917 uint32_t uNewEip;
1918 uint32_t uNewFlags;
1919 uint64_t uNewRsp;
1920 if (enmEffOpSize == IEMMODE_32BIT)
1921 {
1922 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1923 if (rcStrict != VINF_SUCCESS)
1924 return rcStrict;
1925 uNewEip = uFrame.pu32[0];
1926 uNewCs = (uint16_t)uFrame.pu32[1];
1927 uNewFlags = uFrame.pu32[2];
1928 }
1929 else
1930 {
1931 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1932 if (rcStrict != VINF_SUCCESS)
1933 return rcStrict;
1934 uNewEip = uFrame.pu16[0];
1935 uNewCs = uFrame.pu16[1];
1936 uNewFlags = uFrame.pu16[2];
1937 }
1938 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
1939 if (rcStrict != VINF_SUCCESS)
1940 return rcStrict;
1941
1942 /*
1943 * What are we returning to?
1944 */
1945 if ( (uNewFlags & X86_EFL_VM)
1946 && pIemCpu->uCpl == 0)
1947 {
1948 /* V8086 mode! */
1949 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1950 }
1951 else
1952 {
1953 /*
1954 * Protected mode.
1955 */
1956 /* Read the CS descriptor. */
1957 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1958 {
1959 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
1960 return iemRaiseGeneralProtectionFault0(pIemCpu);
1961 }
1962
1963 IEMSELDESC DescCS;
1964 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
1965 if (rcStrict != VINF_SUCCESS)
1966 {
1967 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
1968 return rcStrict;
1969 }
1970
1971 /* Must be a code descriptor. */
1972 if (!DescCS.Legacy.Gen.u1DescType)
1973 {
1974 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1975 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1976 }
1977 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1978 {
1979 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1980 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1981 }
1982
1983 /* Privilege checks. */
1984 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1985 {
1986 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
1987 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1988 }
1989 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1990 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
1991 {
1992 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
1993 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1994 }
1995
1996 /* Present? */
1997 if (!DescCS.Legacy.Gen.u1Present)
1998 {
1999 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2000 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2001 }
2002
2003 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
2004 if (DescCS.Legacy.Gen.u1Granularity)
2005 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2006
2007 /*
2008 * Return to outer level?
2009 */
2010 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2011 {
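 /* When returning to an outer privilege level the frame additionally
    holds the new (E)SP and SS just above (E)FLAGS. */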
2012 uint16_t uNewSS;
2013 uint32_t uNewESP;
2014 if (enmEffOpSize == IEMMODE_32BIT)
2015 {
2016 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2017 if (rcStrict != VINF_SUCCESS)
2018 return rcStrict;
2019 uNewESP = uFrame.pu32[0];
2020 uNewSS = (uint16_t)uFrame.pu32[1];
2021 }
2022 else
2023 {
2024 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp);
2025 if (rcStrict != VINF_SUCCESS)
2026 return rcStrict;
2027 uNewESP = uFrame.pu16[0];
2028 uNewSS = uFrame.pu16[1];
2029 }
2030 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2031 if (rcStrict != VINF_SUCCESS)
2032 return rcStrict;
2033
2034 /* Read the SS descriptor. */
2035 if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
2036 {
2037 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2038 return iemRaiseGeneralProtectionFault0(pIemCpu);
2039 }
2040
2041 IEMSELDESC DescSS;
2042 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2043 if (rcStrict != VINF_SUCCESS)
2044 {
2045 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2046 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2047 return rcStrict;
2048 }
2049
2050 /* Privilege checks. */
2051 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2052 {
2053 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2054 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2055 }
2056 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2057 {
2058 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2059 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2060 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2061 }
2062
2063 /* Must be a writeable data segment descriptor. */
2064 if (!DescSS.Legacy.Gen.u1DescType)
2065 {
2066 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2067 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2068 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2069 }
2070 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2071 {
2072 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2073 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2074 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2075 }
2076
2077 /* Present? */
2078 if (!DescSS.Legacy.Gen.u1Present)
2079 {
2080 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2081 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2082 }
2083
2084 uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy);
2085 if (DescSS.Legacy.Gen.u1Granularity)
2086 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2087
2088 /* Check EIP. */
2089 if (uNewEip > cbLimitCS)
2090 {
2091 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2092 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2093 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2094 }
2095
2096 /*
2097 * Commit the changes, marking CS and SS accessed first since
2098 * that may fail.
2099 */
2100 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2101 {
2102 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2103 if (rcStrict != VINF_SUCCESS)
2104 return rcStrict;
2105 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2106 }
2107 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2108 {
2109 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2110 if (rcStrict != VINF_SUCCESS)
2111 return rcStrict;
2112 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2113 }
2114
2115 pCtx->rip = uNewEip;
2116 pCtx->cs = uNewCs;
2117 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2118 pCtx->csHid.u32Limit = cbLimitCS;
2119 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2120 pCtx->rsp = uNewESP;
2121 pCtx->ss = uNewSS;
2122 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
2123 pCtx->ssHid.u32Limit = cbLimitSs;
2124 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
2125
2126 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2127 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2128 if (enmEffOpSize != IEMMODE_16BIT)
2129 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2130 if (pIemCpu->uCpl == 0)
2131 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2132 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2133 fEFlagsMask |= X86_EFL_IF;
2134 pCtx->eflags.u &= ~fEFlagsMask;
2135 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2136
2137 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2138 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
2139 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
2140 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
2141 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
2142
2143 /* Done! */
2144
2145 }
2146 /*
2147 * Return to the same level.
2148 */
2149 else
2150 {
2151 /* Check EIP. */
2152 if (uNewEip > cbLimitCS)
2153 {
2154 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2155 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2156 }
2157
2158 /*
2159 * Commit the changes, marking CS first since it may fail.
2160 */
2161 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2162 {
2163 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2164 if (rcStrict != VINF_SUCCESS)
2165 return rcStrict;
2166 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2167 }
2168
2169 pCtx->rip = uNewEip;
2170 pCtx->cs = uNewCs;
2171 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2172 pCtx->csHid.u32Limit = cbLimitCS;
2173 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2174 pCtx->rsp = uNewRsp;
2175
2176 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2177 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2178 if (enmEffOpSize != IEMMODE_16BIT)
2179 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2180 if (pIemCpu->uCpl == 0)
2181 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2182 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2183 fEFlagsMask |= X86_EFL_IF;
2184 pCtx->eflags.u &= ~fEFlagsMask;
2185 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2186 /* Done! */
2187 }
2188 }
2189 }
2190
2191 return VINF_SUCCESS;
2192}
2193
2194
2195/**
2196 * Implements iret for long mode.
2197 *
2198 * @param enmEffOpSize The effective operand size.
2199 */
2200IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2201{
2202 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2203 //VBOXSTRICTRC rcStrict;
2204 //uint64_t uNewRsp;
2205
2206 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2208}
2209
2210
2211/**
2212 * Implements iret.
2213 *
2214 * @param enmEffOpSize The effective operand size.
2215 */
2216IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2217{
2218 /*
2219 * Call a mode specific worker.
2220 */
2221 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2222 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2223 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2224 if (IEM_IS_LONG_MODE(pIemCpu))
2225 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2226
2227 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2228}
2229
2230
2231/**
2232 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2233 *
2234 * @param iSegReg The segment register number (valid).
2235 * @param uSel The new selector value.
2236 */
2237IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2238{
2239 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2240 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2241 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2242
2243 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2244
2245 /*
2246 * Real mode and V8086 mode are easy.
2247 */
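 /* The visible selector is simply copied and the base becomes selector * 16;
    limit and attributes are left untouched (see the #if 0 block below). */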
2248 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2249 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2250 {
2251 *pSel = uSel;
2252 pHid->u64Base = (uint32_t)uSel << 4;
2253#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2254 /** @todo Does the CPU actually load limits and attributes in the
2255 * real/V8086 mode segment load case? It doesn't for CS in far
2256 * jumps... Affects unreal mode. */
2257 pHid->u32Limit = 0xffff;
2258 pHid->Attr.u = 0;
2259 pHid->Attr.n.u1Present = 1;
2260 pHid->Attr.n.u1DescType = 1;
2261 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2262 ? X86_SEL_TYPE_RW
2263 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2264#endif
2265 iemRegAddToRip(pIemCpu, cbInstr);
2266 return VINF_SUCCESS;
2267 }
2268
2269 /*
2270 * Protected mode.
2271 *
2272 * Check if it's a null segment selector value first, that's OK for DS, ES,
2273 * FS and GS. If not null, then we have to load and parse the descriptor.
2274 */
2275 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
2276 {
2277 if (iSegReg == X86_SREG_SS)
2278 {
2279 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2280 || pIemCpu->uCpl != 0
2281 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2282 {
2283 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
2284 return iemRaiseGeneralProtectionFault0(pIemCpu);
2285 }
2286
2287 /* In 64-bit kernel mode, the stack can be 0 because of the way
2288 interrupts are dispatched when in kernel ctx. Just load the
2289 selector value into the register and leave the hidden bits
2290 as is. */
2291 *pSel = uSel;
2292 iemRegAddToRip(pIemCpu, cbInstr);
2293 return VINF_SUCCESS;
2294 }
2295
2296 *pSel = uSel; /* Not RPL, remember :-) */
2297 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2298 && iSegReg != X86_SREG_FS
2299 && iSegReg != X86_SREG_GS)
2300 {
2301 /** @todo figure out what this actually does, it works. Needs
2302 * testcase! */
2303 pHid->Attr.u = 0;
2304 pHid->Attr.n.u1Present = 1;
2305 pHid->Attr.n.u1Long = 1;
2306 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2307 pHid->Attr.n.u2Dpl = 3;
2308 pHid->u32Limit = 0;
2309 pHid->u64Base = 0;
2310 }
2311 else
2312 {
2313 pHid->Attr.u = 0;
2314 pHid->u32Limit = 0;
2315 pHid->u64Base = 0;
2316 }
2317 iemRegAddToRip(pIemCpu, cbInstr);
2318 return VINF_SUCCESS;
2319 }
2320
2321 /* Fetch the descriptor. */
2322 IEMSELDESC Desc;
2323 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2324 if (rcStrict != VINF_SUCCESS)
2325 return rcStrict;
2326
2327 /* Check GPs first. */
2328 if (!Desc.Legacy.Gen.u1DescType)
2329 {
2330 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2331 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2332 }
2333 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2334 {
2335 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2336 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2337 {
2338 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2339 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2340 }
2347 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2348 {
2349 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2350 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2351 }
2352 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2353 {
2354 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2355 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2356 }
2357 }
2358 else
2359 {
2360 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2361 {
2362 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2363 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2364 }
2365 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2366 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2367 {
2368#if 0 /* this is what intel says. */
2369 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2370 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2371 {
2372 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2373 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2374 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2375 }
2376#else /* this is what makes more sense. */
2377 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2378 {
2379 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2380 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2381 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2382 }
2383 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2384 {
2385 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2386 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2387 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2388 }
2389#endif
2390 }
2391 }
2392
2393 /* Is it there? */
2394 if (!Desc.Legacy.Gen.u1Present)
2395 {
2396 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2397 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2398 }
2399
2400 /* The base and limit. */
2401 uint64_t u64Base;
2402 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
2403 if (Desc.Legacy.Gen.u1Granularity)
2404 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2405
2406 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2407 && iSegReg < X86_SREG_FS)
2408 u64Base = 0;
2409 else
2410 u64Base = X86DESC_BASE(Desc.Legacy);
2411
2412 /*
2413 * Ok, everything checked out fine. Now set the accessed bit before
2414 * committing the result into the registers.
2415 */
2416 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2417 {
2418 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2419 if (rcStrict != VINF_SUCCESS)
2420 return rcStrict;
2421 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2422 }
2423
2424 /* commit */
2425 *pSel = uSel;
2426 pHid->Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2427 pHid->u32Limit = cbLimit;
2428 pHid->u64Base = u64Base;
2429
2430 /** @todo check if the hidden bits are loaded correctly for 64-bit
2431 * mode. */
2432
2433 iemRegAddToRip(pIemCpu, cbInstr);
2434 return VINF_SUCCESS;
2435}
2436
2437
2438/**
2439 * Implements 'mov SReg, r/m'.
2440 *
2441 * @param iSegReg The segment register number (valid).
2442 * @param uSel The new selector value.
2443 */
2444IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2445{
2446 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2447 if (rcStrict == VINF_SUCCESS)
2448 {
2449 if (iSegReg == X86_SREG_SS)
2450 {
2451 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2452 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2453 }
2454 }
2455 return rcStrict;
2456}
2457
2458
2459/**
2460 * Implements 'pop SReg'.
2461 *
2462 * @param iSegReg The segment register number (valid).
2463 * @param enmEffOpSize The effective operand size (valid).
2464 */
2465IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2466{
2467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2468 VBOXSTRICTRC rcStrict;
2469
2470 /*
2471 * Read the selector off the stack and join paths with mov ss, reg.
2472 */
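 /* Note: only the low 16 bits of a 32-bit or 64-bit pop are used as the
    selector, but RSP is still advanced by the full operand size. */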
2473 RTUINT64U TmpRsp;
2474 TmpRsp.u = pCtx->rsp;
2475 switch (enmEffOpSize)
2476 {
2477 case IEMMODE_16BIT:
2478 {
2479 uint16_t uSel;
2480 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2481 if (rcStrict == VINF_SUCCESS)
2482 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2483 break;
2484 }
2485
2486 case IEMMODE_32BIT:
2487 {
2488 uint32_t u32Value;
2489 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2490 if (rcStrict == VINF_SUCCESS)
2491 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2492 break;
2493 }
2494
2495 case IEMMODE_64BIT:
2496 {
2497 uint64_t u64Value;
2498 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2499 if (rcStrict == VINF_SUCCESS)
2500 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2501 break;
2502 }
2503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2504 }
2505
2506 /*
2507 * Commit the stack on success.
2508 */
2509 if (rcStrict == VINF_SUCCESS)
2510 {
2511 pCtx->rsp = TmpRsp.u;
2512 if (iSegReg == X86_SREG_SS)
2513 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2514 }
2515 return rcStrict;
2516}
2517
2518
2519/**
2520 * Implements lgs, lfs, les, lds & lss.
2521 */
2522IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2523 uint16_t, uSel,
2524 uint64_t, offSeg,
2525 uint8_t, iSegReg,
2526 uint8_t, iGReg,
2527 IEMMODE, enmEffOpSize)
2528{
2529 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2530 VBOXSTRICTRC rcStrict;
2531
2532 /*
2533 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2534 */
2535 /** @todo verify and test that mov, pop and lXs perform the segment
2536 * register loading in the exact same way. */
2537 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2538 if (rcStrict == VINF_SUCCESS)
2539 {
2540 switch (enmEffOpSize)
2541 {
2542 case IEMMODE_16BIT:
2543 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2544 break;
2545 case IEMMODE_32BIT:
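 /* The high dword of offSeg is zero for a 32-bit operand, so storing
    through the 64-bit reference matches the zero extension performed by
    a 32-bit register write in 64-bit mode. */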
2546 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2547 break;
2548 case IEMMODE_64BIT:
2549 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2550 break;
2551 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2552 }
2553 }
2554
2555 return rcStrict;
2556}
2557
2558
2559/**
2560 * Implements lgdt.
2561 *
2562 * @param iEffSeg The segment of the new gdtr contents.
2563 * @param GCPtrEffSrc The address of the new gdtr contents.
2564 * @param enmEffOpSize The effective operand size.
2565 */
2566IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2567{
2568 if (pIemCpu->uCpl != 0)
2569 return iemRaiseGeneralProtectionFault0(pIemCpu);
2570 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2571
2572 /*
2573 * Fetch the limit and base address.
2574 */
2575 uint16_t cbLimit;
2576 RTGCPTR GCPtrBase;
2577 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2578 if (rcStrict == VINF_SUCCESS)
2579 {
2580 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2581 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2582 else
2583 {
2584 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2585 pCtx->gdtr.cbGdt = cbLimit;
2586 pCtx->gdtr.pGdt = GCPtrBase;
2587 }
2588 if (rcStrict == VINF_SUCCESS)
2589 iemRegAddToRip(pIemCpu, cbInstr);
2590 }
2591 return rcStrict;
2592}
2593
2594
2595/**
2596 * Implements lidt.
2597 *
2598 * @param iEffSeg The segment of the new idtr contents.
2599 * @param GCPtrEffSrc The address of the new idtr contents.
2600 * @param enmEffOpSize The effective operand size.
2601 */
2602IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2603{
2604 if (pIemCpu->uCpl != 0)
2605 return iemRaiseGeneralProtectionFault0(pIemCpu);
2606 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2607
2608 /*
2609 * Fetch the limit and base address.
2610 */
2611 uint16_t cbLimit;
2612 RTGCPTR GCPtrBase;
2613 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2614 if (rcStrict == VINF_SUCCESS)
2615 {
2616 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2617 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2618 else
2619 {
2620 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2621 pCtx->idtr.cbIdt = cbLimit;
2622 pCtx->idtr.pIdt = GCPtrBase;
2623 }
2624 if (rcStrict == VINF_SUCCESS)
2625 iemRegAddToRip(pIemCpu, cbInstr);
2626 }
2627 return rcStrict;
2628}
2629
2630
2631/**
2632 * Implements lldt.
2633 *
2634 * @param uNewLdt The new LDT selector value.
2635 */
2636IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2637{
2638 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2639
2640 /*
2641 * Check preconditions.
2642 */
2643 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2644 {
2645 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2646 return iemRaiseUndefinedOpcode(pIemCpu);
2647 }
2648 if (pIemCpu->uCpl != 0)
2649 {
2650 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2651 return iemRaiseGeneralProtectionFault0(pIemCpu);
2652 }
2653 if (uNewLdt & X86_SEL_LDT)
2654 {
2655 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2656 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2657 }
2658
2659 /*
2660 * Now, loading a NULL selector is easy.
2661 */
2662 if ((uNewLdt & X86_SEL_MASK) == 0)
2663 {
2664 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2665 /** @todo check if the actual value is loaded or if it's always 0. */
2666 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2667 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
2668 else
2669 pCtx->ldtr = 0;
2670 pCtx->ldtrHid.Attr.u = 0;
2671 pCtx->ldtrHid.u64Base = 0;
2672 pCtx->ldtrHid.u32Limit = 0;
2673
2674 iemRegAddToRip(pIemCpu, cbInstr);
2675 return VINF_SUCCESS;
2676 }
2677
2678 /*
2679 * Read the descriptor.
2680 */
2681 IEMSELDESC Desc;
2682 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2683 if (rcStrict != VINF_SUCCESS)
2684 return rcStrict;
2685
2686 /* Check GPs first. */
2687 if (Desc.Legacy.Gen.u1DescType)
2688 {
2689 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2690 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2691 }
2692 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2693 {
2694 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2695 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2696 }
2697 uint64_t u64Base;
2698 if (!IEM_IS_LONG_MODE(pIemCpu))
2699 u64Base = X86DESC_BASE(Desc.Legacy);
2700 else
2701 {
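 /* In long mode the LDT descriptor is 16 bytes; the upper half supplies
    bits 63:32 of the base and its reserved bits must be zero. */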
2702 if (Desc.Long.Gen.u5Zeros)
2703 {
2704 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2705 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2706 }
2707
2708 u64Base = X86DESC64_BASE(Desc.Long);
2709 if (!IEM_IS_CANONICAL(u64Base))
2710 {
2711 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2712 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2713 }
2714 }
2715
2716 /* NP */
2717 if (!Desc.Legacy.Gen.u1Present)
2718 {
2719 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2720 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2721 }
2722
2723 /*
2724 * It checks out alright, update the registers.
2725 */
2726/** @todo check if the actual value is loaded or if the RPL is dropped */
2727 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2728 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
2729 else
2730 pCtx->ldtr = uNewLdt & X86_SEL_MASK;
2731 pCtx->ldtrHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2732 pCtx->ldtrHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2733 pCtx->ldtrHid.u64Base = u64Base;
2734
2735 iemRegAddToRip(pIemCpu, cbInstr);
2736 return VINF_SUCCESS;
2737}
2738
2739
2740/**
2741 * Implements ltr.
2742 *
2743 * @param uNewTr The new task register selector value.
2744 */
2745IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2746{
2747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2748
2749 /*
2750 * Check preconditions.
2751 */
2752 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2753 {
2754 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2755 return iemRaiseUndefinedOpcode(pIemCpu);
2756 }
2757 if (pIemCpu->uCpl != 0)
2758 {
2759 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2760 return iemRaiseGeneralProtectionFault0(pIemCpu);
2761 }
2762 if (uNewTr & X86_SEL_LDT)
2763 {
2764 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2765 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2766 }
2767 if ((uNewTr & X86_SEL_MASK) == 0)
2768 {
2769 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2770 return iemRaiseGeneralProtectionFault0(pIemCpu);
2771 }
2772
2773 /*
2774 * Read the descriptor.
2775 */
2776 IEMSELDESC Desc;
2777 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2778 if (rcStrict != VINF_SUCCESS)
2779 return rcStrict;
2780
2781 /* Check GPs first. */
2782 if (Desc.Legacy.Gen.u1DescType)
2783 {
2784 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2785 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2786 }
2787 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2788 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2789 || IEM_IS_LONG_MODE(pIemCpu)) )
2790 {
2791 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2792 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2793 }
2794 uint64_t u64Base;
2795 if (!IEM_IS_LONG_MODE(pIemCpu))
2796 u64Base = X86DESC_BASE(Desc.Legacy);
2797 else
2798 {
2799 if (Desc.Long.Gen.u5Zeros)
2800 {
2801 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2802 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2803 }
2804
2805 u64Base = X86DESC64_BASE(Desc.Long);
2806 if (!IEM_IS_CANONICAL(u64Base))
2807 {
2808 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2809 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2810 }
2811 }
2812
2813 /* NP */
2814 if (!Desc.Legacy.Gen.u1Present)
2815 {
2816 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2817 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2818 }
2819
2820 /*
2821 * Set it busy.
2822 * Note! Intel says this should lock down the whole descriptor, but we'll
2823 * restrict ourselves to 32-bit for now due to lack of inline
2824 * assembly and such.
2825 */
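 /* Bit 41 of the 8-byte descriptor is the busy bit of the type field
    (bits 40-43); the switch below merely re-aligns the pointer so the
    atomic bit-set operates on an aligned address. */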
2826 void *pvDesc;
2827 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
2828 if (rcStrict != VINF_SUCCESS)
2829 return rcStrict;
2830 switch ((uintptr_t)pvDesc & 3)
2831 {
2832 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2833 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2834 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2835 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2836 }
2837 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2838 if (rcStrict != VINF_SUCCESS)
2839 return rcStrict;
2840 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2841
2842 /*
2843 * It checks out alright, update the registers.
2844 */
2845/** @todo check if the actual value is loaded or if the RPL is dropped */
2846 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2847 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
2848 else
2849 pCtx->tr = uNewTr & X86_SEL_MASK;
2850 pCtx->trHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2851 pCtx->trHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2852 pCtx->trHid.u64Base = u64Base;
2853
2854 iemRegAddToRip(pIemCpu, cbInstr);
2855 return VINF_SUCCESS;
2856}
2857
2858
2859/**
2860 * Implements mov GReg,CRx.
2861 *
2862 * @param iGReg The general register to store the CRx value in.
2863 * @param iCrReg The CRx register to read (valid).
2864 */
2865IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2866{
2867 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2868 if (pIemCpu->uCpl != 0)
2869 return iemRaiseGeneralProtectionFault0(pIemCpu);
2870 Assert(!pCtx->eflags.Bits.u1VM);
2871
2872 /* read it */
2873 uint64_t crX;
2874 switch (iCrReg)
2875 {
2876 case 0: crX = pCtx->cr0; break;
2877 case 2: crX = pCtx->cr2; break;
2878 case 3: crX = pCtx->cr3; break;
2879 case 4: crX = pCtx->cr4; break;
2880 case 8:
2881 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2882 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2883 else
2884 crX = 0xff;
2885 break;
2886 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2887 }
2888
2889 /* store it */
2890 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2891 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2892 else
2893 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2894
2895 iemRegAddToRip(pIemCpu, cbInstr);
2896 return VINF_SUCCESS;
2897}
2898
2899
2900/**
2901 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2902 *
2903 * @param iCrReg The CRx register to write (valid).
2904 * @param uNewCrX The new value.
2905 */
2906IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2907{
2908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2909 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2910 VBOXSTRICTRC rcStrict;
2911 int rc;
2912
2913 /*
2914 * Try to store it.
2915 * Unfortunately, CPUM only does a tiny bit of the work.
2916 */
2917 switch (iCrReg)
2918 {
2919 case 0:
2920 {
2921 /*
2922 * Perform checks.
2923 */
2924 uint64_t const uOldCrX = pCtx->cr0;
2925 uNewCrX |= X86_CR0_ET; /* hardcoded */
2926
2927 /* Check for reserved bits. */
2928 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2929 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2930 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2931 if (uNewCrX & ~(uint64_t)fValid)
2932 {
2933 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2934 return iemRaiseGeneralProtectionFault0(pIemCpu);
2935 }
2936
2937 /* Check for invalid combinations. */
2938 if ( (uNewCrX & X86_CR0_PG)
2939 && !(uNewCrX & X86_CR0_PE) )
2940 {
2941 Log(("Trying to set CR0.PG without CR0.PE\n"));
2942 return iemRaiseGeneralProtectionFault0(pIemCpu);
2943 }
2944
2945 if ( !(uNewCrX & X86_CR0_CD)
2946 && (uNewCrX & X86_CR0_NW) )
2947 {
2948 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2949 return iemRaiseGeneralProtectionFault0(pIemCpu);
2950 }
2951
2952 /* Long mode consistency checks. */
2953 if ( (uNewCrX & X86_CR0_PG)
2954 && !(uOldCrX & X86_CR0_PG)
2955 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2956 {
2957 if (!(pCtx->cr4 & X86_CR4_PAE))
2958 {
2959 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
2960 return iemRaiseGeneralProtectionFault0(pIemCpu);
2961 }
2962 if (pCtx->csHid.Attr.n.u1Long)
2963 {
2964 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
2965 return iemRaiseGeneralProtectionFault0(pIemCpu);
2966 }
2967 }
2968
2969 /** @todo check reserved PDPTR bits as AMD states. */
2970
2971 /*
2972 * Change CR0.
2973 */
2974 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2975 {
2976 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
2977 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
2978 }
2979 else
2980 pCtx->cr0 = uNewCrX;
2981 Assert(pCtx->cr0 == uNewCrX);
2982
2983 /*
2984 * Change EFER.LMA if entering or leaving long mode.
2985 */
2986 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
2987 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2988 {
2989 uint64_t NewEFER = pCtx->msrEFER;
2990 if (uNewCrX & X86_CR0_PG)
2991 NewEFER |= MSR_K6_EFER_LMA;
2992 else
2993 NewEFER &= ~MSR_K6_EFER_LMA;
2994
2995 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2996 CPUMSetGuestEFER(pVCpu, NewEFER);
2997 else
2998 pCtx->msrEFER = NewEFER;
2999 Assert(pCtx->msrEFER == NewEFER);
3000 }
3001
3002 /*
3003 * Inform PGM.
3004 */
3005 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3006 {
3007 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3008 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3009 {
3010 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3011 AssertRCReturn(rc, rc);
3012 /* ignore informational status codes */
3013 }
3014 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3015 /** @todo Status code management. */
3016 }
3017 else
3018 rcStrict = VINF_SUCCESS;
3019 break;
3020 }
3021
3022 /*
3023 * CR2 can be changed without any restrictions.
3024 */
3025 case 2:
3026 pCtx->cr2 = uNewCrX;
3027 rcStrict = VINF_SUCCESS;
3028 break;
3029
3030 /*
3031 * CR3 is relatively simple, although AMD and Intel have different
3032 * accounts of how setting reserved bits is handled. We take intel's
3033 * word for the lower bits and AMD's for the high bits (63:52).
3034 */
3035 /** @todo Testcase: Setting reserved bits in CR3, especially before
3036 * enabling paging. */
3037 case 3:
3038 {
3039 /* check / mask the value. */
3040 if (uNewCrX & UINT64_C(0xfff0000000000000))
3041 {
3042 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3043 return iemRaiseGeneralProtectionFault0(pIemCpu);
3044 }
3045
3046 uint64_t fValid;
3047 if ( (pCtx->cr4 & X86_CR4_PAE)
3048 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3049 fValid = UINT64_C(0x000ffffffffff014);
3050 else if (pCtx->cr4 & X86_CR4_PAE)
3051 fValid = UINT64_C(0xfffffff4);
3052 else
3053 fValid = UINT64_C(0xfffff014);
3054 if (uNewCrX & ~fValid)
3055 {
3056 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3057 uNewCrX, uNewCrX & ~fValid));
3058 uNewCrX &= fValid;
3059 }
3060
3061 /** @todo If we're in PAE mode we should check the PDPTRs for
3062 * invalid bits. */
3063
3064 /* Make the change. */
3065 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3066 {
3067 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3068 AssertRCSuccessReturn(rc, rc);
3069 }
3070 else
3071 pCtx->cr3 = uNewCrX;
3072
3073 /* Inform PGM. */
3074 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3075 {
3076 if (pCtx->cr0 & X86_CR0_PG)
3077 {
3078 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3079 AssertRCReturn(rc, rc);
3080 /* ignore informational status codes */
3081 /** @todo status code management */
3082 }
3083 }
3084 rcStrict = VINF_SUCCESS;
3085 break;
3086 }
3087
3088 /*
3089 * CR4 is a bit more tedious as there are bits which cannot be cleared
3090 * under some circumstances and such.
3091 */
3092 case 4:
3093 {
3094 uint64_t const uOldCrX = pCtx->cr4;
3095
3096 /* reserved bits */
3097 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3098 | X86_CR4_TSD | X86_CR4_DE
3099 | X86_CR4_PSE | X86_CR4_PAE
3100 | X86_CR4_MCE | X86_CR4_PGE
3101 | X86_CR4_PCE | X86_CR4_OSFSXR
3102 | X86_CR4_OSXMMEEXCPT;
3103 //if (xxx)
3104 // fValid |= X86_CR4_VMXE;
3105 //if (xxx)
3106 // fValid |= X86_CR4_OSXSAVE;
3107 if (uNewCrX & ~(uint64_t)fValid)
3108 {
3109 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3110 return iemRaiseGeneralProtectionFault0(pIemCpu);
3111 }
3112
3113 /* long mode checks. */
3114 if ( (uOldCrX & X86_CR4_PAE)
3115 && !(uNewCrX & X86_CR4_PAE)
3116 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3117 {
3118 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
3119 return iemRaiseGeneralProtectionFault0(pIemCpu);
3120 }
3121
3122
3123 /*
3124 * Change it.
3125 */
3126 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3127 {
3128 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3129 AssertRCSuccessReturn(rc, rc);
3130 }
3131 else
3132 pCtx->cr4 = uNewCrX;
3133 Assert(pCtx->cr4 == uNewCrX);
3134
3135 /*
3136 * Notify SELM and PGM.
3137 */
3138 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3139 {
3140 /* SELM - VME may change things wrt to the TSS shadowing. */
3141 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3142 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3143
3144 /* PGM - flushing and mode. */
3145 if ( (uNewCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
3146 != (uOldCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)) )
3147 {
3148 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3149 AssertRCReturn(rc, rc);
3150 /* ignore informational status codes */
3151 }
3152 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3153 /** @todo Status code management. */
3154 }
3155 else
3156 rcStrict = VINF_SUCCESS;
3157 break;
3158 }
3159
3160 /*
3161 * CR8 maps to the APIC TPR.
3162 */
3163 case 8:
3164 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3165 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
3166 else
3167 rcStrict = VINF_SUCCESS;
3168 break;
3169
3170 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3171 }
3172
3173 /*
3174 * Advance the RIP on success.
3175 */
3176 /** @todo Status code management. */
3177 if (rcStrict == VINF_SUCCESS)
3178 iemRegAddToRip(pIemCpu, cbInstr);
3179 return rcStrict;
3180
3181}
3182
3183
3184/**
3185 * Implements mov CRx,GReg.
3186 *
3187 * @param iCrReg The CRx register to write (valid).
3188 * @param iGReg The general register to load the CRx value from.
3189 */
3190IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3191{
3192 if (pIemCpu->uCpl != 0)
3193 return iemRaiseGeneralProtectionFault0(pIemCpu);
3194 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3195
3196 /*
3197 * Read the new value from the source register and call common worker.
3198 */
3199 uint64_t uNewCrX;
3200 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3201 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3202 else
3203 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3204 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3205}
3206
3207
3208/**
3209 * Implements 'LMSW r/m16'.
3210 *
3211 * @param u16NewMsw The new value.
3212 */
3213IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3214{
3215 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3216
3217 if (pIemCpu->uCpl != 0)
3218 return iemRaiseGeneralProtectionFault0(pIemCpu);
3219 Assert(!pCtx->eflags.Bits.u1VM);
3220
3221 /*
3222 * Compose the new CR0 value and call common worker.
3223 */
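 /* Only MP, EM and TS are replaced; PE is not masked out of the old CR0,
    so LMSW can set PE but never clear it. */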
3224 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3225 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3226 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3227}
3228
3229
3230/**
3231 * Implements 'CLTS'.
3232 */
3233IEM_CIMPL_DEF_0(iemCImpl_clts)
3234{
3235 if (pIemCpu->uCpl != 0)
3236 return iemRaiseGeneralProtectionFault0(pIemCpu);
3237
3238 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3239 uint64_t uNewCr0 = pCtx->cr0;
3240 uNewCr0 &= ~X86_CR0_TS;
3241 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3242}
3243
3244
3245/**
3246 * Implements mov GReg,DRx.
3247 *
3248 * @param iGReg The general register to store the DRx value in.
3249 * @param iDrReg The DRx register to read (0-7).
3250 */
3251IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3252{
3253 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3254
3255 /*
3256 * Check preconditions.
3257 */
3258
3259 /* Raise GPs. */
3260 if (pIemCpu->uCpl != 0)
3261 return iemRaiseGeneralProtectionFault0(pIemCpu);
3262 Assert(!pCtx->eflags.Bits.u1VM);
3263
3264 if ( (iDrReg == 4 || iDrReg == 5)
3265 && (pCtx->cr4 & X86_CR4_DE) )
3266 {
3267 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3268 return iemRaiseGeneralProtectionFault0(pIemCpu);
3269 }
3270
3271 /* Raise #DB if general access detect is enabled. */
3272 if (pCtx->dr[7] & X86_DR7_GD)
3273 {
3274 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3275 return iemRaiseDebugException(pIemCpu);
3276 }
3277
3278 /*
3279 * Read the debug register and store it in the specified general register.
3280 */
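 /* When CR4.DE is clear, DR4 and DR5 alias DR6 and DR7 (the #GP case above
    handles DE=1); the always-one and always-zero bits of DR6/DR7 are forced
    below. */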
3281 uint64_t drX;
3282 switch (iDrReg)
3283 {
3284 case 0: drX = pCtx->dr[0]; break;
3285 case 1: drX = pCtx->dr[1]; break;
3286 case 2: drX = pCtx->dr[2]; break;
3287 case 3: drX = pCtx->dr[3]; break;
3288 case 6:
3289 case 4:
3290 drX = pCtx->dr[6];
3291 drX &= ~RT_BIT_32(12);
3292 drX |= UINT32_C(0xffff0ff0);
3293 break;
3294 case 7:
3295 case 5:
3296 drX = pCtx->dr[7];
3297 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3298 drX |= RT_BIT_32(10);
3299 break;
3300 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3301 }
3302
3303 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3304 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3305 else
3306 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3307
3308 iemRegAddToRip(pIemCpu, cbInstr);
3309 return VINF_SUCCESS;
3310}
3311
3312
3313/**
3314 * Implements mov DRx,GReg.
3315 *
3316 * @param iDrReg The DRx register to write (valid).
3317 * @param iGReg The general register to load the DRx value from.
3318 */
3319IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3320{
3321 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3322
3323 /*
3324 * Check preconditions.
3325 */
3326 if (pIemCpu->uCpl != 0)
3327 return iemRaiseGeneralProtectionFault0(pIemCpu);
3328 Assert(!pCtx->eflags.Bits.u1VM);
3329
3330 if ( (iDrReg == 4 || iDrReg == 5)
3331 && (pCtx->cr4 & X86_CR4_DE) )
3332 {
3333 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3334 return iemRaiseGeneralProtectionFault0(pIemCpu);
3335 }
3336
3337 /* Raise #DB if general access detect is enabled. */
3338 /** @todo Is the \#DB for DR7.GD raised before the \#GP for non-zero
3339 * reserved high bits in DR7/DR6? */
3340 if (pCtx->dr[7] & X86_DR7_GD)
3341 {
3342 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3343 return iemRaiseDebugException(pIemCpu);
3344 }
3345
3346 /*
3347 * Read the new value from the source register.
3348 */
3349 uint64_t uNewDrX;
3350 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3351 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3352 else
3353 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3354
3355 /*
3356 * Adjust it.
3357 */
3358 switch (iDrReg)
3359 {
3360 case 0:
3361 case 1:
3362 case 2:
3363 case 3:
3364 /* nothing to adjust */
3365 break;
3366
3367 case 6:
3368 case 4:
3369 if (uNewDrX & UINT64_C(0xffffffff00000000))
3370 {
3371 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3372 return iemRaiseGeneralProtectionFault0(pIemCpu);
3373 }
3374 uNewDrX &= ~RT_BIT_32(12);
3375 uNewDrX |= UINT32_C(0xffff0ff0);
3376 break;
3377
3378 case 7:
3379 case 5:
3380 if (uNewDrX & UINT64_C(0xffffffff00000000))
3381 {
3382 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3383 return iemRaiseGeneralProtectionFault0(pIemCpu);
3384 }
3385 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3386 uNewDrX |= RT_BIT_32(10);
3387 break;
3388
3389 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3390 }
3391
3392 /*
3393 * Do the actual setting.
3394 */
3395 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3396 {
3397 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3398 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3399 }
3400 else
3401 pCtx->dr[iDrReg] = uNewDrX;
3402
3403 iemRegAddToRip(pIemCpu, cbInstr);
3404 return VINF_SUCCESS;
3405}
3406
3407
3408/**
3409 * Implements 'INVLPG m'.
3410 *
3411 * @param GCPtrPage The effective address of the page to invalidate.
3412 * @remarks Updates the RIP.
3413 */
3414 IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3415{
3416 /* ring-0 only. */
3417 if (pIemCpu->uCpl != 0)
3418 return iemRaiseGeneralProtectionFault0(pIemCpu);
3419 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3420
3421 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3422 iemRegAddToRip(pIemCpu, cbInstr);
3423
3424 if ( rc == VINF_SUCCESS
3425 || rc == VINF_PGM_SYNC_CR3)
3426 return VINF_SUCCESS;
3427 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", rc));
3428 return rc;
3429}
3430
3431
3432/**
3433 * Implements RDTSC.
3434 */
3435IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3436{
3437 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3438
3439 /*
3440 * Check preconditions.
3441 */
3442 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3443 return iemRaiseUndefinedOpcode(pIemCpu);
3444
3445 if ( (pCtx->cr4 & X86_CR4_TSD)
3446 && pIemCpu->uCpl != 0)
3447 {
3448 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3449 return iemRaiseGeneralProtectionFault0(pIemCpu);
3450 }
3451
3452 /*
3453 * Do the job.
3454 */
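 /* The 64-bit time stamp counter is returned in EDX:EAX (EDX = high dword,
    EAX = low dword). */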
3455 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3456 pCtx->rax = (uint32_t)uTicks;
3457 pCtx->rdx = uTicks >> 32;
3458#ifdef IEM_VERIFICATION_MODE
3459 pIemCpu->fIgnoreRaxRdx = true;
3460#endif
3461
3462 iemRegAddToRip(pIemCpu, cbInstr);
3463 return VINF_SUCCESS;
3464}
3465
3466
3467/**
3468 * Implements RDMSR.
3469 */
3470IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3471{
3472 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3473
3474 /*
3475 * Check preconditions.
3476 */
3477 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3478 return iemRaiseUndefinedOpcode(pIemCpu);
3479 if (pIemCpu->uCpl != 0)
3480 return iemRaiseGeneralProtectionFault0(pIemCpu);
3481
3482 /*
3483 * Do the job.
3484 */
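 /* ECX selects the MSR; the 64-bit value is returned in EDX:EAX. */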
3485 RTUINT64U uValue;
3486 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3487 if (rc != VINF_SUCCESS)
3488 {
3489 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3490 return iemRaiseGeneralProtectionFault0(pIemCpu);
3491 }
3492
3493 pCtx->rax = uValue.au32[0];
3494 pCtx->rdx = uValue.au32[1];
3495
3496 iemRegAddToRip(pIemCpu, cbInstr);
3497 return VINF_SUCCESS;
3498}
3499
3500
3501/**
3502 * Implements 'IN eAX, port'.
3503 *
3504 * @param u16Port The source port.
3505 * @param cbReg The register size.
3506 */
3507IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3508{
3509 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3510
3511 /*
3512 * CPL check
3513 */
3514 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3515 if (rcStrict != VINF_SUCCESS)
3516 return rcStrict;
3517
3518 /*
3519 * Perform the I/O.
3520 */
3521 uint32_t u32Value;
3522 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3523 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3524 else
3525 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3526 if (IOM_SUCCESS(rcStrict))
3527 {
3528 switch (cbReg)
3529 {
3530 case 1: pCtx->al = (uint8_t)u32Value; break;
3531 case 2: pCtx->ax = (uint16_t)u32Value; break;
3532 case 4: pCtx->rax = u32Value; break;
3533 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3534 }
3535 iemRegAddToRip(pIemCpu, cbInstr);
3536 pIemCpu->cPotentialExits++;
3537 }
3538 /** @todo massage rcStrict. */
3539 return rcStrict;
3540}
3541
3542
3543/**
3544 * Implements 'IN eAX, DX'.
3545 *
3546 * @param cbReg The register size.
3547 */
3548IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3549{
3550 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3551}
3552
3553
3554/**
3555 * Implements 'OUT port, eAX'.
3556 *
3557 * @param u16Port The destination port.
3558 * @param cbReg The register size.
3559 */
3560IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3561{
3562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3563
3564 /*
3565 * CPL check
3566 */
3567 if ( (pCtx->cr0 & X86_CR0_PE)
3568 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3569 || pCtx->eflags.Bits.u1VM) )
3570 {
3571 /** @todo I/O port permission bitmap check */
3572 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
3573 }
3574
3575 /*
3576 * Perform the I/O.
3577 */
3578 uint32_t u32Value;
3579 switch (cbReg)
3580 {
3581 case 1: u32Value = pCtx->al; break;
3582 case 2: u32Value = pCtx->ax; break;
3583 case 4: u32Value = pCtx->eax; break;
3584 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3585 }
3586 VBOXSTRICTRC rc;
3587 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3588 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3589 else
3590 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3591 if (IOM_SUCCESS(rc))
3592 {
3593 iemRegAddToRip(pIemCpu, cbInstr);
3594 pIemCpu->cPotentialExits++;
3595 /** @todo massage rc. */
3596 }
3597 return rc;
3598}
3599
3600
3601/**
3602 * Implements 'OUT DX, eAX'.
3603 *
3604 * @param cbReg The register size.
3605 */
3606IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3607{
3608 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3609}
3610
3611
3612/**
3613 * Implements 'CLI'.
3614 */
3615IEM_CIMPL_DEF_0(iemCImpl_cli)
3616{
3617 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3618
3619 if (pCtx->cr0 & X86_CR0_PE)
3620 {
3621 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3622 if (!pCtx->eflags.Bits.u1VM)
3623 {
3624 if (pIemCpu->uCpl <= uIopl)
3625 pCtx->eflags.Bits.u1IF = 0;
3626 else if ( pIemCpu->uCpl == 3
3627 && (pCtx->cr4 & X86_CR4_PVI) )
3628 pCtx->eflags.Bits.u1VIF = 0;
3629 else
3630 return iemRaiseGeneralProtectionFault0(pIemCpu);
3631 }
3632 /* V8086 */
3633 else if (uIopl == 3)
3634 pCtx->eflags.Bits.u1IF = 0;
3635 else if ( uIopl < 3
3636 && (pCtx->cr4 & X86_CR4_VME) )
3637 pCtx->eflags.Bits.u1VIF = 0;
3638 else
3639 return iemRaiseGeneralProtectionFault0(pIemCpu);
3640 }
3641 /* real mode */
3642 else
3643 pCtx->eflags.Bits.u1IF = 0;
3644 iemRegAddToRip(pIemCpu, cbInstr);
3645 return VINF_SUCCESS;
3646}
3647
3648
3649/**
3650 * Implements 'STI'.
3651 */
3652IEM_CIMPL_DEF_0(iemCImpl_sti)
3653{
3654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3655
3656 if (pCtx->cr0 & X86_CR0_PE)
3657 {
3658 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3659 if (!pCtx->eflags.Bits.u1VM)
3660 {
3661 if (pIemCpu->uCpl <= uIopl)
3662 pCtx->eflags.Bits.u1IF = 1;
3663 else if ( pIemCpu->uCpl == 3
3664 && (pCtx->cr4 & X86_CR4_PVI)
3665 && !pCtx->eflags.Bits.u1VIP )
3666 pCtx->eflags.Bits.u1VIF = 1;
3667 else
3668 return iemRaiseGeneralProtectionFault0(pIemCpu);
3669 }
3670 /* V8086 */
3671 else if (uIopl == 3)
3672 pCtx->eflags.Bits.u1IF = 1;
3673 else if ( uIopl < 3
3674 && (pCtx->cr4 & X86_CR4_VME)
3675 && !pCtx->eflags.Bits.u1VIP )
3676 pCtx->eflags.Bits.u1VIF = 1;
3677 else
3678 return iemRaiseGeneralProtectionFault0(pIemCpu);
3679 }
3680 /* real mode */
3681 else
3682 pCtx->eflags.Bits.u1IF = 1;
3683
3684 iemRegAddToRip(pIemCpu, cbInstr);
3685 /** @todo don't do this unconditionally... */
3686 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3687 return VINF_SUCCESS;
3688}
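
/*
 * Editor's note: a compact summary of the CLI/STI privilege rules implemented
 * above, derived from the two functions and the Intel SDM:
 *
 *   Mode            Condition                               Effect
 *   --------------  --------------------------------------  ---------------
 *   real mode       always                                  IF cleared/set
 *   protected mode  CPL <= IOPL                             IF cleared/set
 *   protected mode  CPL == 3 and CR4.PVI (STI: also !VIP)   VIF cleared/set
 *   V8086 mode      IOPL == 3                               IF cleared/set
 *   V8086 mode      IOPL < 3 and CR4.VME (STI: also !VIP)   VIF cleared/set
 *   otherwise       -                                       #GP(0)
 */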
3689
3690
3691/**
3692 * Implements 'HLT'.
3693 */
3694IEM_CIMPL_DEF_0(iemCImpl_hlt)
3695{
3696 if (pIemCpu->uCpl != 0)
3697 return iemRaiseGeneralProtectionFault0(pIemCpu);
3698 iemRegAddToRip(pIemCpu, cbInstr);
3699 return VINF_EM_HALT;
3700}
3701
3702
3703/**
3704 * Implements 'CPUID'.
3705 */
3706IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3707{
3708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3709
3710 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3711 pCtx->rax &= UINT32_C(0xffffffff);
3712 pCtx->rbx &= UINT32_C(0xffffffff);
3713 pCtx->rcx &= UINT32_C(0xffffffff);
3714 pCtx->rdx &= UINT32_C(0xffffffff);
3715
3716 iemRegAddToRip(pIemCpu, cbInstr);
3717 return VINF_SUCCESS;
3718}
3719
3720
3721/**
3722 * Implements 'AAD'.
3723 *
3724 * @param bImm The immediate operand (the base, typically 10).
3725 */
3726IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3727{
3728 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3729
3730 uint16_t const ax = pCtx->ax;
3731 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3732 pCtx->ax = al;
3733 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3734 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3735 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3736
3737 iemRegAddToRip(pIemCpu, cbInstr);
3738 return VINF_SUCCESS;
3739}
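
/*
 * Editor's note: a worked example of the AAD computation above with the usual
 * base-10 immediate (encoding D5 0A): for AX=0x0207 (AH=2, AL=7) the new AL is
 * 7 + 2 * 10 = 27 = 0x1B and AH is cleared, so AX becomes 0x001B.  SF/ZF/PF
 * are then derived from that 0x1B result; OF/AF/CF are architecturally
 * undefined and handled via the fUndefined mask.
 */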
3740
3741
3742/**
3743 * Implements 'AAM'.
3744 *
3745 * @param bImm The immediate operand. Cannot be 0.
3746 */
3747IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3748{
3749 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3750 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3751
3752 uint16_t const ax = pCtx->ax;
3753 uint8_t const al = (uint8_t)ax % bImm;
3754 uint8_t const ah = (uint8_t)ax / bImm;
3755 pCtx->ax = (ah << 8) + al;
3756 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3757 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3758 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3759
3760 iemRegAddToRip(pIemCpu, cbInstr);
3761 return VINF_SUCCESS;
3762}
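
/*
 * Editor's note: the inverse worked example for AAM (base 10, encoding D4 0A):
 * for AX=0x001B (27 decimal), AH = 27 / 10 = 2 and AL = 27 % 10 = 7, giving
 * AX=0x0207.  As with AAD, SF/ZF/PF follow the AL result (here 7) while
 * OF/AF/CF are undefined.  A zero immediate never reaches this code since the
 * decoder raises #DE first, as the assertion above notes.
 */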
3763
3764
3765
3766
3767/*
3768 * Instantiate the various string operation combinations.
3769 */
3770#define OP_SIZE 8
3771#define ADDR_SIZE 16
3772#include "IEMAllCImplStrInstr.cpp.h"
3773#define OP_SIZE 8
3774#define ADDR_SIZE 32
3775#include "IEMAllCImplStrInstr.cpp.h"
3776#define OP_SIZE 8
3777#define ADDR_SIZE 64
3778#include "IEMAllCImplStrInstr.cpp.h"
3779
3780#define OP_SIZE 16
3781#define ADDR_SIZE 16
3782#include "IEMAllCImplStrInstr.cpp.h"
3783#define OP_SIZE 16
3784#define ADDR_SIZE 32
3785#include "IEMAllCImplStrInstr.cpp.h"
3786#define OP_SIZE 16
3787#define ADDR_SIZE 64
3788#include "IEMAllCImplStrInstr.cpp.h"
3789
3790#define OP_SIZE 32
3791#define ADDR_SIZE 16
3792#include "IEMAllCImplStrInstr.cpp.h"
3793#define OP_SIZE 32
3794#define ADDR_SIZE 32
3795#include "IEMAllCImplStrInstr.cpp.h"
3796#define OP_SIZE 32
3797#define ADDR_SIZE 64
3798#include "IEMAllCImplStrInstr.cpp.h"
3799
3800#define OP_SIZE 64
3801#define ADDR_SIZE 32
3802#include "IEMAllCImplStrInstr.cpp.h"
3803#define OP_SIZE 64
3804#define ADDR_SIZE 64
3805#include "IEMAllCImplStrInstr.cpp.h"
3806
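
/*
 * Editor's note: the matrix above instantiates the string-instruction workers
 * for every architecturally possible operand/address size pairing.  OP_SIZE 64
 * with ADDR_SIZE 16 is intentionally absent: a 64-bit operand size implies
 * long mode, where the effective address size is 64-bit or, with a 67h prefix,
 * 32-bit, never 16-bit.
 */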
3807
3808/**
3809 * Implements 'FINIT' and 'FNINIT'.
3810 *
3811 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3812 * not.
3813 */
3814IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3815{
3816 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3817
3818 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3819 return iemRaiseDeviceNotAvailable(pIemCpu);
3820
3821 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
3822 if (fCheckXcpts && TODO )
3823 return iemRaiseMathFault(pIemCpu);
3824 */
3825
3826 if (iemFRegIsFxSaveFormat(pIemCpu))
3827 {
3828 pCtx->fpu.FCW = 0x37f;
3829 pCtx->fpu.FSW = 0;
3830 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
3831 pCtx->fpu.FPUDP = 0;
3832 pCtx->fpu.DS = 0; //??
3833 pCtx->fpu.FPUIP = 0;
3834 pCtx->fpu.CS = 0; //??
3835 pCtx->fpu.FOP = 0;
3836 }
3837 else
3838 {
3839 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3840 pFpu->FCW = 0x37f;
3841 pFpu->FSW = 0;
3842 pFpu->FTW = 0xffff; /* 11 - empty */
3843 pFpu->FPUOO = 0; //??
3844 pFpu->FPUOS = 0; //??
3845 pFpu->FPUIP = 0;
3846 pFpu->CS = 0; //??
3847 pFpu->FOP = 0;
3848 }
3849
3850 iemRegAddToRip(pIemCpu, cbInstr);
3851 return VINF_SUCCESS;
3852}
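
/*
 * Editor's note: the 0x037F loaded into FCW above is the architectural
 * FINIT/FNINIT default: exception mask bits IM/DM/ZM/OM/UM/PM (bits 0-5) all
 * set, precision control (bits 8-9) = 11b for 64-bit significands, and
 * rounding control (bits 10-11) = 00b for round-to-nearest.  FSW=0 puts TOP
 * at 0 with no pending exceptions, and the tag word marks all eight registers
 * empty in either save format.
 */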
3853
3854
3855/**
3856 * Implements 'FXSAVE'.
3857 *
3858 * @param iEffSeg The effective segment.
3859 * @param GCPtrEff The address of the image.
3860 * @param enmEffOpSize The operand size (only REX.W really matters).
3861 */
3862IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3863{
3864 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3865
3866 /*
3867 * Raise exceptions.
3868 */
3869 if (pCtx->cr0 & X86_CR0_EM)
3870 return iemRaiseUndefinedOpcode(pIemCpu);
3871 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3872 return iemRaiseDeviceNotAvailable(pIemCpu);
3873 if (GCPtrEff & 15)
3874 {
3875 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3876 * all/any misalignment sizes; Intel says it's an implementation detail. */
3877 if ( (pCtx->cr0 & X86_CR0_AM)
3878 && pCtx->eflags.Bits.u1AC
3879 && pIemCpu->uCpl == 3)
3880 return iemRaiseAlignmentCheckException(pIemCpu);
3881 return iemRaiseGeneralProtectionFault0(pIemCpu);
3882 }
3883 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3884
3885 /*
3886 * Access the memory.
3887 */
3888 void *pvMem512;
3889 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3890 if (rcStrict != VINF_SUCCESS)
3891 return rcStrict;
3892 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
3893
3894 /*
3895 * Store the registers.
3896 */
3897 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
3898 * specific whether MXCSR and XMM0-XMM7 are saved. */
3899
3900 /* common for all formats */
3901 pDst->FCW = pCtx->fpu.FCW;
3902 pDst->FSW = pCtx->fpu.FSW;
3903 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
3904 pDst->FOP = pCtx->fpu.FOP;
3905 pDst->MXCSR = pCtx->fpu.MXCSR;
3906 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
3907 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
3908 {
3909 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
3910 * them for now... */
3911 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
3912 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
3913 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
3914 pDst->aRegs[i].au32[3] = 0;
3915 }
3916
3917 /* FPU IP, CS, DP and DS. */
3918 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
3919 * state information. :-/
3920 * Storing zeros now to prevent any potential leakage of host info. */
3921 pDst->FPUIP = 0;
3922 pDst->CS = 0;
3923 pDst->Rsrvd1 = 0;
3924 pDst->FPUDP = 0;
3925 pDst->DS = 0;
3926 pDst->Rsrvd2 = 0;
3927
3928 /* XMM registers. */
3929 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
3930 || pIemCpu->enmCpuMode != IEMMODE_64BIT
3931 || pIemCpu->uCpl != 0)
3932 {
3933 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
3934 for (uint32_t i = 0; i < cXmmRegs; i++)
3935 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
3936 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
3937 * right? */
3938 }
3939
3940 /*
3941 * Commit the memory.
3942 */
3943 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3944 if (rcStrict != VINF_SUCCESS)
3945 return rcStrict;
3946
3947 iemRegAddToRip(pIemCpu, cbInstr);
3948 return VINF_SUCCESS;
3949}
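
/*
 * Editor's note: the FXSAVE image uses the abridged tag word (one valid bit
 * per register) rather than the full 2-bits-per-register FTW of FNSTENV and
 * FNSAVE.  IEM keeps the abridged form in pCtx->fpu.FTW (note the
 * iemFpuCalcFullFtw call in the FNSTENV path further down), which is why the
 * store above is a plain '& 0xff'.  For reference, deriving the abridged form
 * from a full tag word looks roughly like this standalone sketch (not an IEM
 * helper):
 */
#if 0 /* illustrative sketch only */
static uint8_t iemSketchAbridgeFtw(uint16_t u16FullFtw)
{
    uint8_t u8Abridged = 0;
    for (unsigned iReg = 0; iReg < 8; iReg++)
    {
        uint16_t const uTag = (u16FullFtw >> (iReg * 2)) & 3;
        if (uTag != 3) /* 11b means empty; valid, zero and special all map to 1. */
            u8Abridged |= (uint8_t)RT_BIT(iReg);
    }
    return u8Abridged;
}
#endif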
3950
3951
3952/**
3953 * Implements 'FXRSTOR'.
3954 *
3955 * @param iEffSeg The effective segment.
 * @param GCPtrEff The address of the image.
3956 * @param enmEffOpSize The operand size (only REX.W really matters).
3957 */
3958IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3959{
3960 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3961
3962 /*
3963 * Raise exceptions.
3964 */
3965 if (pCtx->cr0 & X86_CR0_EM)
3966 return iemRaiseUndefinedOpcode(pIemCpu);
3967 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3968 return iemRaiseDeviceNotAvailable(pIemCpu);
3969 if (GCPtrEff & 15)
3970 {
3971 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3972 * all/any misalignment sizes; Intel says it's an implementation detail. */
3973 if ( (pCtx->cr0 & X86_CR0_AM)
3974 && pCtx->eflags.Bits.u1AC
3975 && pIemCpu->uCpl == 3)
3976 return iemRaiseAlignmentCheckException(pIemCpu);
3977 return iemRaiseGeneralProtectionFault0(pIemCpu);
3978 }
3979 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3980
3981 /*
3982 * Access the memory.
3983 */
3984 void *pvMem512;
3985 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
3986 if (rcStrict != VINF_SUCCESS)
3987 return rcStrict;
3988 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
3989
3990 /*
3991 * Check the state for stuff which will GP(0).
3992 */
3993 uint32_t const fMXCSR = pSrc->MXCSR;
3994 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
3995 if (fMXCSR & ~fMXCSR_MASK)
3996 {
3997 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
3998 return iemRaiseGeneralProtectionFault0(pIemCpu);
3999 }
4000
4001 /*
4002 * Load the registers.
4003 */
4004 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4005 * specific whether MXCSR and XMM0-XMM7 are restored. */
4006
4007 /* common for all formats */
4008 pCtx->fpu.FCW = pSrc->FCW;
4009 pCtx->fpu.FSW = pSrc->FSW;
4010 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4011 pCtx->fpu.FOP = pSrc->FOP;
4012 pCtx->fpu.MXCSR = fMXCSR;
4013 /* (MXCSR_MASK is read-only) */
4014 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4015 {
4016 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4017 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4018 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4019 pCtx->fpu.aRegs[i].au32[3] = 0;
4020 }
4021
4022 /* FPU IP, CS, DP and DS. */
4023 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4024 {
4025 pCtx->fpu.FPUIP = pSrc->FPUIP;
4026 pCtx->fpu.CS = pSrc->CS;
4027 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4028 pCtx->fpu.FPUDP = pSrc->FPUDP;
4029 pCtx->fpu.DS = pSrc->DS;
4030 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4031 }
4032 else
4033 {
4034 pCtx->fpu.FPUIP = pSrc->FPUIP;
4035 pCtx->fpu.CS = pSrc->CS;
4036 pCtx->fpu.Rsrvd1 = 0;
4037 pCtx->fpu.FPUDP = pSrc->FPUDP;
4038 pCtx->fpu.DS = pSrc->DS;
4039 pCtx->fpu.Rsrvd2 = 0;
4040 }
4041
4042 /* XMM registers. */
4043 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4044 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4045 || pIemCpu->uCpl != 0)
4046 {
4047 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4048 for (uint32_t i = 0; i < cXmmRegs; i++)
4049 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4050 }
4051
4052 /*
4053 * Commit the memory.
4054 */
4055 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4056 if (rcStrict != VINF_SUCCESS)
4057 return rcStrict;
4058
4059 iemRegAddToRip(pIemCpu, cbInstr);
4060 return VINF_SUCCESS;
4061}
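
/*
 * Editor's note: a worked example of the MXCSR check in FXRSTOR above.  When
 * the saved MXCSR_MASK field is zero, the architectural default 0xFFBF is
 * assumed, which marks bit 6 (DAZ) and bits 16-31 as reserved.  Restoring an
 * image whose MXCSR is 0x1FC0 (the power-on default 0x1F80 plus DAZ) on such
 * a CPU gives 0x1FC0 & ~0xFFBF = 0x0040, a non-zero value, so #GP(0) is
 * raised before any FPU/SSE state is modified.
 */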
4062
4063
4064/**
4065 * Common routine for fnstenv and fnsave.
4066 *
4067 * @param uPtr Where to store the state.
4068 * @param pCtx The CPU context.
4069 */
4070static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4071{
4072 if (enmEffOpSize == IEMMODE_16BIT)
4073 {
4074 uPtr.pu16[0] = pCtx->fpu.FCW;
4075 uPtr.pu16[1] = pCtx->fpu.FSW;
4076 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4077 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4078 {
4079 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4080 * protected mode or long mode and we save it in real mode? And vice
4081 * versa? And with 32-bit operand size? I think the CPU is storing the
4082 * effective address ((CS << 4) + IP) in the offset register and not
4083 * doing any address calculations here. */
4084 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4085 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4086 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4087 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4088 }
4089 else
4090 {
4091 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4092 uPtr.pu16[4] = pCtx->fpu.CS;
4093 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4094 uPtr.pu16[6] = pCtx->fpu.DS;
4095 }
4096 }
4097 else
4098 {
4099 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4100 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4101 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4102 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4103 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4104 {
4105 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4106 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4107 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4108 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4109 }
4110 else
4111 {
4112 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4113 uPtr.pu16[4*2] = pCtx->fpu.CS;
4114 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4115 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4116 uPtr.pu16[6*2] = pCtx->fpu.DS;
4117 }
4118 }
4119}
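
/*
 * Editor's note: a worked example of the real/V86-mode 16-bit layout produced
 * above, which treats pCtx->fpu.FPUIP as a linear address.  For CS=0x1234 and
 * IP=0x5678 the linear address is (0x1234 << 4) + 0x5678 = 0x179B8: word 3 of
 * the image receives the low 16 bits (0x79B8), and bits 16-19 (here 0x1) are
 * shifted into bits 12-15 of word 4, i.e. ((0x179B8 >> 4) & 0xF000) = 0x1000,
 * OR'ed with the 11-bit FOP.  The 32-bit layout packs the same pieces into
 * dwords 4 and 6 instead.
 */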
4120
4121
4122/**
4123 * Common routine for fldenv and frstor.
4124 *
4125 * @param uPtr Where to load the state from.
4126 * @param pCtx The CPU context.
4127 */
4128static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4129{
4130 if (enmEffOpSize == IEMMODE_16BIT)
4131 {
4132 pCtx->fpu.FCW = uPtr.pu16[0];
4133 pCtx->fpu.FSW = uPtr.pu16[1];
4134 pCtx->fpu.FTW = uPtr.pu16[2];
4135 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4136 {
4137 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4138 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4139 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4140 pCtx->fpu.CS = 0;
4141 pCtx->fpu.DS = 0;
4142 }
4143 else
4144 {
4145 pCtx->fpu.FPUIP = uPtr.pu16[3];
4146 pCtx->fpu.CS = uPtr.pu16[4];
4147 pCtx->fpu.FPUDP = uPtr.pu16[5];
4148 pCtx->fpu.DS = uPtr.pu16[6];
4149 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4150 }
4151 }
4152 else
4153 {
4154 pCtx->fpu.FCW = uPtr.pu16[0*2];
4155 pCtx->fpu.FSW = uPtr.pu16[1*2];
4156 pCtx->fpu.FTW = uPtr.pu16[2*2];
4157 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4158 {
4159 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4160 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4161 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4162 pCtx->fpu.CS = 0;
4163 pCtx->fpu.DS = 0;
4164 }
4165 else
4166 {
4167 pCtx->fpu.FPUIP = uPtr.pu32[3];
4168 pCtx->fpu.CS = uPtr.pu16[4*2];
4169 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4170 pCtx->fpu.FPUDP = uPtr.pu32[5];
4171 pCtx->fpu.DS = uPtr.pu16[6*2];
4172 }
4173 }
4174
4175 /* Make adjustments. */
4176 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4177 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4178 iemFpuRecalcExceptionStatus(pCtx);
4179 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4180 * exceptions are pending after loading the saved state? */
4181}
4182
4183
4184/**
4185 * Implements 'FNSTENV'.
4186 *
4187 * @param enmEffOpSize The operand size (16-bit or 32-bit environment format).
4188 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4189 * @param GCPtrEffDst The address of the image.
4190 */
4191IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4192{
4193 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4194 RTPTRUNION uPtr;
4195 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4196 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4197 if (rcStrict != VINF_SUCCESS)
4198 return rcStrict;
4199
4200 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4201
4202 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4203 if (rcStrict != VINF_SUCCESS)
4204 return rcStrict;
4205
4206 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4207 iemRegAddToRip(pIemCpu, cbInstr);
4208 return VINF_SUCCESS;
4209}
4210
4211
4212/**
4213 * Implements 'FLDENV'.
4214 *
4215 * @param enmEffOpSize The operand size (16-bit or 32-bit environment format).
4216 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4217 * @param GCPtrEffSrc The address of the image.
4218 */
4219IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4220{
4221 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4222 RTCPTRUNION uPtr;
4223 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4224 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4225 if (rcStrict != VINF_SUCCESS)
4226 return rcStrict;
4227
4228 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4229
4230 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4231 if (rcStrict != VINF_SUCCESS)
4232 return rcStrict;
4233
4234 iemRegAddToRip(pIemCpu, cbInstr);
4235 return VINF_SUCCESS;
4236}
4237
4238
4239/**
4240 * Implements 'FLDCW'.
4241 *
4242 * @param u16Fcw The new FCW.
4243 */
4244IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4245{
4246 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4247
4248 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4249 /** @todo Testcase: Try to see what happens when setting undefined bits
4250 * (other than 6 and 7). Currently ignoring them. */
4251 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4252 * according to FSW. (This is what is currently implemented.) */
4253 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4254 iemFpuRecalcExceptionStatus(pCtx);
4255
4256 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4257 iemRegAddToRip(pIemCpu, cbInstr);
4258 return VINF_SUCCESS;
4259}
4260
4261
4262
4263/**
4264 * Implements the underflow case of fxch.
4265 *
4266 * @param iStReg The other stack register.
4267 */
4268IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4269{
4270 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4271
4272 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4273 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4274 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4275
4276 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4277 * registers are read as QNaN and then exchanged. This could be
4278 * wrong... */
4279 if (pCtx->fpu.FCW & X86_FCW_IM)
4280 {
4281 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4282 {
4283 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4284 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4285 else
4286 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4287 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4288 }
4289 else
4290 {
4291 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4292 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4293 }
4294 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4295 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4296 }
4297 else
4298 {
4299 /* raise underflow exception, don't change anything. */
4300 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4301 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4302 }
4303 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4304
4305 iemRegAddToRip(pIemCpu, cbInstr);
4306 return VINF_SUCCESS;
4307}
4308
4309
4310/**
4311 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4312 *
4313 * @param iStReg The other stack register, i.e. ST(iStReg) is compared with ST(0).
 * @param pfnAImpl Pointer to the assembly comparison worker to invoke.
 * @param fPop Whether to pop ST(0) afterwards (the FCOMIP/FUCOMIP variants).
4314 */
4315IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4316{
4317 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4318 Assert(iStReg < 8);
4319
4320 /*
4321 * Raise exceptions.
4322 */
4323 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4324 return iemRaiseDeviceNotAvailable(pIemCpu);
4325 uint16_t u16Fsw = pCtx->fpu.FSW;
4326 if (u16Fsw & X86_FSW_ES)
4327 return iemRaiseMathFault(pIemCpu);
4328
4329 /*
4330 * Check if any of the register accesses causes #SF + #IA.
4331 */
4332 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4333 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4334 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4335 {
4336 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4337 pCtx->fpu.FSW &= ~X86_FSW_C1;
4338 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4339 if ( !(u16Fsw & X86_FSW_IE)
4340 || (pCtx->fpu.FCW & X86_FCW_IM) )
4341 {
4342 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4343 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF); /* ZF, PF and CF reflect the comparison result. */
4344 }
4345 }
4346 else if (pCtx->fpu.FCW & X86_FCW_IM)
4347 {
4348 /* Masked underflow. */
4349 pCtx->fpu.FSW &= ~X86_FSW_C1;
4350 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4351 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4352 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4353 }
4354 else
4355 {
4356 /* Raise underflow - don't touch EFLAGS or TOP. */
4357 pCtx->fpu.FSW &= ~X86_FSW_C1;
4358 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4359 fPop = false;
4360 }
4361
4362 /*
4363 * Pop if necessary.
4364 */
4365 if (fPop)
4366 {
4367 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4368 pCtx->fpu.FSW &= X86_FSW_TOP_MASK;
4369 pCtx->fpu.FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4370 }
4371
4372 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4373 iemRegAddToRip(pIemCpu, cbInstr);
4374 return VINF_SUCCESS;
4375}
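
/*
 * Editor's note: for reference, FCOMI/FUCOMI report the comparison through
 * ZF/PF/CF instead of C3/C2/C0: ST(0) > ST(i) yields 0/0/0, ST(0) < ST(i)
 * yields 0/0/1, equality yields 1/0/0, and an unordered compare yields 1/1/1,
 * which is also what the masked-underflow path above forces.  OF, SF and AF
 * are cleared whenever a result is reported.
 */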
4376
4377/** @} */
4378