VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 40251

Last change on this file since 40251 was 40251, checked in by vboxsync, 13 years ago

fcomi, fcomip, fucomi and fucomip.

1/* $Id: IEMAllCImpl.cpp.h 40251 2012-02-24 21:24:23Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
80
81
82/**
83 * Updates the specified flags according to a 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
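    /* Run the result through the 8-bit TEST worker to recompute the arithmetic
       status flags for u8Result, then copy only the requested flags (fToUpdate)
       plus the ones documented as undefined (fUndefined) into the guest EFLAGS. */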
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param puSel The selector register.
106 * @param pHid The hidden register part.
107 */
108static void iemHlpLoadNullDataSelectorProt(PRTSEL puSel, PCPUMSELREGHID pHid)
109{
110 /** @todo Testcase: write a testcase checking what happens when loading a NULL
111 * data selector in protected mode. */
112 pHid->u64Base = 0;
113 pHid->u32Limit = 0;
114 pHid->Attr.u = 0;
115 *puSel = 0;
116}
117
118
119/**
120 * Helper used by iret.
121 *
122 * @param uCpl The new CPL.
123 * @param puSel The selector register.
124 * @param pHid The corresponding hidden register.
125 */
126static void iemHlpAdjustSelectorForNewCpl(uint8_t uCpl, PRTSEL puSel, PCPUMSELREGHID pHid)
127{
128 if ( uCpl > pHid->Attr.n.u2Dpl
129 && pHid->Attr.n.u1DescType /* code or data, not system */
130 && (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
131 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
132 iemHlpLoadNullDataSelectorProt(puSel, pHid);
133}
134
135/** @} */
136
137/** @name C Implementations
138 * @{
139 */
140
141/**
142 * Implements a 16-bit popa.
143 */
144IEM_CIMPL_DEF_0(iemCImpl_popa_16)
145{
146 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
147 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
148 RTGCPTR GCPtrLast = GCPtrStart + 15;
149 VBOXSTRICTRC rcStrict;
150
151 /*
152 * The docs are a bit hard to comprehend here, but it looks like we wrap
153 * around in real mode as long as none of the individual "popa" crosses the
154 * end of the stack segment. In protected mode we check the whole access
155 * in one go. For efficiency, only do the word-by-word thing if we're in
156 * danger of wrapping around.
157 */
158 /** @todo do popa boundary / wrap-around checks. */
159 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
160 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
161 {
162 /* word-by-word */
163 RTUINT64U TmpRsp;
164 TmpRsp.u = pCtx->rsp;
165 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
166 if (rcStrict == VINF_SUCCESS)
167 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
168 if (rcStrict == VINF_SUCCESS)
169 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
170 if (rcStrict == VINF_SUCCESS)
171 {
172 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
173 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
174 }
175 if (rcStrict == VINF_SUCCESS)
176 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
177 if (rcStrict == VINF_SUCCESS)
178 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
179 if (rcStrict == VINF_SUCCESS)
180 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
181 if (rcStrict == VINF_SUCCESS)
182 {
183 pCtx->rsp = TmpRsp.u;
184 iemRegAddToRip(pIemCpu, cbInstr);
185 }
186 }
187 else
188 {
189 uint16_t const *pa16Mem = NULL;
190 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
191 if (rcStrict == VINF_SUCCESS)
192 {
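            /* PUSHA stores AX,CX,DX,BX,SP,BP,SI,DI from the top of the frame
               downwards, so with X86_GREG_xAX=0 .. X86_GREG_xDI=7 the word for
               register n sits at index 7 - n in the mapped frame. */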
193 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
194 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
195 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
196 /* skip sp */
197 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
198 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
199 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
200 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
201 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 iemRegAddToRsp(pCtx, 16);
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 }
209 return rcStrict;
210}
211
212
213/**
214 * Implements a 32-bit popa.
215 */
216IEM_CIMPL_DEF_0(iemCImpl_popa_32)
217{
218 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
219 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
220 RTGCPTR GCPtrLast = GCPtrStart + 31;
221 VBOXSTRICTRC rcStrict;
222
223 /*
224 * The docs are a bit hard to comprehend here, but it looks like we wrap
225 * around in real mode as long as none of the individual "popa" crosses the
226 * end of the stack segment. In protected mode we check the whole access
227 * in one go. For efficiency, only do the word-by-word thing if we're in
228 * danger of wrapping around.
229 */
230 /** @todo do popa boundary / wrap-around checks. */
231 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
232 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
233 {
234 /* word-by-word */
235 RTUINT64U TmpRsp;
236 TmpRsp.u = pCtx->rsp;
237 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
238 if (rcStrict == VINF_SUCCESS)
239 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
240 if (rcStrict == VINF_SUCCESS)
241 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
242 if (rcStrict == VINF_SUCCESS)
243 {
244 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
245 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
246 }
247 if (rcStrict == VINF_SUCCESS)
248 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
249 if (rcStrict == VINF_SUCCESS)
250 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
251 if (rcStrict == VINF_SUCCESS)
252 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
253 if (rcStrict == VINF_SUCCESS)
254 {
255#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
256 pCtx->rdi &= UINT32_MAX;
257 pCtx->rsi &= UINT32_MAX;
258 pCtx->rbp &= UINT32_MAX;
259 pCtx->rbx &= UINT32_MAX;
260 pCtx->rdx &= UINT32_MAX;
261 pCtx->rcx &= UINT32_MAX;
262 pCtx->rax &= UINT32_MAX;
263#endif
264 pCtx->rsp = TmpRsp.u;
265 iemRegAddToRip(pIemCpu, cbInstr);
266 }
267 }
268 else
269 {
270 uint32_t const *pa32Mem;
271 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
272 if (rcStrict == VINF_SUCCESS)
273 {
274 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
275 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
276 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
277 /* skip esp */
278 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
279 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
280 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
281 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
282 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
283 if (rcStrict == VINF_SUCCESS)
284 {
285 iemRegAddToRsp(pCtx, 32);
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 }
290 return rcStrict;
291}
292
293
294/**
295 * Implements a 16-bit pusha.
296 */
297IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
298{
299 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
300 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
301 RTGCPTR GCPtrBottom = GCPtrTop - 15;
302 VBOXSTRICTRC rcStrict;
303
304 /*
305 * The docs are a bit hard to comprehend here, but it looks like we wrap
306 * around in real mode as long as none of the individual "pusha" crosses the
307 * end of the stack segment. In protected mode we check the whole access
308 * in one go. For efficiency, only do the word-by-word thing if we're in
309 * danger of wrapping around.
310 */
311 /** @todo do pusha boundary / wrap-around checks. */
312 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
313 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
314 {
315 /* word-by-word */
316 RTUINT64U TmpRsp;
317 TmpRsp.u = pCtx->rsp;
318 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
319 if (rcStrict == VINF_SUCCESS)
320 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
321 if (rcStrict == VINF_SUCCESS)
322 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
323 if (rcStrict == VINF_SUCCESS)
324 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
325 if (rcStrict == VINF_SUCCESS)
326 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
333 if (rcStrict == VINF_SUCCESS)
334 {
335 pCtx->rsp = TmpRsp.u;
336 iemRegAddToRip(pIemCpu, cbInstr);
337 }
338 }
339 else
340 {
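        /* GCPtrBottom was GCPtrTop - 15 for the wrap check; drop it one more
           byte so it addresses the lowest byte of the 16-byte frame written
           just below the current top of the stack. */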
341 GCPtrBottom--;
342 uint16_t *pa16Mem = NULL;
343 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
344 if (rcStrict == VINF_SUCCESS)
345 {
346 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
347 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
348 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
349 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
350 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
351 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
352 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
353 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
354 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
355 if (rcStrict == VINF_SUCCESS)
356 {
357 iemRegSubFromRsp(pCtx, 16);
358 iemRegAddToRip(pIemCpu, cbInstr);
359 }
360 }
361 }
362 return rcStrict;
363}
364
365
366/**
367 * Implements a 32-bit pusha.
368 */
369IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
370{
371 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
372 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
373 RTGCPTR GCPtrBottom = GCPtrTop - 31;
374 VBOXSTRICTRC rcStrict;
375
376 /*
377 * The docs are a bit hard to comprehend here, but it looks like we wrap
378 * around in real mode as long as none of the individual "pusha" crosses the
379 * end of the stack segment. In protected mode we check the whole access
380 * in one go. For efficiency, only do the word-by-word thing if we're in
381 * danger of wrapping around.
382 */
383 /** @todo do pusha boundary / wrap-around checks. */
384 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
385 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
386 {
387 /* word-by-word */
388 RTUINT64U TmpRsp;
389 TmpRsp.u = pCtx->rsp;
390 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
391 if (rcStrict == VINF_SUCCESS)
392 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
393 if (rcStrict == VINF_SUCCESS)
394 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
395 if (rcStrict == VINF_SUCCESS)
396 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
397 if (rcStrict == VINF_SUCCESS)
398 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
399 if (rcStrict == VINF_SUCCESS)
400 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
401 if (rcStrict == VINF_SUCCESS)
402 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
403 if (rcStrict == VINF_SUCCESS)
404 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
405 if (rcStrict == VINF_SUCCESS)
406 {
407 pCtx->rsp = TmpRsp.u;
408 iemRegAddToRip(pIemCpu, cbInstr);
409 }
410 }
411 else
412 {
413 GCPtrBottom--;
414 uint32_t *pa32Mem;
415 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
416 if (rcStrict == VINF_SUCCESS)
417 {
418 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
419 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
420 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
421 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
422 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
423 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
424 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
425 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
426 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
427 if (rcStrict == VINF_SUCCESS)
428 {
429 iemRegSubFromRsp(pCtx, 32);
430 iemRegAddToRip(pIemCpu, cbInstr);
431 }
432 }
433 }
434 return rcStrict;
435}
436
437
438/**
439 * Implements pushf.
440 *
441 *
442 * @param enmEffOpSize The effective operand size.
443 */
444IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
445{
446 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
447
448 /*
449 * If we're in V8086 mode some care is required (which is why we're
450 * doing this in a C implementation).
451 */
452 uint32_t fEfl = pCtx->eflags.u;
453 if ( (fEfl & X86_EFL_VM)
454 && X86_EFL_GET_IOPL(fEfl) != 3 )
455 {
456 Assert(pCtx->cr0 & X86_CR0_PE);
457 if ( enmEffOpSize != IEMMODE_16BIT
458 || !(pCtx->cr4 & X86_CR4_VME))
459 return iemRaiseGeneralProtectionFault0(pIemCpu);
460 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
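        /* With CR4.VME and IOPL < 3, a 16-bit PUSHF stores VIF (bit 19) in the
           IF position (bit 9) of the pushed image, hence the 19 - 9 shift below. */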
461 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
462 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
463 }
464
465 /*
466 * Ok, clear RF and VM and push the flags.
467 */
468 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
469
470 VBOXSTRICTRC rcStrict;
471 switch (enmEffOpSize)
472 {
473 case IEMMODE_16BIT:
474 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
475 break;
476 case IEMMODE_32BIT:
477 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
478 break;
479 case IEMMODE_64BIT:
480 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
481 break;
482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
483 }
484 if (rcStrict != VINF_SUCCESS)
485 return rcStrict;
486
487 iemRegAddToRip(pIemCpu, cbInstr);
488 return VINF_SUCCESS;
489}
490
491
492/**
493 * Implements popf.
494 *
495 * @param enmEffOpSize The effective operand size.
496 */
497IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
498{
499 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
500 uint32_t const fEflOld = pCtx->eflags.u;
501 VBOXSTRICTRC rcStrict;
502 uint32_t fEflNew;
503
504 /*
505 * V8086 is special as usual.
506 */
507 if (fEflOld & X86_EFL_VM)
508 {
509 /*
510 * Almost anything goes if IOPL is 3.
511 */
512 if (X86_EFL_GET_IOPL(fEflOld) == 3)
513 {
514 switch (enmEffOpSize)
515 {
516 case IEMMODE_16BIT:
517 {
518 uint16_t u16Value;
519 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
520 if (rcStrict != VINF_SUCCESS)
521 return rcStrict;
522 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
523 break;
524 }
525 case IEMMODE_32BIT:
526 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
527 if (rcStrict != VINF_SUCCESS)
528 return rcStrict;
529 break;
530 IEM_NOT_REACHED_DEFAULT_CASE_RET();
531 }
532
533 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
534 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
535 }
536 /*
537 * Interrupt flag virtualization with CR4.VME=1.
538 */
539 else if ( enmEffOpSize == IEMMODE_16BIT
540 && (pCtx->cr4 & X86_CR4_VME) )
541 {
542 uint16_t u16Value;
543 RTUINT64U TmpRsp;
544 TmpRsp.u = pCtx->rsp;
545 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
546 if (rcStrict != VINF_SUCCESS)
547 return rcStrict;
548
549 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
550 * or before? */
551 if ( ( (u16Value & X86_EFL_IF)
552 && (fEflOld & X86_EFL_VIP))
553 || (u16Value & X86_EFL_TF) )
554 return iemRaiseGeneralProtectionFault0(pIemCpu);
555
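            /* VME: the popped IF bit is copied into the VIF position (bit 19,
               hence the 19 - 9 shift), while the real IF is taken from the
               previous EFLAGS value by the masking that follows. */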
556 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
557 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
558 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
559 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
560
561 pCtx->rsp = TmpRsp.u;
562 }
563 else
564 return iemRaiseGeneralProtectionFault0(pIemCpu);
565
566 }
567 /*
568 * Not in V8086 mode.
569 */
570 else
571 {
572 /* Pop the flags. */
573 switch (enmEffOpSize)
574 {
575 case IEMMODE_16BIT:
576 {
577 uint16_t u16Value;
578 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
579 if (rcStrict != VINF_SUCCESS)
580 return rcStrict;
581 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
582 break;
583 }
584 case IEMMODE_32BIT:
585 case IEMMODE_64BIT:
586 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
587 if (rcStrict != VINF_SUCCESS)
588 return rcStrict;
589 break;
590 IEM_NOT_REACHED_DEFAULT_CASE_RET();
591 }
592
593 /* Merge them with the current flags. */
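        /* Three cases: if CPL is 0 (or IOPL/IF would not change anyway), take
           every POPF-modifiable flag from the popped value; if CPL <= IOPL,
           take everything except IOPL; otherwise take everything except IOPL
           and IF. */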
594 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
595 || pIemCpu->uCpl == 0)
596 {
597 fEflNew &= X86_EFL_POPF_BITS;
598 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
599 }
600 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
601 {
602 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
603 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
604 }
605 else
606 {
607 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
608 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
609 }
610 }
611
612 /*
613 * Commit the flags.
614 */
615 Assert(fEflNew & RT_BIT_32(1));
616 pCtx->eflags.u = fEflNew;
617 iemRegAddToRip(pIemCpu, cbInstr);
618
619 return VINF_SUCCESS;
620}
621
622
623/**
624 * Implements a 16-bit indirect call.
625 *
626 * @param uNewPC The new program counter (RIP) value (loaded from the
627 * operand).
628 * @param enmEffOpSize The effective operand size.
629 */
630IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
631{
632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
633 uint16_t uOldPC = pCtx->ip + cbInstr;
634 if (uNewPC > pCtx->csHid.u32Limit)
635 return iemRaiseGeneralProtectionFault0(pIemCpu);
636
637 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
638 if (rcStrict != VINF_SUCCESS)
639 return rcStrict;
640
641 pCtx->rip = uNewPC;
642 return VINF_SUCCESS;
643
644}
645
646
647/**
648 * Implements a 16-bit relative call.
649 *
650 * @param offDisp The displacement offset.
651 */
652IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
653{
654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
655 uint16_t uOldPC = pCtx->ip + cbInstr;
656 uint16_t uNewPC = uOldPC + offDisp;
657 if (uNewPC > pCtx->csHid.u32Limit)
658 return iemRaiseGeneralProtectionFault0(pIemCpu);
659
660 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
661 if (rcStrict != VINF_SUCCESS)
662 return rcStrict;
663
664 pCtx->rip = uNewPC;
665 return VINF_SUCCESS;
666}
667
668
669/**
670 * Implements a 32-bit indirect call.
671 *
672 * @param uNewPC The new program counter (RIP) value (loaded from the
673 * operand).
674 * @param enmEffOpSize The effective operand size.
675 */
676IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
677{
678 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
679 uint32_t uOldPC = pCtx->eip + cbInstr;
680 if (uNewPC > pCtx->csHid.u32Limit)
681 return iemRaiseGeneralProtectionFault0(pIemCpu);
682
683 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
684 if (rcStrict != VINF_SUCCESS)
685 return rcStrict;
686
687 pCtx->rip = uNewPC;
688 return VINF_SUCCESS;
689
690}
691
692
693/**
694 * Implements a 32-bit relative call.
695 *
696 * @param offDisp The displacement offset.
697 */
698IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
699{
700 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
701 uint32_t uOldPC = pCtx->eip + cbInstr;
702 uint32_t uNewPC = uOldPC + offDisp;
703 if (uNewPC > pCtx->csHid.u32Limit)
704 return iemRaiseGeneralProtectionFault0(pIemCpu);
705
706 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
707 if (rcStrict != VINF_SUCCESS)
708 return rcStrict;
709
710 pCtx->rip = uNewPC;
711 return VINF_SUCCESS;
712}
713
714
715/**
716 * Implements a 64-bit indirect call.
717 *
718 * @param uNewPC The new program counter (RIP) value (loaded from the
719 * operand).
720 * @param enmEffOpSize The effective operand size.
721 */
722IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
723{
724 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
725 uint64_t uOldPC = pCtx->rip + cbInstr;
726 if (!IEM_IS_CANONICAL(uNewPC))
727 return iemRaiseGeneralProtectionFault0(pIemCpu);
728
729 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
730 if (rcStrict != VINF_SUCCESS)
731 return rcStrict;
732
733 pCtx->rip = uNewPC;
734 return VINF_SUCCESS;
735
736}
737
738
739/**
740 * Implements a 64-bit relative call.
741 *
742 * @param offDisp The displacement offset.
743 */
744IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
745{
746 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
747 uint64_t uOldPC = pCtx->rip + cbInstr;
748 uint64_t uNewPC = uOldPC + offDisp;
749 if (!IEM_IS_CANONICAL(uNewPC))
750 return iemRaiseNotCanonical(pIemCpu);
751
752 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
753 if (rcStrict != VINF_SUCCESS)
754 return rcStrict;
755
756 pCtx->rip = uNewPC;
757 return VINF_SUCCESS;
758}
759
760
761/**
762 * Implements far jumps and calls thru task segments (TSS).
763 *
764 * @param uSel The selector.
765 * @param enmBranch The kind of branching we're performing.
766 * @param enmEffOpSize The effective operand size.
767 * @param pDesc The descriptor corresponding to @a uSel. The type is
768 * task segment.
769 */
770IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
771{
772 /* Call various functions to do the work. */
773 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
774}
775
776
777/**
778 * Implements far jumps and calls thru task gates.
779 *
780 * @param uSel The selector.
781 * @param enmBranch The kind of branching we're performing.
782 * @param enmEffOpSize The effective operand size.
783 * @param pDesc The descriptor corresponding to @a uSel. The type is
784 * task gate.
785 */
786IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
787{
788 /* Call various functions to do the work. */
789 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
790}
791
792
793/**
794 * Implements far jumps and calls thru call gates.
795 *
796 * @param uSel The selector.
797 * @param enmBranch The kind of branching we're performing.
798 * @param enmEffOpSize The effective operand size.
799 * @param pDesc The descriptor corresponding to @a uSel. The type is
800 * call gate.
801 */
802IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
803{
804 /* Call various functions to do the work. */
805 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
806}
807
808
809/**
810 * Implements far jumps and calls thru system selectors.
811 *
812 * @param uSel The selector.
813 * @param enmBranch The kind of branching we're performing.
814 * @param enmEffOpSize The effective operand size.
815 * @param pDesc The descriptor corresponding to @a uSel.
816 */
817IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
818{
819 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
820 Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
821
822 if (IEM_IS_LONG_MODE(pIemCpu))
823 switch (pDesc->Legacy.Gen.u4Type)
824 {
825 case AMD64_SEL_TYPE_SYS_CALL_GATE:
826 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
827
828 default:
829 case AMD64_SEL_TYPE_SYS_LDT:
830 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
831 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
832 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
833 case AMD64_SEL_TYPE_SYS_INT_GATE:
834 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
835 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
836
837 }
838
839 switch (pDesc->Legacy.Gen.u4Type)
840 {
841 case X86_SEL_TYPE_SYS_286_CALL_GATE:
842 case X86_SEL_TYPE_SYS_386_CALL_GATE:
843 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
844
845 case X86_SEL_TYPE_SYS_TASK_GATE:
846 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
847
848 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
849 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
850 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
851
852 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
853 Log(("branch %04x -> busy 286 TSS\n", uSel));
854 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
855
856 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
857 Log(("branch %04x -> busy 386 TSS\n", uSel));
858 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
859
860 default:
861 case X86_SEL_TYPE_SYS_LDT:
862 case X86_SEL_TYPE_SYS_286_INT_GATE:
863 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
864 case X86_SEL_TYPE_SYS_386_INT_GATE:
865 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
866 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
867 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
868 }
869}
870
871
872/**
873 * Implements far jumps.
874 *
875 * @param uSel The selector.
876 * @param offSeg The segment offset.
877 * @param enmEffOpSize The effective operand size.
878 */
879IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
880{
881 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
882 NOREF(cbInstr);
883 Assert(offSeg <= UINT32_MAX);
884
885 /*
886 * Real mode and V8086 mode are easy. The only snag seems to be that
887 * CS.limit doesn't change and the limit check is done against the current
888 * limit.
889 */
890 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
891 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
892 {
893 if (offSeg > pCtx->csHid.u32Limit)
894 return iemRaiseGeneralProtectionFault0(pIemCpu);
895
896 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
897 pCtx->rip = offSeg;
898 else
899 pCtx->rip = offSeg & UINT16_MAX;
900 pCtx->cs = uSel;
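        /* In real and V8086 mode the segment base is simply the selector
           shifted left by four bits; the cached limit and attributes are left
           as-is (see the remark above about CS.limit). */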
901 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
902 /** @todo REM resets the accessed bit (seen on jmp far16 after disabling
903 * PE). Check with VT-x and AMD-V. */
904#ifdef IEM_VERIFICATION_MODE
905 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
906#endif
907 return VINF_SUCCESS;
908 }
909
910 /*
911 * Protected mode. Need to parse the specified descriptor...
912 */
913 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
914 {
915 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
916 return iemRaiseGeneralProtectionFault0(pIemCpu);
917 }
918
919 /* Fetch the descriptor. */
920 IEMSELDESC Desc;
921 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
922 if (rcStrict != VINF_SUCCESS)
923 return rcStrict;
924
925 /* Is it there? */
926 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
927 {
928 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
929 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
930 }
931
932 /*
933 * Deal with it according to its type. We do the standard code selectors
934 * here and dispatch the system selectors to worker functions.
935 */
936 if (!Desc.Legacy.Gen.u1DescType)
937 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
938
939 /* Only code segments. */
940 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
941 {
942 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
943 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
944 }
945
946 /* L vs D. */
947 if ( Desc.Legacy.Gen.u1Long
948 && Desc.Legacy.Gen.u1DefBig
949 && IEM_IS_LONG_MODE(pIemCpu))
950 {
951 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
952 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
953 }
954
955 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
956 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
957 {
958 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
959 {
960 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
961 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
962 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
963 }
964 }
965 else
966 {
967 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
968 {
969 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
970 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
971 }
972 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
973 {
974 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
975 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
976 }
977 }
978
979 /* Chop the high bits if 16-bit (Intel says so). */
980 if (enmEffOpSize == IEMMODE_16BIT)
981 offSeg &= UINT16_MAX;
982
983 /* Limit check. (Should alternatively check for non-canonical addresses
984 here, but that is ruled out by offSeg being 32-bit, right?) */
985 uint64_t u64Base;
986 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
987 if (Desc.Legacy.Gen.u1Granularity)
988 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
989 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
990 u64Base = 0;
991 else
992 {
993 if (offSeg > cbLimit)
994 {
995 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
996 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
997 }
998 u64Base = X86DESC_BASE(Desc.Legacy);
999 }
1000
1001 /*
1002 * Ok, everything checked out fine. Now set the accessed bit before
1003 * committing the result into CS, CSHID and RIP.
1004 */
1005 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1006 {
1007 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1008 if (rcStrict != VINF_SUCCESS)
1009 return rcStrict;
1010#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1011 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1012#endif
1013 }
1014
1015 /* commit */
1016 pCtx->rip = offSeg;
1017 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1018 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1019 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1020 pCtx->csHid.u32Limit = cbLimit;
1021 pCtx->csHid.u64Base = u64Base;
1022 /** @todo check if the hidden bits are loaded correctly for 64-bit
1023 * mode. */
1024 return VINF_SUCCESS;
1025}
1026
1027
1028/**
1029 * Implements far calls.
1030 *
1031 * This is very similar to iemCImpl_FarJmp.
1032 *
1033 * @param uSel The selector.
1034 * @param offSeg The segment offset.
1035 * @param enmEffOpSize The operand size (in case we need it).
1036 */
1037IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1038{
1039 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1040 VBOXSTRICTRC rcStrict;
1041 uint64_t uNewRsp;
1042 RTPTRUNION uPtrRet;
1043
1044 /*
1045 * Real mode and V8086 mode are easy. The only snag seems to be that
1046 * CS.limit doesn't change and the limit check is done against the current
1047 * limit.
1048 */
1049 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1050 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1051 {
1052 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1053
1054 /* Check stack first - may #SS(0). */
1055 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1056 &uPtrRet.pv, &uNewRsp);
1057 if (rcStrict != VINF_SUCCESS)
1058 return rcStrict;
1059
1060 /* Check the target address range. */
1061 if (offSeg > UINT32_MAX)
1062 return iemRaiseGeneralProtectionFault0(pIemCpu);
1063
1064 /* Everything is fine, push the return address. */
1065 if (enmEffOpSize == IEMMODE_16BIT)
1066 {
1067 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1068 uPtrRet.pu16[1] = pCtx->cs;
1069 }
1070 else
1071 {
1072 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1073 uPtrRet.pu16[3] = pCtx->cs;
1074 }
1075 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1076 if (rcStrict != VINF_SUCCESS)
1077 return rcStrict;
1078
1079 /* Branch. */
1080 pCtx->rip = offSeg;
1081 pCtx->cs = uSel;
1082 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
1083 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
1084 * after disabling PE.) Check with VT-x and AMD-V. */
1085#ifdef IEM_VERIFICATION_MODE
1086 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
1087#endif
1088 return VINF_SUCCESS;
1089 }
1090
1091 /*
1092 * Protected mode. Need to parse the specified descriptor...
1093 */
1094 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1095 {
1096 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1097 return iemRaiseGeneralProtectionFault0(pIemCpu);
1098 }
1099
1100 /* Fetch the descriptor. */
1101 IEMSELDESC Desc;
1102 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1103 if (rcStrict != VINF_SUCCESS)
1104 return rcStrict;
1105
1106 /*
1107 * Deal with it according to its type. We do the standard code selectors
1108 * here and dispatch the system selectors to worker functions.
1109 */
1110 if (!Desc.Legacy.Gen.u1DescType)
1111 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1112
1113 /* Only code segments. */
1114 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1115 {
1116 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1117 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1118 }
1119
1120 /* L vs D. */
1121 if ( Desc.Legacy.Gen.u1Long
1122 && Desc.Legacy.Gen.u1DefBig
1123 && IEM_IS_LONG_MODE(pIemCpu))
1124 {
1125 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1126 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1127 }
1128
1129 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1130 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1131 {
1132 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1133 {
1134 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1135 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1136 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1137 }
1138 }
1139 else
1140 {
1141 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1142 {
1143 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1144 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1145 }
1146 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1147 {
1148 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1149 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1150 }
1151 }
1152
1153 /* Is it there? */
1154 if (!Desc.Legacy.Gen.u1Present)
1155 {
1156 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1157 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1158 }
1159
1160 /* Check stack first - may #SS(0). */
1161 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1162 * 16-bit code cause a two or four byte CS to be pushed? */
1163 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1164 enmEffOpSize == IEMMODE_64BIT ? 8+8
1165 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1166 &uPtrRet.pv, &uNewRsp);
1167 if (rcStrict != VINF_SUCCESS)
1168 return rcStrict;
1169
1170 /* Chop the high bits if 16-bit (Intel says so). */
1171 if (enmEffOpSize == IEMMODE_16BIT)
1172 offSeg &= UINT16_MAX;
1173
1174 /* Limit / canonical check. */
1175 uint64_t u64Base;
1176 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1177 if (Desc.Legacy.Gen.u1Granularity)
1178 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1179
1180 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1181 {
1182 if (!IEM_IS_CANONICAL(offSeg))
1183 {
1184 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1185 return iemRaiseNotCanonical(pIemCpu);
1186 }
1187 u64Base = 0;
1188 }
1189 else
1190 {
1191 if (offSeg > cbLimit)
1192 {
1193 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1194 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1195 }
1196 u64Base = X86DESC_BASE(Desc.Legacy);
1197 }
1198
1199 /*
1200 * Now set the accessed bit before
1201 * writing the return address to the stack and committing the result into
1202 * CS, CSHID and RIP.
1203 */
1204 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1205 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1206 {
1207 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1208 if (rcStrict != VINF_SUCCESS)
1209 return rcStrict;
1210#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1211 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1212#endif
1213 }
1214
1215 /* stack */
1216 if (enmEffOpSize == IEMMODE_16BIT)
1217 {
1218 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1219 uPtrRet.pu16[1] = pCtx->cs;
1220 }
1221 else if (enmEffOpSize == IEMMODE_32BIT)
1222 {
1223 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1224 uPtrRet.pu32[1] = pCtx->cs; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1225 }
1226 else
1227 {
1228 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1229 uPtrRet.pu64[1] = pCtx->cs; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1230 }
1231 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1232 if (rcStrict != VINF_SUCCESS)
1233 return rcStrict;
1234
1235 /* commit */
1236 pCtx->rip = offSeg;
1237 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1238 pCtx->cs |= pIemCpu->uCpl;
1239 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1240 pCtx->csHid.u32Limit = cbLimit;
1241 pCtx->csHid.u64Base = u64Base;
1242 /** @todo check if the hidden bits are loaded correctly for 64-bit
1243 * mode. */
1244 return VINF_SUCCESS;
1245}
1246
1247
1248/**
1249 * Implements retf.
1250 *
1251 * @param enmEffOpSize The effective operand size.
1252 * @param cbPop The number of bytes of arguments to pop from the
1253 * stack.
1254 */
1255IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1256{
1257 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1258 VBOXSTRICTRC rcStrict;
1259 RTCPTRUNION uPtrFrame;
1260 uint64_t uNewRsp;
1261 uint64_t uNewRip;
1262 uint16_t uNewCs;
1263 NOREF(cbInstr);
1264
1265 /*
1266 * Read the stack values first.
1267 */
1268 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1269 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1270 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1271 if (rcStrict != VINF_SUCCESS)
1272 return rcStrict;
1273 if (enmEffOpSize == IEMMODE_16BIT)
1274 {
1275 uNewRip = uPtrFrame.pu16[0];
1276 uNewCs = uPtrFrame.pu16[1];
1277 }
1278 else if (enmEffOpSize == IEMMODE_32BIT)
1279 {
1280 uNewRip = uPtrFrame.pu32[0];
1281 uNewCs = uPtrFrame.pu16[2];
1282 }
1283 else
1284 {
1285 uNewRip = uPtrFrame.pu64[0];
1286 uNewCs = uPtrFrame.pu16[4];
1287 }
1288
1289 /*
1290 * Real mode and V8086 mode are easy.
1291 */
1292 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1293 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1294 {
1295 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1296 /** @todo check how this is supposed to work if sp=0xfffe. */
1297
1298 /* Check the limit of the new EIP. */
1299 /** @todo Intel pseudo code only does the limit check for 16-bit
1300 * operands, AMD does not make any distinction. What is right? */
1301 if (uNewRip > pCtx->csHid.u32Limit)
1302 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1303
1304 /* commit the operation. */
1305 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1306 if (rcStrict != VINF_SUCCESS)
1307 return rcStrict;
1308 pCtx->rip = uNewRip;
1309 pCtx->cs = uNewCs;
1310 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1311 /** @todo do we load attribs and limit as well? */
1312 if (cbPop)
1313 iemRegAddToRsp(pCtx, cbPop);
1314 return VINF_SUCCESS;
1315 }
1316
1317 /*
1318 * Protected mode is complicated, of course.
1319 */
1320 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1321 {
1322 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1323 return iemRaiseGeneralProtectionFault0(pIemCpu);
1324 }
1325
1326 /* Fetch the descriptor. */
1327 IEMSELDESC DescCs;
1328 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1329 if (rcStrict != VINF_SUCCESS)
1330 return rcStrict;
1331
1332 /* Can only return to a code selector. */
1333 if ( !DescCs.Legacy.Gen.u1DescType
1334 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1335 {
1336 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1337 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1338 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1339 }
1340
1341 /* L vs D. */
1342 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1343 && DescCs.Legacy.Gen.u1DefBig
1344 && IEM_IS_LONG_MODE(pIemCpu))
1345 {
1346 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1347 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1348 }
1349
1350 /* DPL/RPL/CPL checks. */
1351 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1352 {
1353 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1354 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1355 }
1356
1357 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1358 {
1359 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1360 {
1361 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1362 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1363 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1364 }
1365 }
1366 else
1367 {
1368 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1369 {
1370 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1371 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1372 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1373 }
1374 }
1375
1376 /* Is it there? */
1377 if (!DescCs.Legacy.Gen.u1Present)
1378 {
1379 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1380 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1381 }
1382
1383 /*
1384 * Return to outer privilege? (We'll typically have entered via a call gate.)
1385 */
1386 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1387 {
1388 /* Read the return pointer, it comes before the parameters. */
1389 RTCPTRUNION uPtrStack;
1390 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1391 if (rcStrict != VINF_SUCCESS)
1392 return rcStrict;
1393 uint16_t uNewOuterSs;
1394 uint64_t uNewOuterRsp;
1395 if (enmEffOpSize == IEMMODE_16BIT)
1396 {
1397 uNewOuterRsp = uPtrFrame.pu16[0];
1398 uNewOuterSs = uPtrFrame.pu16[1];
1399 }
1400 else if (enmEffOpSize == IEMMODE_32BIT)
1401 {
1402 uNewOuterRsp = uPtrFrame.pu32[0];
1403 uNewOuterSs = uPtrFrame.pu16[2];
1404 }
1405 else
1406 {
1407 uNewOuterRsp = uPtrFrame.pu64[0];
1408 uNewOuterSs = uPtrFrame.pu16[4];
1409 }
1410
1411 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1412 and read the selector. */
1413 IEMSELDESC DescSs;
1414 if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
1415 {
1416 if ( !DescCs.Legacy.Gen.u1Long
1417 || (uNewOuterSs & X86_SEL_RPL) == 3)
1418 {
1419 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1420 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1421 return iemRaiseGeneralProtectionFault0(pIemCpu);
1422 }
1423 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1424 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1425 }
1426 else
1427 {
1428 /* Fetch the descriptor for the new stack segment. */
1429 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1430 if (rcStrict != VINF_SUCCESS)
1431 return rcStrict;
1432 }
1433
1434 /* Check that RPL of stack and code selectors match. */
1435 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1436 {
1437 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1438 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1439 }
1440
1441 /* Must be a writable data segment. */
1442 if ( !DescSs.Legacy.Gen.u1DescType
1443 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1444 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1445 {
1446 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1447 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1448 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1449 }
1450
1451 /* L vs D. (Not mentioned by intel.) */
1452 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1453 && DescSs.Legacy.Gen.u1DefBig
1454 && IEM_IS_LONG_MODE(pIemCpu))
1455 {
1456 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1457 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1458 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1459 }
1460
1461 /* DPL/RPL/CPL checks. */
1462 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1463 {
1464 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1465 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1466 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1467 }
1468
1469 /* Is it there? */
1470 if (!DescSs.Legacy.Gen.u1Present)
1471 {
1472 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1473 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewOuterSs);
1474 }
1475
1476 /* Calc SS limit.*/
1477 uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy);
1478 if (DescSs.Legacy.Gen.u1Granularity)
1479 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1480
1481
1482 /* Is RIP canonical or within CS.limit? */
1483 uint64_t u64Base;
1484 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1485 if (DescCs.Legacy.Gen.u1Granularity)
1486 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1487
1488 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1489 {
1490 if (!IEM_IS_CANONICAL(uNewRip))
1491 {
1492 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1493 return iemRaiseNotCanonical(pIemCpu);
1494 }
1495 u64Base = 0;
1496 }
1497 else
1498 {
1499 if (uNewRip > cbLimitCs)
1500 {
1501 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1502 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1503 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1504 }
1505 u64Base = X86DESC_BASE(DescCs.Legacy);
1506 }
1507
1508 /*
1509 * Now set the accessed bit before
1510 * writing the return address to the stack and committing the result into
1511 * CS, CSHID and RIP.
1512 */
1513 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1514 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1515 {
1516 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1517 if (rcStrict != VINF_SUCCESS)
1518 return rcStrict;
1519#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1520 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1521#endif
1522 }
1523 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1524 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1525 {
1526 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1527 if (rcStrict != VINF_SUCCESS)
1528 return rcStrict;
1529#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1530 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1531#endif
1532 }
1533
1534 /* commit */
1535 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1536 if (rcStrict != VINF_SUCCESS)
1537 return rcStrict;
1538 if (enmEffOpSize == IEMMODE_16BIT)
1539 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1540 else
1541 pCtx->rip = uNewRip;
1542 pCtx->cs = uNewCs;
1543 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1544 pCtx->csHid.u32Limit = cbLimitCs;
1545 pCtx->csHid.u64Base = u64Base;
1546 pCtx->rsp = uNewRsp;
1547 pCtx->ss = uNewCs;
1548 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSs.Legacy);
1549 pCtx->ssHid.u32Limit = cbLimitSs;
1550 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1551 pCtx->ssHid.u64Base = 0;
1552 else
1553 pCtx->ssHid.u64Base = X86DESC_BASE(DescSs.Legacy);
1554
1555 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1556 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
1557 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
1558 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
1559 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
1560
1561 /** @todo check if the hidden bits are loaded correctly for 64-bit
1562 * mode. */
1563
1564 if (cbPop)
1565 iemRegAddToRsp(pCtx, cbPop);
1566
1567 /* Done! */
1568 }
1569 /*
1570 * Return to the same privilege level
1571 */
1572 else
1573 {
1574 /* Limit / canonical check. */
1575 uint64_t u64Base;
1576 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1577 if (DescCs.Legacy.Gen.u1Granularity)
1578 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1579
1580 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1581 {
1582 if (!IEM_IS_CANONICAL(uNewRip))
1583 {
1584 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1585 return iemRaiseNotCanonical(pIemCpu);
1586 }
1587 u64Base = 0;
1588 }
1589 else
1590 {
1591 if (uNewRip > cbLimitCs)
1592 {
1593 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1594 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1595 }
1596 u64Base = X86DESC_BASE(DescCs.Legacy);
1597 }
1598
1599 /*
1600 * Now set the accessed bit before
1601 * writing the return address to the stack and committing the result into
1602 * CS, CSHID and RIP.
1603 */
1604 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1605 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1606 {
1607 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1608 if (rcStrict != VINF_SUCCESS)
1609 return rcStrict;
1610#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1611 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1612#endif
1613 }
1614
1615 /* commit */
1616 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1617 if (rcStrict != VINF_SUCCESS)
1618 return rcStrict;
1619 if (enmEffOpSize == IEMMODE_16BIT)
1620 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1621 else
1622 pCtx->rip = uNewRip;
1623 pCtx->cs = uNewCs;
1624 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1625 pCtx->csHid.u32Limit = cbLimitCs;
1626 pCtx->csHid.u64Base = u64Base;
1627 /** @todo check if the hidden bits are loaded correctly for 64-bit
1628 * mode. */
1629 if (cbPop)
1630 iemRegAddToRsp(pCtx, cbPop);
1631 }
1632 return VINF_SUCCESS;
1633}
1634
1635
1636/**
1637 * Implements retn.
1638 *
1639 * We're doing this in C because of the \#GP that might be raised if the popped
1640 * program counter is out of bounds.
1641 *
1642 * @param enmEffOpSize The effective operand size.
1643 * @param cbPop The number of bytes of arguments to pop from the
1644 * stack.
1645 */
1646IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1647{
1648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1649 NOREF(cbInstr);
1650
1651 /* Fetch the RSP from the stack. */
1652 VBOXSTRICTRC rcStrict;
1653 RTUINT64U NewRip;
1654 RTUINT64U NewRsp;
1655 NewRsp.u = pCtx->rsp;
1656 switch (enmEffOpSize)
1657 {
1658 case IEMMODE_16BIT:
1659 NewRip.u = 0;
1660 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1661 break;
1662 case IEMMODE_32BIT:
1663 NewRip.u = 0;
1664 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1665 break;
1666 case IEMMODE_64BIT:
1667 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1668 break;
1669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1670 }
1671 if (rcStrict != VINF_SUCCESS)
1672 return rcStrict;
1673
1674 /* Check the new RSP before loading it. */
1675 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1676 * of it. The canonical test is performed here and for call. */
1677 if (enmEffOpSize != IEMMODE_64BIT)
1678 {
1679 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
1680 {
1681 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
1682 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1683 }
1684 }
1685 else
1686 {
1687 if (!IEM_IS_CANONICAL(NewRip.u))
1688 {
1689 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1690 return iemRaiseNotCanonical(pIemCpu);
1691 }
1692 }
1693
1694 /* Commit it. */
1695 pCtx->rip = NewRip.u;
1696 pCtx->rsp = NewRsp.u;
1697 if (cbPop)
1698 iemRegAddToRsp(pCtx, cbPop);
1699
1700 return VINF_SUCCESS;
1701}
1702
1703
1704/**
1705 * Implements leave.
1706 *
1707 * We're doing this in C because messing with the stack registers is annoying
1708 * since they depend on SS attributes.
1709 *
1710 * @param enmEffOpSize The effective operand size.
1711 */
1712IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1713{
1714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1715
1716 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1717 RTUINT64U NewRsp;
1718 if (pCtx->ssHid.Attr.n.u1Long)
1719 {
1720 /** @todo Check that LEAVE actually preserve the high EBP bits. */
1721 NewRsp.u = pCtx->rsp;
1722 NewRsp.Words.w0 = pCtx->bp;
1723 }
1724 else if (pCtx->ssHid.Attr.n.u1DefBig)
1725 NewRsp.u = pCtx->ebp;
1726 else
1727 NewRsp.u = pCtx->rbp;
1728
1729 /* Pop RBP according to the operand size. */
1730 VBOXSTRICTRC rcStrict;
1731 RTUINT64U NewRbp;
1732 switch (enmEffOpSize)
1733 {
1734 case IEMMODE_16BIT:
1735 NewRbp.u = pCtx->rbp;
1736 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1737 break;
1738 case IEMMODE_32BIT:
1739 NewRbp.u = 0;
1740 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1741 break;
1742 case IEMMODE_64BIT:
1743 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1744 break;
1745 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1746 }
1747 if (rcStrict != VINF_SUCCESS)
1748 return rcStrict;
1749
1750
1751 /* Commit it. */
1752 pCtx->rbp = NewRbp.u;
1753 pCtx->rsp = NewRsp.u;
1754 iemRegAddToRip(pIemCpu, cbInstr);
1755
1756 return VINF_SUCCESS;
1757}
1758
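/*
 * Side note: the net effect of LEAVE is 'mov (e/r)sp, (e/r)bp' followed by
 * 'pop (e/r)bp' at the current operand/stack size. A hedged, standalone
 * sketch of the 32-bit flavour; names and the read callback are illustrative
 * assumptions, not IEM code:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

static void exampleLeave32(uint32_t *pEsp, uint32_t *pEbp,
                           uint32_t (*pfnRead32)(uint32_t uStackAddr))
{
    *pEsp  = *pEbp;                 /* collapse the frame: ESP = EBP */
    *pEbp  = pfnRead32(*pEsp);      /* POP EBP: read the saved frame pointer */
    *pEsp += 4;                     /*          and advance the stack pointer */
}
#endif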
1759
1760/**
1761 * Implements int3 and int XX.
1762 *
1763 * @param u8Int The interrupt vector number.
1764 * @param fIsBpInstr Whether it is the int3 breakpoint instruction.
1765 */
1766IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1767{
1768 Assert(pIemCpu->cXcptRecursions == 0);
1769 return iemRaiseXcptOrInt(pIemCpu,
1770 cbInstr,
1771 u8Int,
1772 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1773 0,
1774 0);
1775}
1776
1777
1778/**
1779 * Implements iret for real mode and V8086 mode.
1780 *
1781 * @param enmEffOpSize The effective operand size.
1782 */
1783IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1784{
1785 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1786 NOREF(cbInstr);
1787
1788 /*
1789 * iret throws an exception if VME isn't enabled.
1790 */
1791 if ( pCtx->eflags.Bits.u1VM
1792 && !(pCtx->cr4 & X86_CR4_VME))
1793 return iemRaiseGeneralProtectionFault0(pIemCpu);
1794
1795 /*
1796 * Do the stack bits, but don't commit RSP before everything checks
1797 * out right.
1798 */
1799 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1800 VBOXSTRICTRC rcStrict;
1801 RTCPTRUNION uFrame;
1802 uint16_t uNewCs;
1803 uint32_t uNewEip;
1804 uint32_t uNewFlags;
1805 uint64_t uNewRsp;
1806 if (enmEffOpSize == IEMMODE_32BIT)
1807 {
1808 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1809 if (rcStrict != VINF_SUCCESS)
1810 return rcStrict;
1811 uNewEip = uFrame.pu32[0];
1812 uNewCs = (uint16_t)uFrame.pu32[1];
1813 uNewFlags = uFrame.pu32[2];
1814 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1815 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1816 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1817 | X86_EFL_ID;
1818 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1819 }
1820 else
1821 {
1822 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1823 if (rcStrict != VINF_SUCCESS)
1824 return rcStrict;
1825 uNewEip = uFrame.pu16[0];
1826 uNewCs = uFrame.pu16[1];
1827 uNewFlags = uFrame.pu16[2];
1828 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1829 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1830 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1831 /** @todo The intel pseudo code does not indicate what happens to
1832 * reserved flags. We just ignore them. */
1833 }
1834 /** @todo Check how this is supposed to work if sp=0xfffe. */
1835
1836 /*
1837 * Check the limit of the new EIP.
1838 */
1839 /** @todo Only the AMD pseudo code checks the limit here, what's
1840 * right? */
1841 if (uNewEip > pCtx->csHid.u32Limit)
1842 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1843
1844 /*
1845 * V8086 checks and flag adjustments
1846 */
1847 if (pCtx->eflags.Bits.u1VM)
1848 {
1849 if (pCtx->eflags.Bits.u2IOPL == 3)
1850 {
1851 /* Preserve IOPL and clear RF. */
1852 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1853 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1854 }
1855 else if ( enmEffOpSize == IEMMODE_16BIT
1856 && ( !(uNewFlags & X86_EFL_IF)
1857 || !pCtx->eflags.Bits.u1VIP )
1858 && !(uNewFlags & X86_EFL_TF) )
1859 {
1860 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1861 uNewFlags &= ~X86_EFL_VIF;
1862 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1863 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1864 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1865 }
1866 else
1867 return iemRaiseGeneralProtectionFault0(pIemCpu);
1868 }
1869
1870 /*
1871 * Commit the operation.
1872 */
1873 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1874 if (rcStrict != VINF_SUCCESS)
1875 return rcStrict;
1876 pCtx->rip = uNewEip;
1877 pCtx->cs = uNewCs;
1878 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1879 /** @todo do we load attribs and limit as well? */
1880 Assert(uNewFlags & X86_EFL_1);
1881 pCtx->eflags.u = uNewFlags;
1882
1883 return VINF_SUCCESS;
1884}
1885
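/*
 * Side note: the 16-bit real-mode IRET frame popped above is, from the
 * current SS:SP upwards, IP, CS and FLAGS (2 bytes each); the 32-bit flavour
 * pops EIP, CS (in a 4-byte slot) and EFLAGS. Layout sketch with
 * illustrative names:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>
# pragma pack(1)
typedef struct EXAMPLEIRETFRAME16
{
    uint16_t uIp;       /* [sp+0] new IP */
    uint16_t uCs;       /* [sp+2] new CS */
    uint16_t uFlags;    /* [sp+4] new FLAGS */
} EXAMPLEIRETFRAME16;   /* 6 bytes, matching the 6-byte pop above */
# pragma pack()
#endif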
1886
1887/**
1888 * Implements iret for protected mode.
1889 *
1890 * @param enmEffOpSize The effective operand size.
1891 */
1892IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
1893{
1894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1895 NOREF(cbInstr);
1896
1897 /*
1898 * Nested task return.
1899 */
1900 if (pCtx->eflags.Bits.u1NT)
1901 {
1902 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1903 }
1904 /*
1905 * Normal return.
1906 */
1907 else
1908 {
1909 /*
1910 * Do the stack bits, but don't commit RSP before everything checks
1911 * out right.
1912 */
1913 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1914 VBOXSTRICTRC rcStrict;
1915 RTCPTRUNION uFrame;
1916 uint16_t uNewCs;
1917 uint32_t uNewEip;
1918 uint32_t uNewFlags;
1919 uint64_t uNewRsp;
1920 if (enmEffOpSize == IEMMODE_32BIT)
1921 {
1922 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1923 if (rcStrict != VINF_SUCCESS)
1924 return rcStrict;
1925 uNewEip = uFrame.pu32[0];
1926 uNewCs = (uint16_t)uFrame.pu32[1];
1927 uNewFlags = uFrame.pu32[2];
1928 }
1929 else
1930 {
1931 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1932 if (rcStrict != VINF_SUCCESS)
1933 return rcStrict;
1934 uNewEip = uFrame.pu16[0];
1935 uNewCs = uFrame.pu16[1];
1936 uNewFlags = uFrame.pu16[2];
1937 }
1938 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
1939 if (rcStrict != VINF_SUCCESS)
1940 return rcStrict;
1941
1942 /*
1943 * What are we returning to?
1944 */
1945 if ( (uNewFlags & X86_EFL_VM)
1946 && pIemCpu->uCpl == 0)
1947 {
1948 /* V8086 mode! */
1949 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1950 }
1951 else
1952 {
1953 /*
1954 * Protected mode.
1955 */
1956 /* Read the CS descriptor. */
1957 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1958 {
1959 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
1960 return iemRaiseGeneralProtectionFault0(pIemCpu);
1961 }
1962
1963 IEMSELDESC DescCS;
1964 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
1965 if (rcStrict != VINF_SUCCESS)
1966 {
1967 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
1968 return rcStrict;
1969 }
1970
1971 /* Must be a code descriptor. */
1972 if (!DescCS.Legacy.Gen.u1DescType)
1973 {
1974 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1975 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1976 }
1977 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1978 {
1979 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1980 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1981 }
1982
1983 /* Privilege checks. */
1984 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1985 {
1986 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
1987 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1988 }
1989 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1990 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
1991 {
1992 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
1993 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1994 }
1995
1996 /* Present? */
1997 if (!DescCS.Legacy.Gen.u1Present)
1998 {
1999 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2000 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2001 }
2002
2003 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
2004 if (DescCS.Legacy.Gen.u1Granularity)
2005 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2006
2007 /*
2008 * Return to outer level?
2009 */
2010 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2011 {
2012 uint16_t uNewSS;
2013 uint32_t uNewESP;
2014 if (enmEffOpSize == IEMMODE_32BIT)
2015 {
2016 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2017 if (rcStrict != VINF_SUCCESS)
2018 return rcStrict;
2019 uNewESP = uFrame.pu32[0];
2020 uNewSS = (uint16_t)uFrame.pu32[1];
2021 }
2022 else
2023 {
2024 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2025 if (rcStrict != VINF_SUCCESS)
2026 return rcStrict;
2027 uNewESP = uFrame.pu16[0];
2028 uNewSS = uFrame.pu16[1];
2029 }
2030 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2031 if (rcStrict != VINF_SUCCESS)
2032 return rcStrict;
2033
2034 /* Read the SS descriptor. */
2035 if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
2036 {
2037 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2038 return iemRaiseGeneralProtectionFault0(pIemCpu);
2039 }
2040
2041 IEMSELDESC DescSS;
2042 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2043 if (rcStrict != VINF_SUCCESS)
2044 {
2045 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2046 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2047 return rcStrict;
2048 }
2049
2050 /* Privilege checks. */
2051 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2052 {
2053 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2054 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2055 }
2056 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2057 {
2058 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2059 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2060 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2061 }
2062
2063 /* Must be a writeable data segment descriptor. */
2064 if (!DescSS.Legacy.Gen.u1DescType)
2065 {
2066 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2067 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2068 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2069 }
2070 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2071 {
2072 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2073 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2074 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2075 }
2076
2077 /* Present? */
2078 if (!DescSS.Legacy.Gen.u1Present)
2079 {
2080 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2081 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2082 }
2083
2084 uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy);
2085 if (DescSS.Legacy.Gen.u1Granularity)
2086 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2087
2088 /* Check EIP. */
2089 if (uNewEip > cbLimitCS)
2090 {
2091 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2092 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2093 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2094 }
2095
2096 /*
2097 * Commit the changes, marking CS and SS accessed first since
2098 * that may fail.
2099 */
2100 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2101 {
2102 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2103 if (rcStrict != VINF_SUCCESS)
2104 return rcStrict;
2105 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2106 }
2107 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2108 {
2109 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2110 if (rcStrict != VINF_SUCCESS)
2111 return rcStrict;
2112 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2113 }
2114
2115 pCtx->rip = uNewEip;
2116 pCtx->cs = uNewCs;
2117 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2118 pCtx->csHid.u32Limit = cbLimitCS;
2119 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2120 pCtx->rsp = uNewESP;
2121 pCtx->ss = uNewSS;
2122 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
2123 pCtx->ssHid.u32Limit = cbLimitSs;
2124 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
2125
2126 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2127 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2128 if (enmEffOpSize != IEMMODE_16BIT)
2129 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2130 if (pIemCpu->uCpl == 0)
2131 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2132 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2133 fEFlagsMask |= X86_EFL_IF;
2134 pCtx->eflags.u &= ~fEFlagsMask;
2135 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2136
2137 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2138 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
2139 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
2140 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
2141 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
2142
2143 /* Done! */
2144
2145 }
2146 /*
2147 * Return to the same level.
2148 */
2149 else
2150 {
2151 /* Check EIP. */
2152 if (uNewEip > cbLimitCS)
2153 {
2154 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2155 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2156 }
2157
2158 /*
2159 * Commit the changes, marking CS first since it may fail.
2160 */
2161 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2162 {
2163 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2164 if (rcStrict != VINF_SUCCESS)
2165 return rcStrict;
2166 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2167 }
2168
2169 pCtx->rip = uNewEip;
2170 pCtx->cs = uNewCs;
2171 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2172 pCtx->csHid.u32Limit = cbLimitCS;
2173 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2174 pCtx->rsp = uNewRsp;
2175
2176 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2177 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2178 if (enmEffOpSize != IEMMODE_16BIT)
2179 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2180 if (pIemCpu->uCpl == 0)
2181 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2182 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2183 fEFlagsMask |= X86_EFL_IF;
2184 pCtx->eflags.u &= ~fEFlagsMask;
2185 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2186 /* Done! */
2187 }
2188 }
2189 }
2190
2191 return VINF_SUCCESS;
2192}
2193
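/*
 * Side note: the EFLAGS commit above follows the usual masked-merge pattern;
 * only the bits the current privilege level is allowed to change are taken
 * from the popped value, the rest keep their old state. Generic sketch with
 * illustrative names, not IEM code:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

static uint32_t exampleMergeEFlags(uint32_t fOld, uint32_t fPopped, uint32_t fWritableMask)
{
    return (fOld & ~fWritableMask) | (fPopped & fWritableMask);
}
/* E.g. with CPL > IOPL the IF and IOPL bits are left out of fWritableMask,
   so an iret cannot be used to re-enable interrupts from user mode. */
#endif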
2194
2195/**
2196 * Implements iret for long mode.
2197 *
2198 * @param enmEffOpSize The effective operand size.
2199 */
2200IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2201{
2202 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2203 //VBOXSTRICTRC rcStrict;
2204 //uint64_t uNewRsp;
2205
2206 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2208}
2209
2210
2211/**
2212 * Implements iret.
2213 *
2214 * @param enmEffOpSize The effective operand size.
2215 */
2216IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2217{
2218 /*
2219 * Call a mode specific worker.
2220 */
2221 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2222 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2223 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2224 if (IEM_IS_LONG_MODE(pIemCpu))
2225 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2226
2227 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2228}
2229
2230
2231/**
2232 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2233 *
2234 * @param iSegReg The segment register number (valid).
2235 * @param uSel The new selector value.
2236 */
2237IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2238{
2239 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2240 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2241 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2242
2243 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2244
2245 /*
2246 * Real mode and V8086 mode are easy.
2247 */
2248 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2249 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2250 {
2251 *pSel = uSel;
2252 pHid->u64Base = (uint32_t)uSel << 4;
2253 /** @todo Does the CPU actually load limits and attributes in the
2254 * real/V8086 mode segment load case? It doesn't for CS in far
2255 * jumps... Affects unreal mode. */
2256 pHid->u32Limit = 0xffff;
2257 pHid->Attr.u = 0;
2258 pHid->Attr.n.u1Present = 1;
2259 pHid->Attr.n.u1DescType = 1;
2260 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2261 ? X86_SEL_TYPE_RW
2262 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2263
2264 iemRegAddToRip(pIemCpu, cbInstr);
2265 return VINF_SUCCESS;
2266 }
2267
2268 /*
2269 * Protected mode.
2270 *
2271 * Check if it's a null segment selector value first, that's OK for DS, ES,
2272 * FS and GS. If not null, then we have to load and parse the descriptor.
2273 */
2274 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
2275 {
2276 if (iSegReg == X86_SREG_SS)
2277 {
2278 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2279 || pIemCpu->uCpl != 0
2280 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2281 {
2282 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
2283 return iemRaiseGeneralProtectionFault0(pIemCpu);
2284 }
2285
2286 /* In 64-bit kernel mode, the stack can be 0 because of the way
2287 interrupts are dispatched when in kernel ctx. Just load the
2288 selector value into the register and leave the hidden bits
2289 as is. */
2290 *pSel = uSel;
2291 iemRegAddToRip(pIemCpu, cbInstr);
2292 return VINF_SUCCESS;
2293 }
2294
2295 *pSel = uSel; /* Not RPL, remember :-) */
2296 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2297 && iSegReg != X86_SREG_FS
2298 && iSegReg != X86_SREG_GS)
2299 {
2300 /** @todo figure out what this actually does, it works. Needs
2301 * testcase! */
2302 pHid->Attr.u = 0;
2303 pHid->Attr.n.u1Present = 1;
2304 pHid->Attr.n.u1Long = 1;
2305 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2306 pHid->Attr.n.u2Dpl = 3;
2307 pHid->u32Limit = 0;
2308 pHid->u64Base = 0;
2309 }
2310 else
2311 {
2312 pHid->Attr.u = 0;
2313 pHid->u32Limit = 0;
2314 pHid->u64Base = 0;
2315 }
2316 iemRegAddToRip(pIemCpu, cbInstr);
2317 return VINF_SUCCESS;
2318 }
2319
2320 /* Fetch the descriptor. */
2321 IEMSELDESC Desc;
2322 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2323 if (rcStrict != VINF_SUCCESS)
2324 return rcStrict;
2325
2326 /* Check GPs first. */
2327 if (!Desc.Legacy.Gen.u1DescType)
2328 {
2329 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2330 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2331 }
2332 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2333 {
2334 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2335 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2336 {
2337 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2338 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2339 }
2346 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2347 {
2348 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2349 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2350 }
2351 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2352 {
2353 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2354 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2355 }
2356 }
2357 else
2358 {
2359 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2360 {
2361 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2362 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2363 }
2364 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2365 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2366 {
2367#if 0 /* this is what intel says. */
2368 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2369 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2370 {
2371 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2372 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2373 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2374 }
2375#else /* this is what makes more sense. */
2376 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2377 {
2378 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2379 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2380 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2381 }
2382 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2383 {
2384 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2385 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2386 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2387 }
2388#endif
2389 }
2390 }
2391
2392 /* Is it there? */
2393 if (!Desc.Legacy.Gen.u1Present)
2394 {
2395 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2396 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2397 }
2398
2399 /* Fetch the base and limit. */
2400 uint64_t u64Base;
2401 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
2402 if (Desc.Legacy.Gen.u1Granularity)
2403 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2404
2405 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2406 && iSegReg < X86_SREG_FS)
2407 u64Base = 0;
2408 else
2409 u64Base = X86DESC_BASE(Desc.Legacy);
2410
2411 /*
2412 * Ok, everything checked out fine. Now set the accessed bit before
2413 * committing the result into the registers.
2414 */
2415 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2416 {
2417 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2418 if (rcStrict != VINF_SUCCESS)
2419 return rcStrict;
2420 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2421 }
2422
2423 /* commit */
2424 *pSel = uSel;
2425 pHid->Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2426 pHid->u32Limit = cbLimit;
2427 pHid->u64Base = u64Base;
2428
2429 /** @todo check if the hidden bits are loaded correctly for 64-bit
2430 * mode. */
2431
2432 iemRegAddToRip(pIemCpu, cbInstr);
2433 return VINF_SUCCESS;
2434}
2435
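/*
 * Side note: the limit scaling used just above (and in several other places
 * in this file) expands a granular (G=1) 20-bit segment limit to byte
 * granularity by shifting in a page worth of offset bits. Standalone sketch
 * with an illustrative helper name:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

static uint32_t exampleExpandSegLimit(uint32_t uRawLimit20, int fGranular)
{
    /* G=0: limit is in bytes; G=1: limit is in 4 KiB units, low 12 bits read as one. */
    return fGranular ? (uRawLimit20 << 12) | 0xfff : uRawLimit20;
}
/* exampleExpandSegLimit(0xfffff, 1) == 0xffffffff, i.e. a flat 4 GiB segment. */
#endif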
2436
2437/**
2438 * Implements 'mov SReg, r/m'.
2439 *
2440 * @param iSegReg The segment register number (valid).
2441 * @param uSel The new selector value.
2442 */
2443IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2444{
2445 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2446 if (rcStrict == VINF_SUCCESS)
2447 {
2448 if (iSegReg == X86_SREG_SS)
2449 {
2450 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2451 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2452 }
2453 }
2454 return rcStrict;
2455}
2456
2457
2458/**
2459 * Implements 'pop SReg'.
2460 *
2461 * @param iSegReg The segment register number (valid).
2462 * @param enmEffOpSize The effective operand size (valid).
2463 */
2464IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2465{
2466 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2467 VBOXSTRICTRC rcStrict;
2468
2469 /*
2470 * Read the selector off the stack and join paths with mov ss, reg.
2471 */
2472 RTUINT64U TmpRsp;
2473 TmpRsp.u = pCtx->rsp;
2474 switch (enmEffOpSize)
2475 {
2476 case IEMMODE_16BIT:
2477 {
2478 uint16_t uSel;
2479 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2480 if (rcStrict == VINF_SUCCESS)
2481 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2482 break;
2483 }
2484
2485 case IEMMODE_32BIT:
2486 {
2487 uint32_t u32Value;
2488 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2489 if (rcStrict == VINF_SUCCESS)
2490 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2491 break;
2492 }
2493
2494 case IEMMODE_64BIT:
2495 {
2496 uint64_t u64Value;
2497 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2498 if (rcStrict == VINF_SUCCESS)
2499 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2500 break;
2501 }
2502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2503 }
2504
2505 /*
2506 * Commit the stack on success.
2507 */
2508 if (rcStrict == VINF_SUCCESS)
2509 {
2510 pCtx->rsp = TmpRsp.u;
2511 if (iSegReg == X86_SREG_SS)
2512 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2513 }
2514 return rcStrict;
2515}
2516
2517
2518/**
2519 * Implements lgs, lfs, les, lds & lss.
2520 */
2521IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2522 uint16_t, uSel,
2523 uint64_t, offSeg,
2524 uint8_t, iSegReg,
2525 uint8_t, iGReg,
2526 IEMMODE, enmEffOpSize)
2527{
2528 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2529 VBOXSTRICTRC rcStrict;
2530
2531 /*
2532 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2533 */
2534 /** @todo verify and test that mov, pop and lXs works the segment
2535 * register loading in the exact same way. */
2536 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2537 if (rcStrict == VINF_SUCCESS)
2538 {
2539 switch (enmEffOpSize)
2540 {
2541 case IEMMODE_16BIT:
2542 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2543 break;
2544 case IEMMODE_32BIT:
2545 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2546 break;
2547 case IEMMODE_64BIT:
2548 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2549 break;
2550 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2551 }
2552 }
2553
2554 return rcStrict;
2555}
2556
2557
2558/**
2559 * Implements lgdt.
2560 *
2561 * @param iEffSeg The segment of the new gdtr contents.
2562 * @param GCPtrEffSrc The address of the new gdtr contents.
2563 * @param enmEffOpSize The effective operand size.
2564 */
2565IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2566{
2567 if (pIemCpu->uCpl != 0)
2568 return iemRaiseGeneralProtectionFault0(pIemCpu);
2569 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2570
2571 /*
2572 * Fetch the limit and base address.
2573 */
2574 uint16_t cbLimit;
2575 RTGCPTR GCPtrBase;
2576 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2577 if (rcStrict == VINF_SUCCESS)
2578 {
2579 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2580 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2581 else
2582 {
2583 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2584 pCtx->gdtr.cbGdt = cbLimit;
2585 pCtx->gdtr.pGdt = GCPtrBase;
2586 }
2587 if (rcStrict == VINF_SUCCESS)
2588 iemRegAddToRip(pIemCpu, cbInstr);
2589 }
2590 return rcStrict;
2591}
2592
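/*
 * Side note: the memory operand fetched by iemMemFetchDataXdtr above is the
 * classic pseudo-descriptor, a 16-bit limit followed by the table base
 * (32-bit, or 64-bit in long mode). Layout sketch with illustrative names:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>
# pragma pack(1)
typedef struct EXAMPLEXDTR32
{
    uint16_t cbLimit;   /* inclusive limit of the descriptor table */
    uint32_t uBase;     /* linear base address of the table */
} EXAMPLEXDTR32;        /* 6 bytes; with a 16-bit operand size only the low
                           24 bits of the base are used. */

typedef struct EXAMPLEXDTR64
{
    uint16_t cbLimit;
    uint64_t uBase;
} EXAMPLEXDTR64;        /* 10 bytes, the 64-bit mode form */
# pragma pack()
#endif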
2593
2594/**
2595 * Implements lidt.
2596 *
2597 * @param iEffSeg The segment of the new idtr contents.
2598 * @param GCPtrEffSrc The address of the new idtr contents.
2599 * @param enmEffOpSize The effective operand size.
2600 */
2601IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2602{
2603 if (pIemCpu->uCpl != 0)
2604 return iemRaiseGeneralProtectionFault0(pIemCpu);
2605 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2606
2607 /*
2608 * Fetch the limit and base address.
2609 */
2610 uint16_t cbLimit;
2611 RTGCPTR GCPtrBase;
2612 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2613 if (rcStrict == VINF_SUCCESS)
2614 {
2615 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2616 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2617 else
2618 {
2619 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2620 pCtx->idtr.cbIdt = cbLimit;
2621 pCtx->idtr.pIdt = GCPtrBase;
2622 }
2623 if (rcStrict == VINF_SUCCESS)
2624 iemRegAddToRip(pIemCpu, cbInstr);
2625 }
2626 return rcStrict;
2627}
2628
2629
2630/**
2631 * Implements lldt.
2632 *
2633 * @param uNewLdt The new LDT selector value.
2634 */
2635IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2636{
2637 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2638
2639 /*
2640 * Check preconditions.
2641 */
2642 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2643 {
2644 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2645 return iemRaiseUndefinedOpcode(pIemCpu);
2646 }
2647 if (pIemCpu->uCpl != 0)
2648 {
2649 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2650 return iemRaiseGeneralProtectionFault0(pIemCpu);
2651 }
2652 if (uNewLdt & X86_SEL_LDT)
2653 {
2654 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2655 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2656 }
2657
2658 /*
2659 * Now, loading a NULL selector is easy.
2660 */
2661 if ((uNewLdt & X86_SEL_MASK) == 0)
2662 {
2663 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2664 /** @todo check if the actual value is loaded or if it's always 0. */
2665 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2666 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
2667 else
2668 pCtx->ldtr = 0;
2669 pCtx->ldtrHid.Attr.u = 0;
2670 pCtx->ldtrHid.u64Base = 0;
2671 pCtx->ldtrHid.u32Limit = 0;
2672
2673 iemRegAddToRip(pIemCpu, cbInstr);
2674 return VINF_SUCCESS;
2675 }
2676
2677 /*
2678 * Read the descriptor.
2679 */
2680 IEMSELDESC Desc;
2681 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2682 if (rcStrict != VINF_SUCCESS)
2683 return rcStrict;
2684
2685 /* Check GPs first. */
2686 if (Desc.Legacy.Gen.u1DescType)
2687 {
2688 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2689 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2690 }
2691 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2692 {
2693 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2694 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2695 }
2696 uint64_t u64Base;
2697 if (!IEM_IS_LONG_MODE(pIemCpu))
2698 u64Base = X86DESC_BASE(Desc.Legacy);
2699 else
2700 {
2701 if (Desc.Long.Gen.u5Zeros)
2702 {
2703 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2704 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2705 }
2706
2707 u64Base = X86DESC64_BASE(Desc.Long);
2708 if (!IEM_IS_CANONICAL(u64Base))
2709 {
2710 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2711 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2712 }
2713 }
2714
2715 /* NP */
2716 if (!Desc.Legacy.Gen.u1Present)
2717 {
2718 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2719 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2720 }
2721
2722 /*
2723 * It checks out alright, update the registers.
2724 */
2725/** @todo check if the actual value is loaded or if the RPL is dropped */
2726 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2727 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
2728 else
2729 pCtx->ldtr = uNewLdt & X86_SEL_MASK;
2730 pCtx->ldtrHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2731 pCtx->ldtrHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2732 pCtx->ldtrHid.u64Base = u64Base;
2733
2734 iemRegAddToRip(pIemCpu, cbInstr);
2735 return VINF_SUCCESS;
2736}
2737
2738
2739/**
2740 * Implements ltr.
2741 *
2742 * @param uNewTr The new TSS selector value.
2743 */
2744IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2745{
2746 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2747
2748 /*
2749 * Check preconditions.
2750 */
2751 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2752 {
2753 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2754 return iemRaiseUndefinedOpcode(pIemCpu);
2755 }
2756 if (pIemCpu->uCpl != 0)
2757 {
2758 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2759 return iemRaiseGeneralProtectionFault0(pIemCpu);
2760 }
2761 if (uNewTr & X86_SEL_LDT)
2762 {
2763 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2764 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2765 }
2766 if ((uNewTr & X86_SEL_MASK) == 0)
2767 {
2768 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2769 return iemRaiseGeneralProtectionFault0(pIemCpu);
2770 }
2771
2772 /*
2773 * Read the descriptor.
2774 */
2775 IEMSELDESC Desc;
2776 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2777 if (rcStrict != VINF_SUCCESS)
2778 return rcStrict;
2779
2780 /* Check GPs first. */
2781 if (Desc.Legacy.Gen.u1DescType)
2782 {
2783 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2784 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2785 }
2786 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2787 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2788 || IEM_IS_LONG_MODE(pIemCpu)) )
2789 {
2790 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2791 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2792 }
2793 uint64_t u64Base;
2794 if (!IEM_IS_LONG_MODE(pIemCpu))
2795 u64Base = X86DESC_BASE(Desc.Legacy);
2796 else
2797 {
2798 if (Desc.Long.Gen.u5Zeros)
2799 {
2800 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2801 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2802 }
2803
2804 u64Base = X86DESC64_BASE(Desc.Long);
2805 if (!IEM_IS_CANONICAL(u64Base))
2806 {
2807 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2808 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2809 }
2810 }
2811
2812 /* NP */
2813 if (!Desc.Legacy.Gen.u1Present)
2814 {
2815 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2816 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2817 }
2818
2819 /*
2820 * Set it busy.
2821 * Note! Intel says this should lock down the whole descriptor, but we'll
2822 * restrict ourselves to 32-bit for now due to lack of inline
2823 * assembly and such.
2824 */
2825 void *pvDesc;
2826 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
2827 if (rcStrict != VINF_SUCCESS)
2828 return rcStrict;
2829 switch ((uintptr_t)pvDesc & 3)
2830 {
2831 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2832 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2833 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2834 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2835 }
2836 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2837 if (rcStrict != VINF_SUCCESS)
2838 return rcStrict;
2839 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2840
2841 /*
2842 * It checks out alright, update the registers.
2843 */
2844/** @todo check if the actual value is loaded or if the RPL is dropped */
2845 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2846 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
2847 else
2848 pCtx->tr = uNewTr & X86_SEL_MASK;
2849 pCtx->trHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2850 pCtx->trHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2851 pCtx->trHid.u64Base = u64Base;
2852
2853 iemRegAddToRip(pIemCpu, cbInstr);
2854 return VINF_SUCCESS;
2855}
2856
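/*
 * Side note: the bit set above is bit 41 of the 8-byte descriptor, i.e. bit 1
 * of the 4-bit type field, which turns an available TSS type (9 = 386 TSS
 * available) into the busy type (11). Standalone sketch operating on a raw
 * descriptor value; the helper name is illustrative only:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

static uint64_t exampleMarkTssBusy(uint64_t uRawDesc)
{
    return uRawDesc | (UINT64_C(1) << 41);  /* type bit 1: available -> busy */
}
/* Turns a type-9 (386 TSS available) descriptor into type 11 (386 TSS busy);
   ltr raises #GP if the referenced descriptor is already busy. */
#endif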
2857
2858/**
2859 * Implements mov GReg,CRx.
2860 *
2861 * @param iGReg The general register to store the CRx value in.
2862 * @param iCrReg The CRx register to read (valid).
2863 */
2864IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2865{
2866 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2867 if (pIemCpu->uCpl != 0)
2868 return iemRaiseGeneralProtectionFault0(pIemCpu);
2869 Assert(!pCtx->eflags.Bits.u1VM);
2870
2871 /* read it */
2872 uint64_t crX;
2873 switch (iCrReg)
2874 {
2875 case 0: crX = pCtx->cr0; break;
2876 case 2: crX = pCtx->cr2; break;
2877 case 3: crX = pCtx->cr3; break;
2878 case 4: crX = pCtx->cr4; break;
2879 case 8:
2880 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2881 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2882 else
2883 crX = 0xff;
2884 break;
2885 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2886 }
2887
2888 /* store it */
2889 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2890 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2891 else
2892 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2893
2894 iemRegAddToRip(pIemCpu, cbInstr);
2895 return VINF_SUCCESS;
2896}
2897
2898
2899/**
2900 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2901 *
2902 * @param iCrReg The CRx register to write (valid).
2903 * @param uNewCrX The new value.
2904 */
2905IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2906{
2907 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2908 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2909 VBOXSTRICTRC rcStrict;
2910 int rc;
2911
2912 /*
2913 * Try store it.
2914 * Unfortunately, CPUM only does a tiny bit of the work.
2915 */
2916 switch (iCrReg)
2917 {
2918 case 0:
2919 {
2920 /*
2921 * Perform checks.
2922 */
2923 uint64_t const uOldCrX = pCtx->cr0;
2924 uNewCrX |= X86_CR0_ET; /* hardcoded */
2925
2926 /* Check for reserved bits. */
2927 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2928 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2929 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2930 if (uNewCrX & ~(uint64_t)fValid)
2931 {
2932 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2933 return iemRaiseGeneralProtectionFault0(pIemCpu);
2934 }
2935
2936 /* Check for invalid combinations. */
2937 if ( (uNewCrX & X86_CR0_PG)
2938 && !(uNewCrX & X86_CR0_PE) )
2939 {
2940 Log(("Trying to set CR0.PG without CR0.PE\n"));
2941 return iemRaiseGeneralProtectionFault0(pIemCpu);
2942 }
2943
2944 if ( !(uNewCrX & X86_CR0_CD)
2945 && (uNewCrX & X86_CR0_NW) )
2946 {
2947 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2948 return iemRaiseGeneralProtectionFault0(pIemCpu);
2949 }
2950
2951 /* Long mode consistency checks. */
2952 if ( (uNewCrX & X86_CR0_PG)
2953 && !(uOldCrX & X86_CR0_PG)
2954 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2955 {
2956 if (!(pCtx->cr4 & X86_CR4_PAE))
2957 {
2958 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
2959 return iemRaiseGeneralProtectionFault0(pIemCpu);
2960 }
2961 if (pCtx->csHid.Attr.n.u1Long)
2962 {
2963 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
2964 return iemRaiseGeneralProtectionFault0(pIemCpu);
2965 }
2966 }
2967
2968 /** @todo check reserved PDPTR bits as AMD states. */
2969
2970 /*
2971 * Change CR0.
2972 */
2973 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2974 {
2975 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
2976 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
2977 }
2978 else
2979 pCtx->cr0 = uNewCrX;
2980 Assert(pCtx->cr0 == uNewCrX);
2981
2982 /*
2983 * Change EFER.LMA if entering or leaving long mode.
2984 */
2985 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
2986 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2987 {
2988 uint64_t NewEFER = pCtx->msrEFER;
2989 if (uNewCrX & X86_CR0_PG)
2990 NewEFER |= MSR_K6_EFER_LME;
2991 else
2992 NewEFER &= ~MSR_K6_EFER_LME;
2993
2994 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2995 CPUMSetGuestEFER(pVCpu, NewEFER);
2996 else
2997 pCtx->msrEFER = NewEFER;
2998 Assert(pCtx->msrEFER == NewEFER);
2999 }
3000
3001 /*
3002 * Inform PGM.
3003 */
3004 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3005 {
3006 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3007 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3008 {
3009 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3010 AssertRCReturn(rc, rc);
3011 /* ignore informational status codes */
3012 }
3013 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3014 /** @todo Status code management. */
3015 }
3016 else
3017 rcStrict = VINF_SUCCESS;
3018 break;
3019 }
3020
3021 /*
3022 * CR2 can be changed without any restrictions.
3023 */
3024 case 2:
3025 pCtx->cr2 = uNewCrX;
3026 rcStrict = VINF_SUCCESS;
3027 break;
3028
3029 /*
3030 * CR3 is relatively simple, although AMD and Intel have different
3031 * accounts of how setting reserved bits is handled. We take Intel's
3032 * word for the lower bits and AMD's for the high bits (63:52).
3033 */
3034 /** @todo Testcase: Setting reserved bits in CR3, especially before
3035 * enabling paging. */
3036 case 3:
3037 {
3038 /* check / mask the value. */
3039 if (uNewCrX & UINT64_C(0xfff0000000000000))
3040 {
3041 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3042 return iemRaiseGeneralProtectionFault0(pIemCpu);
3043 }
3044
3045 uint64_t fValid;
3046 if ( (pCtx->cr4 & X86_CR4_PAE)
3047 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3048 fValid = UINT64_C(0x000ffffffffff014);
3049 else if (pCtx->cr4 & X86_CR4_PAE)
3050 fValid = UINT64_C(0xfffffff4);
3051 else
3052 fValid = UINT64_C(0xfffff014);
3053 if (uNewCrX & ~fValid)
3054 {
3055 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3056 uNewCrX, uNewCrX & ~fValid));
3057 uNewCrX &= fValid;
3058 }
3059
3060 /** @todo If we're in PAE mode we should check the PDPTRs for
3061 * invalid bits. */
3062
3063 /* Make the change. */
3064 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3065 {
3066 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3067 AssertRCSuccessReturn(rc, rc);
3068 }
3069 else
3070 pCtx->cr3 = uNewCrX;
3071
3072 /* Inform PGM. */
3073 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3074 {
3075 if (pCtx->cr0 & X86_CR0_PG)
3076 {
3077 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3078 AssertRCReturn(rc, rc);
3079 /* ignore informational status codes */
3080 /** @todo status code management */
3081 }
3082 }
3083 rcStrict = VINF_SUCCESS;
3084 break;
3085 }
3086
3087 /*
3088 * CR4 is a bit more tedious as there are bits which cannot be cleared
3089 * under some circumstances and such.
3090 */
3091 case 4:
3092 {
3093 uint64_t const uOldCrX = pCtx->cr4;
3094
3095 /* reserved bits */
3096 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3097 | X86_CR4_TSD | X86_CR4_DE
3098 | X86_CR4_PSE | X86_CR4_PAE
3099 | X86_CR4_MCE | X86_CR4_PGE
3100 | X86_CR4_PCE | X86_CR4_OSFSXR
3101 | X86_CR4_OSXMMEEXCPT;
3102 //if (xxx)
3103 // fValid |= X86_CR4_VMXE;
3104 //if (xxx)
3105 // fValid |= X86_CR4_OSXSAVE;
3106 if (uNewCrX & ~(uint64_t)fValid)
3107 {
3108 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3109 return iemRaiseGeneralProtectionFault0(pIemCpu);
3110 }
3111
3112 /* long mode checks. */
3113 if ( (uOldCrX & X86_CR4_PAE)
3114 && !(uNewCrX & X86_CR4_PAE)
3115 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3116 {
3117 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
3118 return iemRaiseGeneralProtectionFault0(pIemCpu);
3119 }
3120
3121
3122 /*
3123 * Change it.
3124 */
3125 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3126 {
3127 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3128 AssertRCSuccessReturn(rc, rc);
3129 }
3130 else
3131 pCtx->cr4 = uNewCrX;
3132 Assert(pCtx->cr4 == uNewCrX);
3133
3134 /*
3135 * Notify SELM and PGM.
3136 */
3137 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3138 {
3139 /* SELM - VME may change things wrt to the TSS shadowing. */
3140 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3141 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3142
3143 /* PGM - flushing and mode. */
3144 if ( (uNewCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3145 != (uOldCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE)) )
3146 {
3147 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3148 AssertRCReturn(rc, rc);
3149 /* ignore informational status codes */
3150 }
3151 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3152 /** @todo Status code management. */
3153 }
3154 else
3155 rcStrict = VINF_SUCCESS;
3156 break;
3157 }
3158
3159 /*
3160 * CR8 maps to the APIC TPR.
3161 */
3162 case 8:
3163 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3164 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
3165 else
3166 rcStrict = VINF_SUCCESS;
3167 break;
3168
3169 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3170 }
3171
3172 /*
3173 * Advance the RIP on success.
3174 */
3175 /** @todo Status code management. */
3176 if (rcStrict == VINF_SUCCESS)
3177 iemRegAddToRip(pIemCpu, cbInstr);
3178 return rcStrict;
3179
3180}
3181
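/*
 * Side note: the two CR0 consistency rules enforced in the worker above
 * (PG requires PE, and NW must not be set while CD is clear) can be
 * expressed compactly. Hedged standalone sketch with illustrative names and
 * plain bit masks rather than the VBox ones:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>
# include <stdbool.h>

# define EX_CR0_PE  UINT64_C(0x00000001)
# define EX_CR0_NW  UINT64_C(0x20000000)
# define EX_CR0_CD  UINT64_C(0x40000000)
# define EX_CR0_PG  UINT64_C(0x80000000)

static bool exampleIsCr0Consistent(uint64_t uCr0)
{
    if ((uCr0 & EX_CR0_PG) && !(uCr0 & EX_CR0_PE))
        return false;   /* paging without protection -> #GP(0) */
    if ((uCr0 & EX_CR0_NW) && !(uCr0 & EX_CR0_CD))
        return false;   /* not-write-through without cache-disable -> #GP(0) */
    return true;
}
#endif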
3182
3183/**
3184 * Implements mov CRx,GReg.
3185 *
3186 * @param iCrReg The CRx register to write (valid).
3187 * @param iGReg The general register to load the CRx value from.
3188 */
3189IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3190{
3191 if (pIemCpu->uCpl != 0)
3192 return iemRaiseGeneralProtectionFault0(pIemCpu);
3193 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3194
3195 /*
3196 * Read the new value from the source register and call common worker.
3197 */
3198 uint64_t uNewCrX;
3199 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3200 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3201 else
3202 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3203 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3204}
3205
3206
3207/**
3208 * Implements 'LMSW r/m16'
3209 *
3210 * @param u16NewMsw The new value.
3211 */
3212IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3213{
3214 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3215
3216 if (pIemCpu->uCpl != 0)
3217 return iemRaiseGeneralProtectionFault0(pIemCpu);
3218 Assert(!pCtx->eflags.Bits.u1VM);
3219
3220 /*
3221 * Compose the new CR0 value and call common worker.
3222 */
3223 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3224 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3225 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3226}
3227
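/*
 * Side note: as the masking above shows, LMSW only affects the low four MSW
 * bits (PE, MP, EM, TS) and can set but never clear PE; clearing PE requires
 * a full 'mov cr0'. Tiny standalone sketch, illustrative name and plain
 * masks:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

static uint64_t exampleApplyLmsw(uint64_t uCr0, uint16_t uMsw)
{
    uint64_t const fMswBits = 0x000f;   /* PE | MP | EM | TS */
    uint64_t const fClear   = 0x000e;   /* MP | EM | TS only; PE is sticky */
    return (uCr0 & ~fClear) | (uMsw & fMswBits);
}
#endif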
3228
3229/**
3230 * Implements 'CLTS'.
3231 */
3232IEM_CIMPL_DEF_0(iemCImpl_clts)
3233{
3234 if (pIemCpu->uCpl != 0)
3235 return iemRaiseGeneralProtectionFault0(pIemCpu);
3236
3237 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3238 uint64_t uNewCr0 = pCtx->cr0;
3239 uNewCr0 &= ~X86_CR0_TS;
3240 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3241}
3242
3243
3244/**
3245 * Implements mov GReg,DRx.
3246 *
3247 * @param iGReg The general register to store the DRx value in.
3248 * @param iDrReg The DRx register to read (0-7).
3249 */
3250IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3251{
3252 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3253
3254 /*
3255 * Check preconditions.
3256 */
3257
3258 /* Raise GPs. */
3259 if (pIemCpu->uCpl != 0)
3260 return iemRaiseGeneralProtectionFault0(pIemCpu);
3261 Assert(!pCtx->eflags.Bits.u1VM);
3262
3263 if ( (iDrReg == 4 || iDrReg == 5)
3264 && (pCtx->cr4 & X86_CR4_DE) )
3265 {
3266 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3267 return iemRaiseGeneralProtectionFault0(pIemCpu);
3268 }
3269
3270 /* Raise #DB if general access detect is enabled. */
3271 if (pCtx->dr[7] & X86_DR7_GD)
3272 {
3273 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3274 return iemRaiseDebugException(pIemCpu);
3275 }
3276
3277 /*
3278 * Read the debug register and store it in the specified general register.
3279 */
3280 uint64_t drX;
3281 switch (iDrReg)
3282 {
3283 case 0: drX = pCtx->dr[0]; break;
3284 case 1: drX = pCtx->dr[1]; break;
3285 case 2: drX = pCtx->dr[2]; break;
3286 case 3: drX = pCtx->dr[3]; break;
3287 case 6:
3288 case 4:
3289 drX = pCtx->dr[6];
3290 drX &= ~RT_BIT_32(12);
3291 drX |= UINT32_C(0xffff0ff0);
3292 break;
3293 case 7:
3294 case 5:
3295 drX = pCtx->dr[7];
3296 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3297 drX |= RT_BIT_32(10);
3298 break;
3299 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3300 }
3301
3302 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3303 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3304 else
3305 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3306
3307 iemRegAddToRip(pIemCpu, cbInstr);
3308 return VINF_SUCCESS;
3309}
3310
3311
3312/**
3313 * Implements mov DRx,GReg.
3314 *
3315 * @param iDrReg The DRx register to write (valid).
3316 * @param iGReg The general register to load the DRx value from.
3317 */
3318IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3319{
3320 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3321
3322 /*
3323 * Check preconditions.
3324 */
3325 if (pIemCpu->uCpl != 0)
3326 return iemRaiseGeneralProtectionFault0(pIemCpu);
3327 Assert(!pCtx->eflags.Bits.u1VM);
3328
3329 if ( (iDrReg == 4 || iDrReg == 5)
3330 && (pCtx->cr4 & X86_CR4_DE) )
3331 {
3332 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3333 return iemRaiseGeneralProtectionFault0(pIemCpu);
3334 }
3335
3336 /* Raise #DB if general access detect is enabled. */
3337 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
3338 * \#GP? */
3339 if (pCtx->dr[7] & X86_DR7_GD)
3340 {
3341 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3342 return iemRaiseDebugException(pIemCpu);
3343 }
3344
3345 /*
3346 * Read the new value from the source register.
3347 */
3348 uint64_t uNewDrX;
3349 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3350 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3351 else
3352 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3353
3354 /*
3355 * Adjust it.
3356 */
3357 switch (iDrReg)
3358 {
3359 case 0:
3360 case 1:
3361 case 2:
3362 case 3:
3363 /* nothing to adjust */
3364 break;
3365
3366 case 6:
3367 case 4:
3368 if (uNewDrX & UINT64_C(0xffffffff00000000))
3369 {
3370 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3371 return iemRaiseGeneralProtectionFault0(pIemCpu);
3372 }
3373 uNewDrX &= ~RT_BIT_32(12);
3374 uNewDrX |= UINT32_C(0xffff0ff0);
3375 break;
3376
3377 case 7:
3378 case 5:
3379 if (uNewDrX & UINT64_C(0xffffffff00000000))
3380 {
3381 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3382 return iemRaiseGeneralProtectionFault0(pIemCpu);
3383 }
3384 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3385 uNewDrX |= RT_BIT_32(10);
3386 break;
3387
3388 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3389 }
3390
3391 /*
3392 * Do the actual setting.
3393 */
3394 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3395 {
3396 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3397 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3398 }
3399 else
3400 pCtx->dr[iDrReg] = uNewDrX;
3401
3402 iemRegAddToRip(pIemCpu, cbInstr);
3403 return VINF_SUCCESS;
3404}
3405
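/*
 * Side note: the adjustments in the two DRx movers above mirror the
 * architecturally fixed bits: DR6 reads with bits 31:16 and 11:4 set and
 * bit 12 clear, DR7 reads with bit 10 set and bits 11, 12, 14 and 15 clear,
 * and on 64-bit CPUs a non-zero upper half raises #GP(0). Standalone sketch
 * of the DR6 normalisation; the helper name is illustrative only:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

static uint64_t exampleNormalizeDr6(uint64_t uDr6)
{
    uDr6 &= UINT64_C(0x00000000ffffffff);   /* upper half must be zero anyway */
    uDr6 &= ~UINT64_C(0x00001000);          /* bit 12 always reads as zero */
    uDr6 |=  UINT64_C(0xffff0ff0);          /* bits 31:16 and 11:4 always read as one */
    return uDr6;
}
#endif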
3406
3407/**
3408 * Implements 'INVLPG m'.
3409 *
3410 * @param GCPtrPage The effective address of the page to invalidate.
3411 * @remarks Updates the RIP.
3412 */
3413IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3414{
3415 /* ring-0 only. */
3416 if (pIemCpu->uCpl != 0)
3417 return iemRaiseGeneralProtectionFault0(pIemCpu);
3418 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3419
3420 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3421 iemRegAddToRip(pIemCpu, cbInstr);
3422
3423 if ( rc == VINF_SUCCESS
3424 || rc == VINF_PGM_SYNC_CR3)
3425 return VINF_SUCCESS;
3426 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", rc));
3427 return rc;
3428}
3429
3430
3431/**
3432 * Implements RDTSC.
3433 */
3434IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3435{
3436 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3437
3438 /*
3439 * Check preconditions.
3440 */
3441 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3442 return iemRaiseUndefinedOpcode(pIemCpu);
3443
3444 if ( (pCtx->cr4 & X86_CR4_TSD)
3445 && pIemCpu->uCpl != 0)
3446 {
3447 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3448 return iemRaiseGeneralProtectionFault0(pIemCpu);
3449 }
3450
3451 /*
3452 * Do the job.
3453 */
3454 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3455 pCtx->rax = (uint32_t)uTicks;
3456 pCtx->rdx = uTicks >> 32;
3457#ifdef IEM_VERIFICATION_MODE
3458 pIemCpu->fIgnoreRaxRdx = true;
3459#endif
3460
3461 iemRegAddToRip(pIemCpu, cbInstr);
3462 return VINF_SUCCESS;
3463}
3464
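/*
 * Side note: RDTSC returns the 64-bit counter split across EDX:EAX, with the
 * upper halves of RAX and RDX cleared in 64-bit mode, which is what the two
 * assignments above achieve. Tiny standalone sketch, illustrative names:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stdint.h>

static void exampleSplitTsc(uint64_t uTsc, uint64_t *puRax, uint64_t *puRdx)
{
    *puRax = (uint32_t)uTsc;        /* EAX = low dword, upper RAX bits cleared */
    *puRdx = uTsc >> 32;            /* EDX = high dword, upper RDX bits cleared */
}
#endif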
3465
3466/**
3467 * Implements RDMSR.
3468 */
3469IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3470{
3471 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3472
3473 /*
3474 * Check preconditions.
3475 */
3476 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3477 return iemRaiseUndefinedOpcode(pIemCpu);
3478 if (pIemCpu->uCpl != 0)
3479 return iemRaiseGeneralProtectionFault0(pIemCpu);
3480
3481 /*
3482 * Do the job.
3483 */
3484 RTUINT64U uValue;
3485 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3486 if (rc != VINF_SUCCESS)
3487 {
3488 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3489 return iemRaiseGeneralProtectionFault0(pIemCpu);
3490 }
3491
3492 pCtx->rax = uValue.au32[0];
3493 pCtx->rdx = uValue.au32[1];
3494
3495 iemRegAddToRip(pIemCpu, cbInstr);
3496 return VINF_SUCCESS;
3497}
3498
3499
3500/**
3501 * Implements 'IN eAX, port'.
3502 *
3503 * @param u16Port The source port.
3504 * @param cbReg The register size.
3505 */
3506IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3507{
3508 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3509
3510 /*
3511 * CPL check
3512 */
3513 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3514 if (rcStrict != VINF_SUCCESS)
3515 return rcStrict;
3516
3517 /*
3518 * Perform the I/O.
3519 */
3520 uint32_t u32Value;
3521 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3522 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3523 else
3524 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3525 if (IOM_SUCCESS(rcStrict))
3526 {
3527 switch (cbReg)
3528 {
3529 case 1: pCtx->al = (uint8_t)u32Value; break;
3530 case 2: pCtx->ax = (uint16_t)u32Value; break;
3531 case 4: pCtx->rax = u32Value; break;
3532 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3533 }
3534 iemRegAddToRip(pIemCpu, cbInstr);
3535 pIemCpu->cPotentialExits++;
3536 }
3537 /** @todo massage rcStrict. */
3538 return rcStrict;
3539}
3540
3541
3542/**
3543 * Implements 'IN eAX, DX'.
3544 *
3545 * @param cbReg The register size.
3546 */
3547IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3548{
3549 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3550}
3551
3552
3553/**
3554 * Implements 'OUT port, eAX'.
3555 *
3556 * @param u16Port The destination port.
3557 * @param cbReg The register size.
3558 */
3559IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3560{
3561 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3562
3563 /*
3564 * CPL check
3565 */
3566 if ( (pCtx->cr0 & X86_CR0_PE)
3567 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3568 || pCtx->eflags.Bits.u1VM) )
3569 {
3570 /** @todo I/O port permission bitmap check */
3571 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
3572 }
3573
3574 /*
3575 * Perform the I/O.
3576 */
3577 uint32_t u32Value;
3578 switch (cbReg)
3579 {
3580 case 1: u32Value = pCtx->al; break;
3581 case 2: u32Value = pCtx->ax; break;
3582 case 4: u32Value = pCtx->eax; break;
3583 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3584 }
3585 VBOXSTRICTRC rc;
3586 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3587 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3588 else
3589 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3590 if (IOM_SUCCESS(rc))
3591 {
3592 iemRegAddToRip(pIemCpu, cbInstr);
3593 pIemCpu->cPotentialExits++;
3594 /** @todo massage rc. */
3595 }
3596 return rc;
3597}
3598
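/*
 * Side note on the I/O permission bitmap @todo above: when CPL > IOPL (or in
 * V8086 mode) the port access is only allowed if every bit covering the port
 * range in the TSS I/O permission bitmap is clear. Hedged sketch of that
 * check, operating on an already-read copy of the bitmap; the names and the
 * caller-supplied buffer are illustrative assumptions:
 */
#if 0 /* illustrative sketch, not compiled */
# include <stddef.h>
# include <stdint.h>
# include <stdbool.h>

static bool exampleIoBitmapAllows(uint8_t const *pabIoBitmap, size_t cbBitmap,
                                  uint16_t u16Port, uint8_t cbOperand)
{
    for (uint32_t uPort = u16Port; uPort < (uint32_t)u16Port + cbOperand; uPort++)
    {
        if (uPort / 8 >= cbBitmap)                          /* beyond the map -> deny */
            return false;
        if (pabIoBitmap[uPort / 8] & (1 << (uPort & 7)))    /* bit set -> deny, #GP(0) */
            return false;
    }
    return true;
}
#endif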
3599
3600/**
3601 * Implements 'OUT DX, eAX'.
3602 *
3603 * @param cbReg The register size.
3604 */
3605IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3606{
3607 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3608}
3609
3610
3611/**
3612 * Implements 'CLI'.
3613 */
3614IEM_CIMPL_DEF_0(iemCImpl_cli)
3615{
3616 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3617
3618 if (pCtx->cr0 & X86_CR0_PE)
3619 {
3620 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3621 if (!pCtx->eflags.Bits.u1VM)
3622 {
3623 if (pIemCpu->uCpl <= uIopl)
3624 pCtx->eflags.Bits.u1IF = 0;
3625 else if ( pIemCpu->uCpl == 3
3626 && (pCtx->cr4 & X86_CR4_PVI) )
3627 pCtx->eflags.Bits.u1VIF = 0;
3628 else
3629 return iemRaiseGeneralProtectionFault0(pIemCpu);
3630 }
3631 /* V8086 */
3632 else if (uIopl == 3)
3633 pCtx->eflags.Bits.u1IF = 0;
3634 else if ( uIopl < 3
3635 && (pCtx->cr4 & X86_CR4_VME) )
3636 pCtx->eflags.Bits.u1VIF = 0;
3637 else
3638 return iemRaiseGeneralProtectionFault0(pIemCpu);
3639 }
3640 /* real mode */
3641 else
3642 pCtx->eflags.Bits.u1IF = 0;
3643 iemRegAddToRip(pIemCpu, cbInstr);
3644 return VINF_SUCCESS;
3645}
3646
3647
3648/**
3649 * Implements 'STI'.
3650 */
3651IEM_CIMPL_DEF_0(iemCImpl_sti)
3652{
3653 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3654
3655 if (pCtx->cr0 & X86_CR0_PE)
3656 {
3657 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3658 if (!pCtx->eflags.Bits.u1VM)
3659 {
3660 if (pIemCpu->uCpl <= uIopl)
3661 pCtx->eflags.Bits.u1IF = 1;
3662 else if ( pIemCpu->uCpl == 3
3663 && (pCtx->cr4 & X86_CR4_PVI)
3664 && !pCtx->eflags.Bits.u1VIP )
3665 pCtx->eflags.Bits.u1VIF = 1;
3666 else
3667 return iemRaiseGeneralProtectionFault0(pIemCpu);
3668 }
3669 /* V8086 */
3670 else if (uIopl == 3)
3671 pCtx->eflags.Bits.u1IF = 1;
3672 else if ( uIopl < 3
3673 && (pCtx->cr4 & X86_CR4_VME)
3674 && !pCtx->eflags.Bits.u1VIP )
3675 pCtx->eflags.Bits.u1VIF = 1;
3676 else
3677 return iemRaiseGeneralProtectionFault0(pIemCpu);
3678 }
3679 /* real mode */
3680 else
3681 pCtx->eflags.Bits.u1IF = 1;
3682
3683 iemRegAddToRip(pIemCpu, cbInstr);
3684 /** @todo don't do this unconditionally... */
3685 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3686 return VINF_SUCCESS;
3687}
3688
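/*
 * Summary of the CLI/STI privilege rules implemented above (STI additionally
 * requires VIP=0 on the VIF paths):
 *
 *      Mode            Condition                   Flag touched
 *      protected       CPL <= IOPL                 IF
 *      protected       CPL == 3 && CR4.PVI         VIF
 *      V8086           IOPL == 3                   IF
 *      V8086           IOPL < 3 && CR4.VME         VIF
 *      real            always                      IF
 *      otherwise                                   #GP(0)
 */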
3689
3690/**
3691 * Implements 'HLT'.
3692 */
3693IEM_CIMPL_DEF_0(iemCImpl_hlt)
3694{
3695 if (pIemCpu->uCpl != 0)
3696 return iemRaiseGeneralProtectionFault0(pIemCpu);
3697 iemRegAddToRip(pIemCpu, cbInstr);
3698 return VINF_EM_HALT;
3699}
3700
3701
3702/**
3703 * Implements 'CPUID'.
3704 */
3705IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3706{
3707 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3708
3709 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
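    /* CPUMGetGuestCpuId only fills the 32-bit register aliases; CPUID
       zero-extends its results into the full 64-bit GPRs, hence the
       explicit masking below. */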
3710 pCtx->rax &= UINT32_C(0xffffffff);
3711 pCtx->rbx &= UINT32_C(0xffffffff);
3712 pCtx->rcx &= UINT32_C(0xffffffff);
3713 pCtx->rdx &= UINT32_C(0xffffffff);
3714
3715 iemRegAddToRip(pIemCpu, cbInstr);
3716 return VINF_SUCCESS;
3717}
3718
3719
3720/**
3721 * Implements 'AAD'.
3722 *
3723 * @param enmEffOpSize The effective operand size.
3724 */
3725IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3726{
3727 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3728
3729 uint16_t const ax = pCtx->ax;
3730 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3731 pCtx->ax = al;
3732 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3733 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3734 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3735
3736 iemRegAddToRip(pIemCpu, cbInstr);
3737 return VINF_SUCCESS;
3738}
3739
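/*
 * Worked example for the above: AX=0x0307 (unpacked BCD '3','7'), AAD with
 * the default base 10 gives AL = 7 + 3*10 = 37 = 0x25 and AH = 0, so
 * AX = 0x0025.
 */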
3740
3741/**
3742 * Implements 'AAM'.
3743 *
3744 * @param bImm The immediate operand. Cannot be 0.
3745 */
3746IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3747{
3748 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3749 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3750
3751 uint16_t const ax = pCtx->ax;
3752 uint8_t const al = (uint8_t)ax % bImm;
3753 uint8_t const ah = (uint8_t)ax / bImm;
3754 pCtx->ax = (ah << 8) + al;
3755 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3756 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3757 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3758
3759 iemRegAddToRip(pIemCpu, cbInstr);
3760 return VINF_SUCCESS;
3761}
3762
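/*
 * Worked example for the above: AL=37 (0x25), AAM with the default base 10
 * gives AH = 37 / 10 = 3 and AL = 37 % 10 = 7, so AX = 0x0307 -- the
 * inverse of the AAD example.
 */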
3763
3764
3765
3766/*
3767 * Instantiate the various string operation combinations.
3768 */
3769#define OP_SIZE 8
3770#define ADDR_SIZE 16
3771#include "IEMAllCImplStrInstr.cpp.h"
3772#define OP_SIZE 8
3773#define ADDR_SIZE 32
3774#include "IEMAllCImplStrInstr.cpp.h"
3775#define OP_SIZE 8
3776#define ADDR_SIZE 64
3777#include "IEMAllCImplStrInstr.cpp.h"
3778
3779#define OP_SIZE 16
3780#define ADDR_SIZE 16
3781#include "IEMAllCImplStrInstr.cpp.h"
3782#define OP_SIZE 16
3783#define ADDR_SIZE 32
3784#include "IEMAllCImplStrInstr.cpp.h"
3785#define OP_SIZE 16
3786#define ADDR_SIZE 64
3787#include "IEMAllCImplStrInstr.cpp.h"
3788
3789#define OP_SIZE 32
3790#define ADDR_SIZE 16
3791#include "IEMAllCImplStrInstr.cpp.h"
3792#define OP_SIZE 32
3793#define ADDR_SIZE 32
3794#include "IEMAllCImplStrInstr.cpp.h"
3795#define OP_SIZE 32
3796#define ADDR_SIZE 64
3797#include "IEMAllCImplStrInstr.cpp.h"
3798
3799#define OP_SIZE 64
3800#define ADDR_SIZE 32
3801#include "IEMAllCImplStrInstr.cpp.h"
3802#define OP_SIZE 64
3803#define ADDR_SIZE 64
3804#include "IEMAllCImplStrInstr.cpp.h"
3805
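/*
 * Note: the 64-bit operand size is only instantiated for 32- and 64-bit
 * address sizes; REX.W implies long mode, where a 16-bit address size
 * cannot be encoded, so there is no 64/16 combination.
 */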
3806
3807/**
3808 * Implements 'FINIT' and 'FNINIT'.
3809 *
3810 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3811 * not.
3812 */
3813IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3814{
3815 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3816
3817 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3818 return iemRaiseDeviceNotAvailable(pIemCpu);
3819
3820 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
3821 if (fCheckXcpts && TODO )
3822 return iemRaiseMathFault(pIemCpu);
3823 */
3824
3825 if (iemFRegIsFxSaveFormat(pIemCpu))
3826 {
3827 pCtx->fpu.FCW = 0x37f;
3828 pCtx->fpu.FSW = 0;
3829 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
3830 pCtx->fpu.FPUDP = 0;
3831 pCtx->fpu.DS = 0; //??
3832 pCtx->fpu.FPUIP = 0;
3833 pCtx->fpu.CS = 0; //??
3834 pCtx->fpu.FOP = 0;
3835 }
3836 else
3837 {
3838 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3839 pFpu->FCW = 0x37f;
3840 pFpu->FSW = 0;
3841 pFpu->FTW = 0xffff; /* 11 - empty */
3842 pFpu->FPUOO = 0; //??
3843 pFpu->FPUOS = 0; //??
3844 pFpu->FPUIP = 0;
3845 pFpu->CS = 0; //??
3846 pFpu->FOP = 0;
3847 }
3848
3849 iemRegAddToRip(pIemCpu, cbInstr);
3850 return VINF_SUCCESS;
3851}
3852
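/*
 * For reference, the FINIT defaults set above: FCW=0x37F masks all six FPU
 * exceptions (IM/DM/ZM/OM/UM/PM) and selects 64-bit precision with
 * round-to-nearest; FSW=0 puts TOP at 0; and the tag word marks every
 * register as empty.
 */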
3853
3854/**
3855 * Implements 'FXSAVE'.
3856 *
3857 * @param iEffSeg The effective segment.
3858 * @param GCPtrEff The address of the image.
3859 * @param enmEffOpSize The operand size (only REX.W really matters).
3860 */
3861IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3862{
3863 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3864
3865 /*
3866 * Raise exceptions.
3867 */
3868 if (pCtx->cr0 & X86_CR0_EM)
3869 return iemRaiseUndefinedOpcode(pIemCpu);
3870 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3871 return iemRaiseDeviceNotAvailable(pIemCpu);
3872 if (GCPtrEff & 15)
3873 {
3874 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3875 * all/any misalignment sizes; Intel says it's an implementation detail. */
3876 if ( (pCtx->cr0 & X86_CR0_AM)
3877 && pCtx->eflags.Bits.u1AC
3878 && pIemCpu->uCpl == 3)
3879 return iemRaiseAlignmentCheckException(pIemCpu);
3880 return iemRaiseGeneralProtectionFault0(pIemCpu);
3881 }
3882 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3883
3884 /*
3885 * Access the memory.
3886 */
3887 void *pvMem512;
3888 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3889 if (rcStrict != VINF_SUCCESS)
3890 return rcStrict;
3891 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
3892
3893 /*
3894 * Store the registers.
3895 */
3896 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
3897 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
3898
3899 /* common for all formats */
3900 pDst->FCW = pCtx->fpu.FCW;
3901 pDst->FSW = pCtx->fpu.FSW;
3902 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
3903 pDst->FOP = pCtx->fpu.FOP;
3904 pDst->MXCSR = pCtx->fpu.MXCSR;
3905 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
3906 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
3907 {
3908 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
3909 * them for now... */
3910 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
3911 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
3912 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
3913 pDst->aRegs[i].au32[3] = 0;
3914 }
3915
3916 /* FPU IP, CS, DP and DS. */
3917 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
3918 * state information. :-/
3919 * Storing zeros now to prevent any potential leakage of host info. */
3920 pDst->FPUIP = 0;
3921 pDst->CS = 0;
3922 pDst->Rsrvd1 = 0;
3923 pDst->FPUDP = 0;
3924 pDst->DS = 0;
3925 pDst->Rsrvd2 = 0;
3926
3927 /* XMM registers. */
3928 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
3929 || pIemCpu->enmCpuMode != IEMMODE_64BIT
3930 || pIemCpu->uCpl != 0)
3931 {
3932 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
3933 for (uint32_t i = 0; i < cXmmRegs; i++)
3934 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
3935 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
3936 * right? */
3937 }
3938
3939 /*
3940 * Commit the memory.
3941 */
3942 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3943 if (rcStrict != VINF_SUCCESS)
3944 return rcStrict;
3945
3946 iemRegAddToRip(pIemCpu, cbInstr);
3947 return VINF_SUCCESS;
3948}
3949
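/*
 * Rough layout of the 512 byte FXSAVE image written above (byte offsets):
 * 0x00 FCW, 0x02 FSW, 0x04 abridged FTW, 0x06 FOP, 0x08 FPU IP (a 64-bit
 * RIP with REX.W), 0x0c CS, 0x10 FPU DP, 0x14 DS, 0x18 MXCSR,
 * 0x1c MXCSR_MASK, 0x20 ST0-ST7 (16 bytes each), 0xa0 XMM0-XMM15,
 * 0x1a0 reserved/available.
 */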
3950
3951/**
3952 * Implements 'FXRSTOR'.
3953 *
3954 * @param iEffSeg The effective segment register for @a GCPtrEff.
 * @param GCPtrEff The address of the image.
3955 * @param enmEffOpSize The operand size (only REX.W really matters).
3956 */
3957IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3958{
3959 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3960
3961 /*
3962 * Raise exceptions.
3963 */
3964 if (pCtx->cr0 & X86_CR0_EM)
3965 return iemRaiseUndefinedOpcode(pIemCpu);
3966 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3967 return iemRaiseDeviceNotAvailable(pIemCpu);
3968 if (GCPtrEff & 15)
3969 {
3970 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3971 * all/any misalignment sizes; Intel says it's an implementation detail. */
3972 if ( (pCtx->cr0 & X86_CR0_AM)
3973 && pCtx->eflags.Bits.u1AC
3974 && pIemCpu->uCpl == 3)
3975 return iemRaiseAlignmentCheckException(pIemCpu);
3976 return iemRaiseGeneralProtectionFault0(pIemCpu);
3977 }
3978 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3979
3980 /*
3981 * Access the memory.
3982 */
3983 void *pvMem512;
3984 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
3985 if (rcStrict != VINF_SUCCESS)
3986 return rcStrict;
3987 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
3988
3989 /*
3990 * Check the state for stuff which will GP(0).
3991 */
3992 uint32_t const fMXCSR = pSrc->MXCSR;
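    /* 0xffbf is the architectural default MXCSR_MASK (all MXCSR bits
       writable except DAZ, bit 6), used when no mask has been established. */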
3993 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
3994 if (fMXCSR & ~fMXCSR_MASK)
3995 {
3996 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
3997 return iemRaiseGeneralProtectionFault0(pIemCpu);
3998 }
3999
4000 /*
4001 * Load the registers.
4002 */
4003 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
4004 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
4005
4006 /* common for all formats */
4007 pCtx->fpu.FCW = pSrc->FCW;
4008 pCtx->fpu.FSW = pSrc->FSW;
4009 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4010 pCtx->fpu.FOP = pSrc->FOP;
4011 pCtx->fpu.MXCSR = fMXCSR;
4012 /* (MXCSR_MASK is read-only) */
4013 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4014 {
4015 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4016 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4017 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4018 pCtx->fpu.aRegs[i].au32[3] = 0;
4019 }
4020
4021 /* FPU IP, CS, DP and DS. */
4022 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4023 {
4024 pCtx->fpu.FPUIP = pSrc->FPUIP;
4025 pCtx->fpu.CS = pSrc->CS;
4026 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4027 pCtx->fpu.FPUDP = pSrc->FPUDP;
4028 pCtx->fpu.DS = pSrc->DS;
4029 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4030 }
4031 else
4032 {
4033 pCtx->fpu.FPUIP = pSrc->FPUIP;
4034 pCtx->fpu.CS = pSrc->CS;
4035 pCtx->fpu.Rsrvd1 = 0;
4036 pCtx->fpu.FPUDP = pSrc->FPUDP;
4037 pCtx->fpu.DS = pSrc->DS;
4038 pCtx->fpu.Rsrvd2 = 0;
4039 }
4040
4041 /* XMM registers. */
4042 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4043 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4044 || pIemCpu->uCpl != 0)
4045 {
4046 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4047 for (uint32_t i = 0; i < cXmmRegs; i++)
4048 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4049 }
4050
4051 /*
4052 * Commit the memory.
4053 */
4054 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4055 if (rcStrict != VINF_SUCCESS)
4056 return rcStrict;
4057
4058 iemRegAddToRip(pIemCpu, cbInstr);
4059 return VINF_SUCCESS;
4060}
4061
4062
4063/**
4064 * Commmon routine for fnstenv and fnsave.
4065 *
4066 * @param uPtr Where to store the state.
4067 * @param pCtx The CPU context.
4068 */
4069static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4070{
4071 if (enmEffOpSize == IEMMODE_16BIT)
4072 {
4073 uPtr.pu16[0] = pCtx->fpu.FCW;
4074 uPtr.pu16[1] = pCtx->fpu.FSW;
4075 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4076 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4077 {
4078 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4079 * protected mode or long mode and we save it in real mode? And vice
4080 * versa? And with 32-bit operand size? I think the CPU is storing the
4081 * effective address ((CS << 4) + IP) in the offset register and not
4082 * doing any address calculations here. */
4083 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4084 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4085 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4086 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4087 }
4088 else
4089 {
4090 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4091 uPtr.pu16[4] = pCtx->fpu.CS;
4092 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4093 uPtr.pu16[6] = pCtx->fpu.DS;
4094 }
4095 }
4096 else
4097 {
4098 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4099 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4100 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4101 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4102 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4103 {
4104 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4105 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4106 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4107 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4108 }
4109 else
4110 {
4111 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4112 uPtr.pu16[4*2] = pCtx->fpu.CS;
4113 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4114 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4115 uPtr.pu16[6*2] = pCtx->fpu.DS;
4116 }
4117 }
4118}
4119
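/*
 * For reference, the environment image produced above is 14 bytes with a
 * 16-bit operand size and 28 bytes with a 32-bit one: FCW, FSW, FTW,
 * FPU IP, FPU CS (sharing a slot with the opcode in some formats), FPU DP
 * and FPU DS, each in a 2- or 4-byte slot.  In real and V8086 mode the
 * selectors are folded into the linear-address style seen in the first
 * branch.
 */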
4120
4121/**
4122 * Common routine for fldenv and frstor.
4123 *
4124 * @param uPtr Where to load the state from.
4125 * @param pCtx The CPU context.
4126 */
4127static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4128{
4129 if (enmEffOpSize == IEMMODE_16BIT)
4130 {
4131 pCtx->fpu.FCW = uPtr.pu16[0];
4132 pCtx->fpu.FSW = uPtr.pu16[1];
4133 pCtx->fpu.FTW = uPtr.pu16[2];
4134 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4135 {
4136 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4137 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4138 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4139 pCtx->fpu.CS = 0;
4140 pCtx->fpu.DS = 0;
4141 }
4142 else
4143 {
4144 pCtx->fpu.FPUIP = uPtr.pu16[3];
4145 pCtx->fpu.CS = uPtr.pu16[4];
4146 pCtx->fpu.FPUDP = uPtr.pu16[5];
4147 pCtx->fpu.DS = uPtr.pu16[6];
4148 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4149 }
4150 }
4151 else
4152 {
4153 pCtx->fpu.FCW = uPtr.pu16[0*2];
4154 pCtx->fpu.FSW = uPtr.pu16[1*2];
4155 pCtx->fpu.FTW = uPtr.pu16[2*2];
4156 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4157 {
4158 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4159 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4160 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4161 pCtx->fpu.CS = 0;
4162 pCtx->fpu.DS = 0;
4163 }
4164 else
4165 {
4166 pCtx->fpu.FPUIP = uPtr.pu32[3];
4167 pCtx->fpu.CS = uPtr.pu16[4*2];
4168 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4169 pCtx->fpu.FPUDP = uPtr.pu32[5];
4170 pCtx->fpu.DS = uPtr.pu16[6*2];
4171 }
4172 }
4173
4174 /* Make adjustments. */
4175 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4176 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4177 iemFpuRecalcExceptionStatus(pCtx);
4178 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4179 * exceptions are pending after loading the saved state? */
4180}
4181
4182
4183/**
4184 * Implements 'FNSTENV'.
4185 *
4186 * @param enmEffOpSize The operand size (only REX.W really matters).
4187 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4188 * @param GCPtrEffDst The address of the image.
4189 */
4190IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4191{
4192 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4193 RTPTRUNION uPtr;
4194 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4195 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4196 if (rcStrict != VINF_SUCCESS)
4197 return rcStrict;
4198
4199 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4200
4201 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4202 if (rcStrict != VINF_SUCCESS)
4203 return rcStrict;
4204
4205 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4206 iemRegAddToRip(pIemCpu, cbInstr);
4207 return VINF_SUCCESS;
4208}
4209
4210
4211/**
4212 * Implements 'FLDENV'.
4213 *
4214 * @param enmEffOpSize The operand size (only REX.W really matters).
4215 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4216 * @param GCPtrEffSrc The address of the image.
4217 */
4218IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4219{
4220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4221 RTCPTRUNION uPtr;
4222 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4223 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4224 if (rcStrict != VINF_SUCCESS)
4225 return rcStrict;
4226
4227 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4228
4229 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4230 if (rcStrict != VINF_SUCCESS)
4231 return rcStrict;
4232
4233 iemRegAddToRip(pIemCpu, cbInstr);
4234 return VINF_SUCCESS;
4235}
4236
4237
4238/**
4239 * Implements 'FLDCW'.
4240 *
4241 * @param u16Fcw The new FCW.
4242 */
4243IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4244{
4245 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4246
4247 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4248 /** @todo Testcase: Try to see what happens when trying to set undefined bits
4249 * (other than 6 and 7). Currently ignoring them. */
4250 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4251 * according to FSW. (This is what is currently implemented.) */
4252 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4253 iemFpuRecalcExceptionStatus(pCtx);
4254
4255 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4256 iemRegAddToRip(pIemCpu, cbInstr);
4257 return VINF_SUCCESS;
4258}
4259
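/*
 * For reference, the FCW bits involved above: bits 0-5 are the exception
 * masks (IM/DM/ZM/OM/UM/PM), bits 8-9 the precision control, bits 10-11
 * the rounding control, and bit 12 the obsolete infinity control; the
 * remaining bits are reserved, and the code above strips whatever
 * X86_FCW_ZERO_MASK covers.
 */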
4260
4261
4262/**
4263 * Implements the underflow case of fxch.
4264 *
4265 * @param iStReg The other stack register.
4266 */
4267IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4268{
4269 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4270
4271 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4272 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4273 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4274
4275 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4276 * registers are read as QNaN and then exchanged. This could be
4277 * wrong... */
4278 if (pCtx->fpu.FCW & X86_FCW_IM)
4279 {
4280 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4281 {
4282 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4283 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4284 else
4285 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4286 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4287 }
4288 else
4289 {
4290 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4291 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4292 }
4293 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4294 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4295 }
4296 else
4297 {
4298 /* raise underflow exception, don't change anything. */
4299 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4300 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4301 }
4302 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4303
4304 iemRegAddToRip(pIemCpu, cbInstr);
4305 return VINF_SUCCESS;
4306}
4307
4308
4309/**
4310 * Implements 'FINCSTP' and 'FDECSTP'.
4311 *
4312 * @param cToAdd 1 or 7.
4313 */
4314IEM_CIMPL_DEF_1(iemCImpl_fpu_AddToTop, uint8_t, cToAdd)
4315{
4316 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4317
4318 /*
4319 * Raise exceptions.
4320 */
4321 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4322 return iemRaiseDeviceNotAvailable(pIemCpu);
4323 uint16_t u16Fsw = pCtx->fpu.FSW;
4324 if (u16Fsw & X86_FSW_ES)
4325 return iemRaiseMathFault(pIemCpu);
4326
4327 /*
4328 * Do the job.
4329 *
4330 * Note! The instructions are listed as control instructions and should
4331 * therefore not update FOP, FPUIP and FPUCS...
4332 * Note! C0, C2 and C3 are documented as undefined, we clear them.
4333 */
4334 /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
4335 * FINCSTP and FDECSTP. */
4336 uint16_t iTop = X86_FSW_TOP_GET(u16Fsw);
4337 iTop += cToAdd;
4338 iTop &= X86_FSW_TOP_SMASK;
4339 u16Fsw &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4340 u16Fsw |= (iTop << X86_FSW_TOP_SHIFT);
4341 pCtx->fpu.FSW = u16Fsw;
4342
4343 iemRegAddToRip(pIemCpu, cbInstr);
4344 return VINF_SUCCESS;
4345}
4346
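/*
 * Worked example: with TOP=0, FDECSTP (cToAdd=7) yields (0 + 7) & 7 = 7,
 * i.e. a decrement modulo 8, while FINCSTP (cToAdd=1) yields 1.  Only TOP
 * moves; no register contents or tag bits change.
 */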
4347
4348/**
4349 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4350 *
4351 * @param iStReg The other stack register.
 * @param pfnAImpl The comparison worker (returns EFLAGS).
 * @param fPop Whether to pop the register stack afterwards.
4352 */
4353IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4354{
4355 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4356 Assert(iStReg < 8);
4357
4358 /*
4359 * Raise exceptions.
4360 */
4361 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4362 return iemRaiseDeviceNotAvailable(pIemCpu);
4363 uint16_t u16Fsw = pCtx->fpu.FSW;
4364 if (u16Fsw & X86_FSW_ES)
4365 return iemRaiseMathFault(pIemCpu);
4366
4367 /*
4368 * Check if any of the register accesses causes #SF + #IA.
4369 */
4370 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4371 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4372 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4373 {
4374 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4375 pCtx->fpu.FSW &= ~X86_FSW_C1;
4376 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4377 if ( !(u16Fsw & X86_FSW_IE)
4378 || (pCtx->fpu.FCW & X86_FCW_IM) )
4379 {
4380 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4381 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4382 }
4383 }
4384 else if (pCtx->fpu.FCW & X86_FCW_IM)
4385 {
4386 /* Masked underflow. */
4387 pCtx->fpu.FSW &= ~X86_FSW_C1;
4388 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4389 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4390 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4391 }
4392 else
4393 {
4394 /* Raise underflow - don't touch EFLAGS or TOP. */
4395 pCtx->fpu.FSW &= ~X86_FSW_C1;
4396 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4397 fPop = false;
4398 }
4399
4400 /*
4401 * Pop if necessary.
4402 */
4403 if (fPop)
4404 {
4405 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4406 pCtx->fpu.FSW &= ~X86_FSW_TOP_MASK;
4407 pCtx->fpu.FSW |= ((iReg1 + 1) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT; /* pop: TOP moves up one entry */
4408 }
4409
4410 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4411 iemRegAddToRip(pIemCpu, cbInstr);
4412 return VINF_SUCCESS;
4413}
4414
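/*
 * For reference, the EFLAGS produced by the comparison worker above:
 *
 *      Result              ZF  PF  CF
 *      ST(0) >  ST(i)       0   0   0
 *      ST(0) <  ST(i)       0   0   1
 *      ST(0) == ST(i)       1   0   0
 *      unordered            1   1   1
 */
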
4415/** @} */
4416