VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@40187

Last change on this file since 40187 was 40187, checked in by vboxsync, 13 years ago

callf fixes. fxsave bounce buffering fix. Don't try fxsave output as REM is incomplete.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 133.6 KB
1/* $Id: IEMAllCImpl.cpp.h 40187 2012-02-21 00:32:45Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
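    /* What the missing check amounts to (sketch only, not wired up to the
     * IEM memory access routines): read the 16-bit I/O map base at
     * TR.base + 0x66, then the two bitmap bytes covering bits
     * u16Port .. u16Port + cbOperand - 1; if those bytes lie beyond the TSS
     * limit or any of the cbOperand bits is set, raise #GP(0). */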
42 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
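
/* Illustrative only: a branch-free sketch with the same semantics as the
 * routine above. Not part of the original source; the name
 * iemHlpCalcParityFlagAlt is made up for this example. */
#if 0
static bool iemHlpCalcParityFlagAlt(uint8_t u8Result)
{
    /* XOR-fold the byte onto itself; bit 0 then holds the XOR of all eight bits. */
    u8Result ^= u8Result >> 4;
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;
    return !(u8Result & 1); /* PF is set when the number of set bits is even. */
}
#endif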
80
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
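    /* Run an 8-bit TEST on the result to get freshly computed flags, then
       splice only the requested (and undefined) bits back into EFLAGS. */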
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param puSel The selector register.
106 * @param pHid The hidden register part.
107 */
108static void iemHlpLoadNullDataSelectorProt(PRTSEL puSel, PCPUMSELREGHID pHid)
109{
110 /** @todo Testcase: write a testcase checking what happens when loading a NULL
111 * data selector in protected mode. */
112 pHid->u64Base = 0;
113 pHid->u32Limit = 0;
114 pHid->Attr.u = 0;
115 *puSel = 0;
116}
117
118
119/**
120 * Helper used by iret.
121 *
122 * @param uCpl The new CPL.
123 * @param puSel The selector register.
124 * @param pHid The corresponding hidden register.
125 */
126static void iemHlpAdjustSelectorForNewCpl(uint8_t uCpl, PRTSEL puSel, PCPUMSELREGHID pHid)
127{
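    /* When dropping to a less privileged level, data/stack selectors the new
       CPL may no longer access (DPL below the new CPL, conforming code
       excepted) are forced to NULL so the outer ring cannot keep using them. */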
128 if ( uCpl > pHid->Attr.n.u2Dpl
129 && pHid->Attr.n.u1DescType /* code or data, not system */
130 && (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
131 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
132 iemHlpLoadNullDataSelectorProt(puSel, pHid);
133}
134
135
136/** @} */
137
138/** @name C Implementations
139 * @{
140 */
141
142/**
143 * Implements a 16-bit popa.
144 */
145IEM_CIMPL_DEF_0(iemCImpl_popa_16)
146{
147 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
148 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
149 RTGCPTR GCPtrLast = GCPtrStart + 15;
150 VBOXSTRICTRC rcStrict;
151
152 /*
153 * The docs are a bit hard to comprehend here, but it looks like we wrap
154 * around in real mode as long as none of the individual "popa" crosses the
155 * end of the stack segment. In protected mode we check the whole access
156 * in one go. For efficiency, only do the word-by-word thing if we're in
157 * danger of wrapping around.
158 */
159 /** @todo do popa boundary / wrap-around checks. */
160 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
161 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
162 {
163 /* word-by-word */
164 RTUINT64U TmpRsp;
165 TmpRsp.u = pCtx->rsp;
166 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
167 if (rcStrict == VINF_SUCCESS)
168 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
169 if (rcStrict == VINF_SUCCESS)
170 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
171 if (rcStrict == VINF_SUCCESS)
172 {
173 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
174 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
175 }
176 if (rcStrict == VINF_SUCCESS)
177 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
178 if (rcStrict == VINF_SUCCESS)
179 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
180 if (rcStrict == VINF_SUCCESS)
181 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
182 if (rcStrict == VINF_SUCCESS)
183 {
184 pCtx->rsp = TmpRsp.u;
185 iemRegAddToRip(pIemCpu, cbInstr);
186 }
187 }
188 else
189 {
190 uint16_t const *pa16Mem = NULL;
191 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
192 if (rcStrict == VINF_SUCCESS)
193 {
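            /* PUSHA stores AX,CX,DX,BX,SP,BP,SI,DI from high address to low,
               so register GREG lives at word index 7 - GREG in this mapping. */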
194 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
195 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
196 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
197 /* skip sp */
198 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
199 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
200 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
201 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
202 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
203 if (rcStrict == VINF_SUCCESS)
204 {
205 iemRegAddToRsp(pCtx, 16);
206 iemRegAddToRip(pIemCpu, cbInstr);
207 }
208 }
209 }
210 return rcStrict;
211}
212
213
214/**
215 * Implements a 32-bit popa.
216 */
217IEM_CIMPL_DEF_0(iemCImpl_popa_32)
218{
219 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
220 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
221 RTGCPTR GCPtrLast = GCPtrStart + 31;
222 VBOXSTRICTRC rcStrict;
223
224 /*
225 * The docs are a bit hard to comprehend here, but it looks like we wrap
226 * around in real mode as long as none of the individual "popa" crosses the
227 * end of the stack segment. In protected mode we check the whole access
228 * in one go. For efficiency, only do the word-by-word thing if we're in
229 * danger of wrapping around.
230 */
231 /** @todo do popa boundary / wrap-around checks. */
232 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
233 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
234 {
235 /* word-by-word */
236 RTUINT64U TmpRsp;
237 TmpRsp.u = pCtx->rsp;
238 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
239 if (rcStrict == VINF_SUCCESS)
240 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
241 if (rcStrict == VINF_SUCCESS)
242 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
243 if (rcStrict == VINF_SUCCESS)
244 {
245 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
246 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
247 }
248 if (rcStrict == VINF_SUCCESS)
249 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
250 if (rcStrict == VINF_SUCCESS)
251 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
252 if (rcStrict == VINF_SUCCESS)
253 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
254 if (rcStrict == VINF_SUCCESS)
255 {
256#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
257 pCtx->rdi &= UINT32_MAX;
258 pCtx->rsi &= UINT32_MAX;
259 pCtx->rbp &= UINT32_MAX;
260 pCtx->rbx &= UINT32_MAX;
261 pCtx->rdx &= UINT32_MAX;
262 pCtx->rcx &= UINT32_MAX;
263 pCtx->rax &= UINT32_MAX;
264#endif
265 pCtx->rsp = TmpRsp.u;
266 iemRegAddToRip(pIemCpu, cbInstr);
267 }
268 }
269 else
270 {
271 uint32_t const *pa32Mem;
272 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
273 if (rcStrict == VINF_SUCCESS)
274 {
275 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
276 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
277 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
278 /* skip esp */
279 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
280 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
281 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
282 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
283 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
284 if (rcStrict == VINF_SUCCESS)
285 {
286 iemRegAddToRsp(pCtx, 32);
287 iemRegAddToRip(pIemCpu, cbInstr);
288 }
289 }
290 }
291 return rcStrict;
292}
293
294
295/**
296 * Implements a 16-bit pusha.
297 */
298IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
299{
300 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
301 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
302 RTGCPTR GCPtrBottom = GCPtrTop - 15;
303 VBOXSTRICTRC rcStrict;
304
305 /*
306 * The docs are a bit hard to comprehend here, but it looks like we wrap
307 * around in real mode as long as none of the individual "pusha" crosses the
308 * end of the stack segment. In protected mode we check the whole access
309 * in one go. For efficiency, only do the word-by-word thing if we're in
310 * danger of wrapping around.
311 */
312 /** @todo do pusha boundary / wrap-around checks. */
313 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
314 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
315 {
316 /* word-by-word */
317 RTUINT64U TmpRsp;
318 TmpRsp.u = pCtx->rsp;
319 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
320 if (rcStrict == VINF_SUCCESS)
321 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
322 if (rcStrict == VINF_SUCCESS)
323 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
324 if (rcStrict == VINF_SUCCESS)
325 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
326 if (rcStrict == VINF_SUCCESS)
327 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
328 if (rcStrict == VINF_SUCCESS)
329 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
330 if (rcStrict == VINF_SUCCESS)
331 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
332 if (rcStrict == VINF_SUCCESS)
333 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
334 if (rcStrict == VINF_SUCCESS)
335 {
336 pCtx->rsp = TmpRsp.u;
337 iemRegAddToRip(pIemCpu, cbInstr);
338 }
339 }
340 else
341 {
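        /* The wrap check above used top - 15; the lowest byte the 16-byte
           push block actually writes is top - 16, so adjust before mapping. */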
342 GCPtrBottom--;
343 uint16_t *pa16Mem = NULL;
344 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
345 if (rcStrict == VINF_SUCCESS)
346 {
347 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
348 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
349 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
350 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
351 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
352 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
353 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
354 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
355 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
356 if (rcStrict == VINF_SUCCESS)
357 {
358 iemRegSubFromRsp(pCtx, 16);
359 iemRegAddToRip(pIemCpu, cbInstr);
360 }
361 }
362 }
363 return rcStrict;
364}
365
366
367/**
368 * Implements a 32-bit pusha.
369 */
370IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
371{
372 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
373 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
374 RTGCPTR GCPtrBottom = GCPtrTop - 31;
375 VBOXSTRICTRC rcStrict;
376
377 /*
378 * The docs are a bit hard to comprehend here, but it looks like we wrap
379 * around in real mode as long as none of the individual "pusha" crosses the
380 * end of the stack segment. In protected mode we check the whole access
381 * in one go. For efficiency, only do the word-by-word thing if we're in
382 * danger of wrapping around.
383 */
384 /** @todo do pusha boundary / wrap-around checks. */
385 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
386 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
387 {
388 /* word-by-word */
389 RTUINT64U TmpRsp;
390 TmpRsp.u = pCtx->rsp;
391 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
392 if (rcStrict == VINF_SUCCESS)
393 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
394 if (rcStrict == VINF_SUCCESS)
395 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
396 if (rcStrict == VINF_SUCCESS)
397 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
398 if (rcStrict == VINF_SUCCESS)
399 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
400 if (rcStrict == VINF_SUCCESS)
401 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
402 if (rcStrict == VINF_SUCCESS)
403 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
404 if (rcStrict == VINF_SUCCESS)
405 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
406 if (rcStrict == VINF_SUCCESS)
407 {
408 pCtx->rsp = TmpRsp.u;
409 iemRegAddToRip(pIemCpu, cbInstr);
410 }
411 }
412 else
413 {
414 GCPtrBottom--;
415 uint32_t *pa32Mem;
416 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
417 if (rcStrict == VINF_SUCCESS)
418 {
419 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
420 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
421 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
422 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
423 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
424 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
425 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
426 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
427 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
428 if (rcStrict == VINF_SUCCESS)
429 {
430 iemRegSubFromRsp(pCtx, 32);
431 iemRegAddToRip(pIemCpu, cbInstr);
432 }
433 }
434 }
435 return rcStrict;
436}
437
438
439/**
440 * Implements pushf.
441 *
442 *
443 * @param enmEffOpSize The effective operand size.
444 */
445IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
446{
447 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
448
449 /*
450 * If we're in V8086 mode some care is required (which is why we're
451 * doing this in a C implementation).
452 */
453 uint32_t fEfl = pCtx->eflags.u;
454 if ( (fEfl & X86_EFL_VM)
455 && X86_EFL_GET_IOPL(fEfl) != 3 )
456 {
457 Assert(pCtx->cr0 & X86_CR0_PE);
458 if ( enmEffOpSize != IEMMODE_16BIT
459 || !(pCtx->cr4 & X86_CR4_VME))
460 return iemRaiseGeneralProtectionFault0(pIemCpu);
461 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
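        /* VME: the pushed 16-bit image carries VIF in the IF position, so
           copy bit 19 (VIF) down into bit 9 (IF was cleared just above). */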
462 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
463 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
464 }
465
466 /*
467 * Ok, clear RF and VM and push the flags.
468 */
469 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
470
471 VBOXSTRICTRC rcStrict;
472 switch (enmEffOpSize)
473 {
474 case IEMMODE_16BIT:
475 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
476 break;
477 case IEMMODE_32BIT:
478 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
479 break;
480 case IEMMODE_64BIT:
481 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
482 break;
483 IEM_NOT_REACHED_DEFAULT_CASE_RET();
484 }
485 if (rcStrict != VINF_SUCCESS)
486 return rcStrict;
487
488 iemRegAddToRip(pIemCpu, cbInstr);
489 return VINF_SUCCESS;
490}
491
492
493/**
494 * Implements popf.
495 *
496 * @param enmEffOpSize The effective operand size.
497 */
498IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
499{
500 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
501 uint32_t const fEflOld = pCtx->eflags.u;
502 VBOXSTRICTRC rcStrict;
503 uint32_t fEflNew;
504
505 /*
506 * V8086 is special as usual.
507 */
508 if (fEflOld & X86_EFL_VM)
509 {
510 /*
511 * Almost anything goes if IOPL is 3.
512 */
513 if (X86_EFL_GET_IOPL(fEflOld) == 3)
514 {
515 switch (enmEffOpSize)
516 {
517 case IEMMODE_16BIT:
518 {
519 uint16_t u16Value;
520 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
521 if (rcStrict != VINF_SUCCESS)
522 return rcStrict;
523 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
524 break;
525 }
526 case IEMMODE_32BIT:
527 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
528 if (rcStrict != VINF_SUCCESS)
529 return rcStrict;
530 break;
531 IEM_NOT_REACHED_DEFAULT_CASE_RET();
532 }
533
534 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
535 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
536 }
537 /*
538 * Interrupt flag virtualization with CR4.VME=1.
539 */
540 else if ( enmEffOpSize == IEMMODE_16BIT
541 && (pCtx->cr4 & X86_CR4_VME) )
542 {
543 uint16_t u16Value;
544 RTUINT64U TmpRsp;
545 TmpRsp.u = pCtx->rsp;
546 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
547 if (rcStrict != VINF_SUCCESS)
548 return rcStrict;
549
550 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
551 * or before? */
552 if ( ( (u16Value & X86_EFL_IF)
553 && (fEflOld & X86_EFL_VIP))
554 || (u16Value & X86_EFL_TF) )
555 return iemRaiseGeneralProtectionFault0(pIemCpu);
556
557 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
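            /* VME: the popped IF value lands in VIF (bit 9 -> bit 19); the
               real IF is not taken from the image and is re-merged from the
               old flags below. */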
558 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
559 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
560 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
561
562 pCtx->rsp = TmpRsp.u;
563 }
564 else
565 return iemRaiseGeneralProtectionFault0(pIemCpu);
566
567 }
568 /*
569 * Not in V8086 mode.
570 */
571 else
572 {
573 /* Pop the flags. */
574 switch (enmEffOpSize)
575 {
576 case IEMMODE_16BIT:
577 {
578 uint16_t u16Value;
579 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
580 if (rcStrict != VINF_SUCCESS)
581 return rcStrict;
582 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
583 break;
584 }
585 case IEMMODE_32BIT:
586 case IEMMODE_64BIT:
587 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
588 if (rcStrict != VINF_SUCCESS)
589 return rcStrict;
590 break;
591 IEM_NOT_REACHED_DEFAULT_CASE_RET();
592 }
593
594 /* Merge them with the current flags. */
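        /* Ring 0 (or an image that leaves IOPL and IF untouched) may take all
           POPF-changeable bits; CPL <= IOPL may change IF but not IOPL; any
           other CPL gets neither. Bits outside X86_EFL_POPF_BITS always keep
           their old value, i.e. merged = (popped & fMask) | (old & ~fMask). */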
595 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
596 || pIemCpu->uCpl == 0)
597 {
598 fEflNew &= X86_EFL_POPF_BITS;
599 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
600 }
601 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
602 {
603 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
604 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
605 }
606 else
607 {
608 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
609 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
610 }
611 }
612
613 /*
614 * Commit the flags.
615 */
616 Assert(fEflNew & RT_BIT_32(1));
617 pCtx->eflags.u = fEflNew;
618 iemRegAddToRip(pIemCpu, cbInstr);
619
620 return VINF_SUCCESS;
621}
622
623
624/**
625 * Implements a 16-bit indirect call.
626 *
627 * @param uNewPC The new program counter (RIP) value (loaded from the
628 * operand).
630 */
631IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
632{
633 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
634 uint16_t uOldPC = pCtx->ip + cbInstr;
635 if (uNewPC > pCtx->csHid.u32Limit)
636 return iemRaiseGeneralProtectionFault0(pIemCpu);
637
638 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
639 if (rcStrict != VINF_SUCCESS)
640 return rcStrict;
641
642 pCtx->rip = uNewPC;
643 return VINF_SUCCESS;
644
645}
646
647
648/**
649 * Implements a 16-bit relative call.
650 *
651 * @param offDisp The displacement offset.
652 */
653IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
654{
655 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
656 uint16_t uOldPC = pCtx->ip + cbInstr;
657 uint16_t uNewPC = uOldPC + offDisp;
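    /* Note: 16-bit arithmetic here, so the target wraps at 64 KiB just like
       IP does for a near call with 16-bit operand size. */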
658 if (uNewPC > pCtx->csHid.u32Limit)
659 return iemRaiseGeneralProtectionFault0(pIemCpu);
660
661 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
662 if (rcStrict != VINF_SUCCESS)
663 return rcStrict;
664
665 pCtx->rip = uNewPC;
666 return VINF_SUCCESS;
667}
668
669
670/**
671 * Implements a 32-bit indirect call.
672 *
673 * @param uNewPC The new program counter (RIP) value (loaded from the
674 * operand).
676 */
677IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
678{
679 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
680 uint32_t uOldPC = pCtx->eip + cbInstr;
681 if (uNewPC > pCtx->csHid.u32Limit)
682 return iemRaiseGeneralProtectionFault0(pIemCpu);
683
684 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
685 if (rcStrict != VINF_SUCCESS)
686 return rcStrict;
687
688 pCtx->rip = uNewPC;
689 return VINF_SUCCESS;
690
691}
692
693
694/**
695 * Implements a 32-bit relative call.
696 *
697 * @param offDisp The displacement offset.
698 */
699IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
700{
701 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
702 uint32_t uOldPC = pCtx->eip + cbInstr;
703 uint32_t uNewPC = uOldPC + offDisp;
704 if (uNewPC > pCtx->csHid.u32Limit)
705 return iemRaiseGeneralProtectionFault0(pIemCpu);
706
707 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
708 if (rcStrict != VINF_SUCCESS)
709 return rcStrict;
710
711 pCtx->rip = uNewPC;
712 return VINF_SUCCESS;
713}
714
715
716/**
717 * Implements a 64-bit indirect call.
718 *
719 * @param uNewPC The new program counter (RIP) value (loaded from the
720 * operand).
722 */
723IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
724{
725 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
726 uint64_t uOldPC = pCtx->rip + cbInstr;
727 if (!IEM_IS_CANONICAL(uNewPC))
728 return iemRaiseGeneralProtectionFault0(pIemCpu);
729
730 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
731 if (rcStrict != VINF_SUCCESS)
732 return rcStrict;
733
734 pCtx->rip = uNewPC;
735 return VINF_SUCCESS;
736
737}
738
739
740/**
741 * Implements a 64-bit relative call.
742 *
743 * @param offDisp The displacement offset.
744 */
745IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
746{
747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
748 uint64_t uOldPC = pCtx->rip + cbInstr;
749 uint64_t uNewPC = uOldPC + offDisp;
750 if (!IEM_IS_CANONICAL(uNewPC))
751 return iemRaiseNotCanonical(pIemCpu);
752
753 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
754 if (rcStrict != VINF_SUCCESS)
755 return rcStrict;
756
757 pCtx->rip = uNewPC;
758 return VINF_SUCCESS;
759}
760
761
762/**
763 * Implements far jumps and calls thru task segments (TSS).
764 *
765 * @param uSel The selector.
766 * @param enmBranch The kind of branching we're performing.
767 * @param enmEffOpSize The effective operand size.
768 * @param pDesc The descriptor corresponding to @a uSel. The type is
769 * task segment (TSS).
770 */
771IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
772{
773 /* Call various functions to do the work. */
774 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
775}
776
777
778/**
779 * Implements far jumps and calls thru task gates.
780 *
781 * @param uSel The selector.
782 * @param enmBranch The kind of branching we're performing.
783 * @param enmEffOpSize The effective operand size.
784 * @param pDesc The descriptor corresponding to @a uSel. The type is
785 * task gate.
786 */
787IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
788{
789 /* Call various functions to do the work. */
790 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
791}
792
793
794/**
795 * Implements far jumps and calls thru call gates.
796 *
797 * @param uSel The selector.
798 * @param enmBranch The kind of branching we're performing.
799 * @param enmEffOpSize The effective operand size.
800 * @param pDesc The descriptor corresponding to @a uSel. The type is
801 * call gate.
802 */
803IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
804{
805 /* Call various functions to do the work. */
806 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
807}
808
809
810/**
811 * Implements far jumps and calls thru system selectors.
812 *
813 * @param uSel The selector.
814 * @param enmBranch The kind of branching we're performing.
815 * @param enmEffOpSize The effective operand size.
816 * @param pDesc The descriptor corresponding to @a uSel.
817 */
818IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
819{
820 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
821 Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
822
823 if (IEM_IS_LONG_MODE(pIemCpu))
824 switch (pDesc->Legacy.Gen.u4Type)
825 {
826 case AMD64_SEL_TYPE_SYS_CALL_GATE:
827 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
828
829 default:
830 case AMD64_SEL_TYPE_SYS_LDT:
831 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
832 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
833 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
834 case AMD64_SEL_TYPE_SYS_INT_GATE:
835 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
836 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
837
838 }
839
840 switch (pDesc->Legacy.Gen.u4Type)
841 {
842 case X86_SEL_TYPE_SYS_286_CALL_GATE:
843 case X86_SEL_TYPE_SYS_386_CALL_GATE:
844 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
845
846 case X86_SEL_TYPE_SYS_TASK_GATE:
847 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
848
849 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
850 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
851 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
852
853 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
854 Log(("branch %04x -> busy 286 TSS\n", uSel));
855 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
856
857 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
858 Log(("branch %04x -> busy 386 TSS\n", uSel));
859 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
860
861 default:
862 case X86_SEL_TYPE_SYS_LDT:
863 case X86_SEL_TYPE_SYS_286_INT_GATE:
864 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
865 case X86_SEL_TYPE_SYS_386_INT_GATE:
866 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
867 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
868 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
869 }
870}
871
872
873/**
874 * Implements far jumps.
875 *
876 * @param uSel The selector.
877 * @param offSeg The segment offset.
878 * @param enmEffOpSize The effective operand size.
879 */
880IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
881{
882 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
883 NOREF(cbInstr);
884 Assert(offSeg <= UINT32_MAX);
885
886 /*
887 * Real mode and V8086 mode are easy. The only snag seems to be that
888 * CS.limit doesn't change and the limit check is done against the current
889 * limit.
890 */
891 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
892 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
893 {
894 if (offSeg > pCtx->csHid.u32Limit)
895 return iemRaiseGeneralProtectionFault0(pIemCpu);
896
897 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
898 pCtx->rip = offSeg;
899 else
900 pCtx->rip = offSeg & UINT16_MAX;
901 pCtx->cs = uSel;
902 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
903 /** @todo REM reset the accessed bit (see on jmp far16 after disabling
904 * PE. Check with VT-x and AMD-V. */
905#ifdef IEM_VERIFICATION_MODE
906 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
907#endif
908 return VINF_SUCCESS;
909 }
910
911 /*
912 * Protected mode. Need to parse the specified descriptor...
913 */
914 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
915 {
916 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
917 return iemRaiseGeneralProtectionFault0(pIemCpu);
918 }
919
920 /* Fetch the descriptor. */
921 IEMSELDESC Desc;
922 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
923 if (rcStrict != VINF_SUCCESS)
924 return rcStrict;
925
926 /* Is it there? */
927 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
928 {
929 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
930 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
931 }
932
933 /*
934 * Deal with it according to its type. We do the standard code selectors
935 * here and dispatch the system selectors to worker functions.
936 */
937 if (!Desc.Legacy.Gen.u1DescType)
938 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
939
940 /* Only code segments. */
941 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
942 {
943 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
944 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
945 }
946
947 /* L vs D. */
948 if ( Desc.Legacy.Gen.u1Long
949 && Desc.Legacy.Gen.u1DefBig
950 && IEM_IS_LONG_MODE(pIemCpu))
951 {
952 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
953 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
954 }
955
956 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
957 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
958 {
959 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
960 {
961 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
962 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
963 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
964 }
965 }
966 else
967 {
968 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
969 {
970 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
971 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
972 }
973 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
974 {
975 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
976 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
977 }
978 }
979
980 /* Chop the high bits if 16-bit (Intel says so). */
981 if (enmEffOpSize == IEMMODE_16BIT)
982 offSeg &= UINT16_MAX;
983
984 /* Limit check. (Should alternatively check for non-canonical addresses
985 here, but that is ruled out by offSeg being 32-bit, right?) */
986 uint64_t u64Base;
987 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
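    /* A granular (G=1) descriptor expresses its 20-bit limit in 4 KiB units;
       shift by PAGE_SHIFT and fill the low 12 bits to get the byte limit
       (e.g. 0xfffff -> 0xffffffff). */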
988 if (Desc.Legacy.Gen.u1Granularity)
989 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
990 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
991 u64Base = 0;
992 else
993 {
994 if (offSeg > cbLimit)
995 {
996 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
997 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
998 }
999 u64Base = X86DESC_BASE(Desc.Legacy);
1000 }
1001
1002 /*
1003 * Ok, everything checked out fine. Now set the accessed bit before
1004 * committing the result into CS, CSHID and RIP.
1005 */
1006 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1007 {
1008 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1009 if (rcStrict != VINF_SUCCESS)
1010 return rcStrict;
1011#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1012 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1013#endif
1014 }
1015
1016 /* commit */
1017 pCtx->rip = offSeg;
1018 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1019 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1020 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1021 pCtx->csHid.u32Limit = cbLimit;
1022 pCtx->csHid.u64Base = u64Base;
1023 /** @todo check if the hidden bits are loaded correctly for 64-bit
1024 * mode. */
1025 return VINF_SUCCESS;
1026}
1027
1028
1029/**
1030 * Implements far calls.
1031 *
1032 * This is very similar to iemCImpl_FarJmp.
1033 *
1034 * @param uSel The selector.
1035 * @param offSeg The segment offset.
1036 * @param enmEffOpSize The operand size (in case we need it).
1037 */
1038IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1039{
1040 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1041 VBOXSTRICTRC rcStrict;
1042 uint64_t uNewRsp;
1043 RTPTRUNION uPtrRet;
1044
1045 /*
1046 * Real mode and V8086 mode are easy. The only snag seems to be that
1047 * CS.limit doesn't change and the limit check is done against the current
1048 * limit.
1049 */
1050 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1051 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1052 {
1053 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1054
1055 /* Check stack first - may #SS(0). */
1056 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1057 &uPtrRet.pv, &uNewRsp);
1058 if (rcStrict != VINF_SUCCESS)
1059 return rcStrict;
1060
1061 /* Check the target address range. */
1062 if (offSeg > UINT32_MAX)
1063 return iemRaiseGeneralProtectionFault0(pIemCpu);
1064
1065 /* Everything is fine, push the return address. */
1066 if (enmEffOpSize == IEMMODE_16BIT)
1067 {
1068 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1069 uPtrRet.pu16[1] = pCtx->cs;
1070 }
1071 else
1072 {
1073 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1074 uPtrRet.pu16[3] = pCtx->cs;
1075 }
1076 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1077 if (rcStrict != VINF_SUCCESS)
1078 return rcStrict;
1079
1080 /* Branch. */
1081 pCtx->rip = offSeg;
1082 pCtx->cs = uSel;
1083 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
1084 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
1085 * after disabling PE.) Check with VT-x and AMD-V. */
1086#ifdef IEM_VERIFICATION_MODE
1087 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
1088#endif
1089 return VINF_SUCCESS;
1090 }
1091
1092 /*
1093 * Protected mode. Need to parse the specified descriptor...
1094 */
1095 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1096 {
1097 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1098 return iemRaiseGeneralProtectionFault0(pIemCpu);
1099 }
1100
1101 /* Fetch the descriptor. */
1102 IEMSELDESC Desc;
1103 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1104 if (rcStrict != VINF_SUCCESS)
1105 return rcStrict;
1106
1107 /*
1108 * Deal with it according to its type. We do the standard code selectors
1109 * here and dispatch the system selectors to worker functions.
1110 */
1111 if (!Desc.Legacy.Gen.u1DescType)
1112 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1113
1114 /* Only code segments. */
1115 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1116 {
1117 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1118 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1119 }
1120
1121 /* L vs D. */
1122 if ( Desc.Legacy.Gen.u1Long
1123 && Desc.Legacy.Gen.u1DefBig
1124 && IEM_IS_LONG_MODE(pIemCpu))
1125 {
1126 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1127 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1128 }
1129
1130 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1131 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1132 {
1133 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1134 {
1135 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1136 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1137 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1138 }
1139 }
1140 else
1141 {
1142 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1143 {
1144 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1145 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1146 }
1147 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1148 {
1149 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1150 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1151 }
1152 }
1153
1154 /* Is it there? */
1155 if (!Desc.Legacy.Gen.u1Present)
1156 {
1157 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1158 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1159 }
1160
1161 /* Check stack first - may #SS(0). */
1162 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1163 * 16-bit code cause a two or four byte CS to be pushed? */
1164 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1165 enmEffOpSize == IEMMODE_64BIT ? 8+8
1166 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1167 &uPtrRet.pv, &uNewRsp);
1168 if (rcStrict != VINF_SUCCESS)
1169 return rcStrict;
1170
1171 /* Chop the high bits if 16-bit (Intel says so). */
1172 if (enmEffOpSize == IEMMODE_16BIT)
1173 offSeg &= UINT16_MAX;
1174
1175 /* Limit / canonical check. */
1176 uint64_t u64Base;
1177 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1178 if (Desc.Legacy.Gen.u1Granularity)
1179 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1180
1181 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1182 {
1183 if (!IEM_IS_CANONICAL(offSeg))
1184 {
1185 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1186 return iemRaiseNotCanonical(pIemCpu);
1187 }
1188 u64Base = 0;
1189 }
1190 else
1191 {
1192 if (offSeg > cbLimit)
1193 {
1194 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1195 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1196 }
1197 u64Base = X86DESC_BASE(Desc.Legacy);
1198 }
1199
1200 /*
1201 * Now set the accessed bit before
1202 * writing the return address to the stack and committing the result into
1203 * CS, CSHID and RIP.
1204 */
1205 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1206 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1207 {
1208 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1209 if (rcStrict != VINF_SUCCESS)
1210 return rcStrict;
1211#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1212 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1213#endif
1214 }
1215
1216 /* stack */
1217 if (enmEffOpSize == IEMMODE_16BIT)
1218 {
1219 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1220 uPtrRet.pu16[1] = pCtx->cs;
1221 }
1222 else if (enmEffOpSize == IEMMODE_32BIT)
1223 {
1224 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1225 uPtrRet.pu32[1] = pCtx->cs; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1226 }
1227 else
1228 {
1229 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1230 uPtrRet.pu64[1] = pCtx->cs; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1231 }
1232 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1233 if (rcStrict != VINF_SUCCESS)
1234 return rcStrict;
1235
1236 /* commit */
1237 pCtx->rip = offSeg;
1238 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1239 pCtx->cs |= pIemCpu->uCpl;
1240 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1241 pCtx->csHid.u32Limit = cbLimit;
1242 pCtx->csHid.u64Base = u64Base;
1243 /** @todo check if the hidden bits are loaded correctly for 64-bit
1244 * mode. */
1245 return VINF_SUCCESS;
1246}
1247
1248
1249/**
1250 * Implements retf.
1251 *
1252 * @param enmEffOpSize The effective operand size.
1253 * @param cbPop The amount of arguments to pop from the stack
1254 * (bytes).
1255 */
1256IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1257{
1258 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1259 VBOXSTRICTRC rcStrict;
1260 RTCPTRUNION uPtrFrame;
1261 uint64_t uNewRsp;
1262 uint64_t uNewRip;
1263 uint16_t uNewCs;
1264 NOREF(cbInstr);
1265
1266 /*
1267 * Read the stack values first.
1268 */
1269 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1270 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1271 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1272 if (rcStrict != VINF_SUCCESS)
1273 return rcStrict;
1274 if (enmEffOpSize == IEMMODE_16BIT)
1275 {
1276 uNewRip = uPtrFrame.pu16[0];
1277 uNewCs = uPtrFrame.pu16[1];
1278 }
1279 else if (enmEffOpSize == IEMMODE_32BIT)
1280 {
1281 uNewRip = uPtrFrame.pu32[0];
1282 uNewCs = uPtrFrame.pu16[2];
1283 }
1284 else
1285 {
1286 uNewRip = uPtrFrame.pu64[0];
1287 uNewCs = uPtrFrame.pu16[4];
1288 }
1289
1290 /*
1291 * Real mode and V8086 mode are easy.
1292 */
1293 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1294 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1295 {
1296 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1297 /** @todo check how this is supposed to work if sp=0xfffe. */
1298
1299 /* Check the limit of the new EIP. */
1300 /** @todo Intel pseudo code only does the limit check for 16-bit
1301 * operands, AMD does not make any distinction. What is right? */
1302 if (uNewRip > pCtx->csHid.u32Limit)
1303 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1304
1305 /* commit the operation. */
1306 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1307 if (rcStrict != VINF_SUCCESS)
1308 return rcStrict;
1309 pCtx->rip = uNewRip;
1310 pCtx->cs = uNewCs;
1311 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1312 /** @todo do we load attribs and limit as well? */
1313 if (cbPop)
1314 iemRegAddToRsp(pCtx, cbPop);
1315 return VINF_SUCCESS;
1316 }
1317
1318 /*
1319 * Protected mode is complicated, of course.
1320 */
1321 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1322 {
1323 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1324 return iemRaiseGeneralProtectionFault0(pIemCpu);
1325 }
1326
1327 /* Fetch the descriptor. */
1328 IEMSELDESC DescCs;
1329 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1330 if (rcStrict != VINF_SUCCESS)
1331 return rcStrict;
1332
1333 /* Can only return to a code selector. */
1334 if ( !DescCs.Legacy.Gen.u1DescType
1335 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1336 {
1337 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1338 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1339 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1340 }
1341
1342 /* L vs D. */
1343 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1344 && DescCs.Legacy.Gen.u1DefBig
1345 && IEM_IS_LONG_MODE(pIemCpu))
1346 {
1347 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1348 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1349 }
1350
1351 /* DPL/RPL/CPL checks. */
1352 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1353 {
1354 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1355 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1356 }
1357
1358 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1359 {
1360 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1361 {
1362 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1363 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1364 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1365 }
1366 }
1367 else
1368 {
1369 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1370 {
1371 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1372 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1373 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1374 }
1375 }
1376
1377 /* Is it there? */
1378 if (!DescCs.Legacy.Gen.u1Present)
1379 {
1380 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1381 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1382 }
1383
1384 /*
1385 * Return to outer privilege? (We'll typically have entered via a call gate.)
1386 */
1387 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1388 {
1389 /* Read the return pointer, it comes before the parameters. */
1390 RTCPTRUNION uPtrStack;
1391 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1392 if (rcStrict != VINF_SUCCESS)
1393 return rcStrict;
1394 uint16_t uNewOuterSs;
1395 uint64_t uNewOuterRsp;
1396 if (enmEffOpSize == IEMMODE_16BIT)
1397 {
1398 uNewOuterRsp = uPtrFrame.pu16[0];
1399 uNewOuterSs = uPtrFrame.pu16[1];
1400 }
1401 else if (enmEffOpSize == IEMMODE_32BIT)
1402 {
1403 uNewOuterRsp = uPtrFrame.pu32[0];
1404 uNewOuterSs = uPtrFrame.pu16[2];
1405 }
1406 else
1407 {
1408 uNewOuterRsp = uPtrFrame.pu64[0];
1409 uNewOuterSs = uPtrFrame.pu16[4];
1410 }
1411
1412 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1413 and read the selector. */
1414 IEMSELDESC DescSs;
1415 if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
1416 {
1417 if ( !DescCs.Legacy.Gen.u1Long
1418 || (uNewOuterSs & X86_SEL_RPL) == 3)
1419 {
1420 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1421 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1422 return iemRaiseGeneralProtectionFault0(pIemCpu);
1423 }
1424 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1425 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1426 }
1427 else
1428 {
1429 /* Fetch the descriptor for the new stack segment. */
1430 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1431 if (rcStrict != VINF_SUCCESS)
1432 return rcStrict;
1433 }
1434
1435 /* Check that RPL of stack and code selectors match. */
1436 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1437 {
1438 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1439 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1440 }
1441
1442 /* Must be a writable data segment. */
1443 if ( !DescSs.Legacy.Gen.u1DescType
1444 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1445 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1446 {
1447 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1448 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1449 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1450 }
1451
1452 /* L vs D. (Not mentioned by intel.) */
1453 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1454 && DescSs.Legacy.Gen.u1DefBig
1455 && IEM_IS_LONG_MODE(pIemCpu))
1456 {
1457 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1458 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1459 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1460 }
1461
1462 /* DPL/RPL/CPL checks. */
1463 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1464 {
1465 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1466 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1467 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1468 }
1469
1470 /* Is it there? */
1471 if (!DescSs.Legacy.Gen.u1Present)
1472 {
1473 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1474 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewOuterSs);
1475 }
1476
1477 /* Calc SS limit.*/
1478 uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy);
1479 if (DescSs.Legacy.Gen.u1Granularity)
1480 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1481
1482
1483 /* Is RIP canonical or within CS.limit? */
1484 uint64_t u64Base;
1485 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1486 if (DescCs.Legacy.Gen.u1Granularity)
1487 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1488
1489 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1490 {
1491 if (!IEM_IS_CANONICAL(uNewRip))
1492 {
1493 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1494 return iemRaiseNotCanonical(pIemCpu);
1495 }
1496 u64Base = 0;
1497 }
1498 else
1499 {
1500 if (uNewRip > cbLimitCs)
1501 {
1502 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1503 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1504 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1505 }
1506 u64Base = X86DESC_BASE(DescCs.Legacy);
1507 }
1508
1509 /*
1510 * Now set the accessed bit before
1511 * writing the return address to the stack and committing the result into
1512 * CS, CSHID and RIP.
1513 */
1514 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1515 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1516 {
1517 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1518 if (rcStrict != VINF_SUCCESS)
1519 return rcStrict;
1520#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1521 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1522#endif
1523 }
1524 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1525 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1526 {
1527 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1528 if (rcStrict != VINF_SUCCESS)
1529 return rcStrict;
1530#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1531 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1532#endif
1533 }
1534
1535 /* commit */
1536 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1537 if (rcStrict != VINF_SUCCESS)
1538 return rcStrict;
1539 if (enmEffOpSize == IEMMODE_16BIT)
1540 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1541 else
1542 pCtx->rip = uNewRip;
1543 pCtx->cs = uNewCs;
1544 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1545 pCtx->csHid.u32Limit = cbLimitCs;
1546 pCtx->csHid.u64Base = u64Base;
1547 pCtx->rsp = uNewOuterRsp;
1548 pCtx->ss = uNewOuterSs;
1549 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSs.Legacy);
1550 pCtx->ssHid.u32Limit = cbLimitSs;
1551 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1552 pCtx->ssHid.u64Base = 0;
1553 else
1554 pCtx->ssHid.u64Base = X86DESC_BASE(DescSs.Legacy);
1555
1556 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1557 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
1558 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
1559 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
1560 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
1561
1562 /** @todo check if the hidden bits are loaded correctly for 64-bit
1563 * mode. */
1564
1565 if (cbPop)
1566 iemRegAddToRsp(pCtx, cbPop);
1567
1568 /* Done! */
1569 }
1570 /*
1571 * Return to the same privilege level
1572 */
1573 else
1574 {
1575 /* Limit / canonical check. */
1576 uint64_t u64Base;
1577 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1578 if (DescCs.Legacy.Gen.u1Granularity)
1579 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1580
1581 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1582 {
1583 if (!IEM_IS_CANONICAL(uNewRip))
1584 {
1585 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1586 return iemRaiseNotCanonical(pIemCpu);
1587 }
1588 u64Base = 0;
1589 }
1590 else
1591 {
1592 if (uNewRip > cbLimitCs)
1593 {
1594 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1595 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1596 }
1597 u64Base = X86DESC_BASE(DescCs.Legacy);
1598 }
1599
1600 /*
1601 * Now set the accessed bit before
1602 * writing the return address to the stack and committing the result into
1603 * CS, CSHID and RIP.
1604 */
1605 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1606 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1607 {
1608 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1609 if (rcStrict != VINF_SUCCESS)
1610 return rcStrict;
1611#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1612 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1613#endif
1614 }
1615
1616 /* commit */
1617 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1618 if (rcStrict != VINF_SUCCESS)
1619 return rcStrict;
1620 if (enmEffOpSize == IEMMODE_16BIT)
1621 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1622 else
1623 pCtx->rip = uNewRip;
1624 pCtx->cs = uNewCs;
1625 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1626 pCtx->csHid.u32Limit = cbLimitCs;
1627 pCtx->csHid.u64Base = u64Base;
1628 /** @todo check if the hidden bits are loaded correctly for 64-bit
1629 * mode. */
1630 if (cbPop)
1631 iemRegAddToRsp(pCtx, cbPop);
1632 }
1633 return VINF_SUCCESS;
1634}
1635
1636
1637/**
1638 * Implements retn.
1639 *
1640 * We're doing this in C because of the \#GP that might be raised if the popped
1641 * program counter is out of bounds.
1642 *
1643 * @param enmEffOpSize The effective operand size.
1644 * @param cbPop The amount of arguments to pop from the stack
1645 * (bytes).
1646 */
1647IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1648{
1649 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1650 NOREF(cbInstr);
1651
1652 /* Fetch the RSP from the stack. */
1653 VBOXSTRICTRC rcStrict;
1654 RTUINT64U NewRip;
1655 RTUINT64U NewRsp;
1656 NewRsp.u = pCtx->rsp;
1657 switch (enmEffOpSize)
1658 {
1659 case IEMMODE_16BIT:
1660 NewRip.u = 0;
1661 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1662 break;
1663 case IEMMODE_32BIT:
1664 NewRip.u = 0;
1665 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1666 break;
1667 case IEMMODE_64BIT:
1668 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1669 break;
1670 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1671 }
1672 if (rcStrict != VINF_SUCCESS)
1673 return rcStrict;
1674
1675 /* Check the new RSP before loading it. */
1676 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1677 * of it. The canonical test is performed here and for call. */
1678 if (enmEffOpSize != IEMMODE_64BIT)
1679 {
1680 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
1681 {
1682 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
1683 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1684 }
1685 }
1686 else
1687 {
1688 if (!IEM_IS_CANONICAL(NewRip.u))
1689 {
1690 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1691 return iemRaiseNotCanonical(pIemCpu);
1692 }
1693 }
1694
1695 /* Commit it. */
1696 pCtx->rip = NewRip.u;
1697 pCtx->rsp = NewRsp.u;
1698 if (cbPop)
1699 iemRegAddToRsp(pCtx, cbPop);
1700
1701 return VINF_SUCCESS;
1702}
1703
1704
1705/**
1706 * Implements leave.
1707 *
1708 * We're doing this in C because messing with the stack registers is annoying
1709 * since they depend on SS attributes.
1710 *
1711 * @param enmEffOpSize The effective operand size.
1712 */
1713IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1714{
1715 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1716
1717 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1718 RTUINT64U NewRsp;
1719 if (pCtx->ssHid.Attr.n.u1Long)
1720 {
1721 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1722 NewRsp.u = pCtx->rsp;
1723 NewRsp.Words.w0 = pCtx->bp;
1724 }
1725 else if (pCtx->ssHid.Attr.n.u1DefBig)
1726 NewRsp.u = pCtx->ebp;
1727 else
1728 NewRsp.u = pCtx->rbp;
1729
1730 /* Pop RBP according to the operand size. */
1731 VBOXSTRICTRC rcStrict;
1732 RTUINT64U NewRbp;
1733 switch (enmEffOpSize)
1734 {
1735 case IEMMODE_16BIT:
1736 NewRbp.u = pCtx->rbp;
1737 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1738 break;
1739 case IEMMODE_32BIT:
1740 NewRbp.u = 0;
1741 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1742 break;
1743 case IEMMODE_64BIT:
1744 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1745 break;
1746 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1747 }
1748 if (rcStrict != VINF_SUCCESS)
1749 return rcStrict;
1750
1751
1752 /* Commit it. */
1753 pCtx->rbp = NewRbp.u;
1754 pCtx->rsp = NewRsp.u;
1755 iemRegAddToRip(pIemCpu, cbInstr);
1756
1757 return VINF_SUCCESS;
1758}
1759
1760
1761/**
1762 * Implements int3 and int XX.
1763 *
1764 * @param u8Int The interrupt vector number.
1765 * @param fIsBpInstr Is it the breakpoint instruction.
1766 */
1767IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1768{
1769 Assert(pIemCpu->cXcptRecursions == 0);
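    /* All software interrupts funnel into the common exception dispatcher;
       IEM_XCPT_FLAGS_BP_INSTR merely identifies the one-byte INT3 (0xCC)
       encoding so it can be told apart from a generic 'int imm8'. */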
1770 return iemRaiseXcptOrInt(pIemCpu,
1771 cbInstr,
1772 u8Int,
1773 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1774 0,
1775 0);
1776}
1777
1778
1779/**
1780 * Implements iret for real mode and V8086 mode.
1781 *
1782 * @param enmEffOpSize The effective operand size.
1783 */
1784IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1785{
1786 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1787 NOREF(cbInstr);
1788
1789 /*
1790 * iret throws an exception if VME isn't enabled.
1791 */
1792 if ( pCtx->eflags.Bits.u1VM
1793 && !(pCtx->cr4 & X86_CR4_VME))
1794 return iemRaiseGeneralProtectionFault0(pIemCpu);
1795
1796 /*
1797 * Do the stack bits, but don't commit RSP before everything checks
1798 * out right.
1799 */
1800 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1801 VBOXSTRICTRC rcStrict;
1802 RTCPTRUNION uFrame;
1803 uint16_t uNewCs;
1804 uint32_t uNewEip;
1805 uint32_t uNewFlags;
1806 uint64_t uNewRsp;
1807 if (enmEffOpSize == IEMMODE_32BIT)
1808 {
1809 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1810 if (rcStrict != VINF_SUCCESS)
1811 return rcStrict;
1812 uNewEip = uFrame.pu32[0];
1813 uNewCs = (uint16_t)uFrame.pu32[1];
1814 uNewFlags = uFrame.pu32[2];
1815 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1816 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1817 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1818 | X86_EFL_ID;
1819 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1820 }
1821 else
1822 {
1823 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1824 if (rcStrict != VINF_SUCCESS)
1825 return rcStrict;
1826 uNewEip = uFrame.pu16[0];
1827 uNewCs = uFrame.pu16[1];
1828 uNewFlags = uFrame.pu16[2];
1829 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1830 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1831 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1832 /** @todo The intel pseudo code does not indicate what happens to
1833 * reserved flags. We just ignore them. */
1834 }
1835 /** @todo Check how this is supposed to work if sp=0xfffe. */
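    /* Note: the frame popped above is IP/EIP, CS and FLAGS/EFLAGS, in that
       order from the top of the stack, with the width given by the operand
       size. */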
1836
1837 /*
1838 * Check the limit of the new EIP.
1839 */
1840 /** @todo Only the AMD pseudo code checks the limit here; which is
1841 * right? */
1842 if (uNewEip > pCtx->csHid.u32Limit)
1843 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1844
1845 /*
1846 * V8086 checks and flag adjustments
1847 */
1848 if (pCtx->eflags.Bits.u1VM)
1849 {
1850 if (pCtx->eflags.Bits.u2IOPL == 3)
1851 {
1852 /* Preserve IOPL and clear RF. */
1853 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1854 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1855 }
1856 else if ( enmEffOpSize == IEMMODE_16BIT
1857 && ( !(uNewFlags & X86_EFL_IF)
1858 || !pCtx->eflags.Bits.u1VIP )
1859 && !(uNewFlags & X86_EFL_TF) )
1860 {
1861 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1862 uNewFlags &= ~X86_EFL_VIF;
1863 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1864 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1865 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1866 }
1867 else
1868 return iemRaiseGeneralProtectionFault0(pIemCpu);
1869 }
1870
1871 /*
1872 * Commit the operation.
1873 */
1874 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1875 if (rcStrict != VINF_SUCCESS)
1876 return rcStrict;
1877 pCtx->rip = uNewEip;
1878 pCtx->cs = uNewCs;
1879 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1880 /** @todo do we load attribs and limit as well? */
1881 Assert(uNewFlags & X86_EFL_1);
1882 pCtx->eflags.u = uNewFlags;
1883
1884 return VINF_SUCCESS;
1885}
1886
1887
1888/**
1889 * Implements iret for protected mode
1890 *
1891 * @param enmEffOpSize The effective operand size.
1892 */
1893IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
1894{
1895 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1896 NOREF(cbInstr);
1897
1898 /*
1899 * Nested task return.
1900 */
1901 if (pCtx->eflags.Bits.u1NT)
1902 {
1903 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1904 }
1905 /*
1906 * Normal return.
1907 */
1908 else
1909 {
1910 /*
1911 * Do the stack bits, but don't commit RSP before everything checks
1912 * out right.
1913 */
1914 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1915 VBOXSTRICTRC rcStrict;
1916 RTCPTRUNION uFrame;
1917 uint16_t uNewCs;
1918 uint32_t uNewEip;
1919 uint32_t uNewFlags;
1920 uint64_t uNewRsp;
1921 if (enmEffOpSize == IEMMODE_32BIT)
1922 {
1923 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1924 if (rcStrict != VINF_SUCCESS)
1925 return rcStrict;
1926 uNewEip = uFrame.pu32[0];
1927 uNewCs = (uint16_t)uFrame.pu32[1];
1928 uNewFlags = uFrame.pu32[2];
1929 }
1930 else
1931 {
1932 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1933 if (rcStrict != VINF_SUCCESS)
1934 return rcStrict;
1935 uNewEip = uFrame.pu16[0];
1936 uNewCs = uFrame.pu16[1];
1937 uNewFlags = uFrame.pu16[2];
1938 }
1939 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
1940 if (rcStrict != VINF_SUCCESS)
1941 return rcStrict;
1942
1943 /*
1944 * What are we returning to?
1945 */
1946 if ( (uNewFlags & X86_EFL_VM)
1947 && pIemCpu->uCpl == 0)
1948 {
1949 /* V8086 mode! */
1950 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1951 }
1952 else
1953 {
1954 /*
1955 * Protected mode.
1956 */
1957 /* Read the CS descriptor. */
1958 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1959 {
1960 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
1961 return iemRaiseGeneralProtectionFault0(pIemCpu);
1962 }
1963
1964 IEMSELDESC DescCS;
1965 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
1966 if (rcStrict != VINF_SUCCESS)
1967 {
1968 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
1969 return rcStrict;
1970 }
1971
1972 /* Must be a code descriptor. */
1973 if (!DescCS.Legacy.Gen.u1DescType)
1974 {
1975 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1976 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1977 }
1978 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1979 {
1980 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1981 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1982 }
1983
1984 /* Privilege checks. */
1985 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1986 {
1987 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
1988 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1989 }
1990 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1991 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
1992 {
1993 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
1994 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1995 }
1996
1997 /* Present? */
1998 if (!DescCS.Legacy.Gen.u1Present)
1999 {
2000 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2001 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2002 }
2003
2004 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
2005 if (DescCS.Legacy.Gen.u1Granularity)
2006 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2007
2008 /*
2009 * Return to outer level?
2010 */
2011 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2012 {
2013 uint16_t uNewSS;
2014 uint32_t uNewESP;
2015 if (enmEffOpSize == IEMMODE_32BIT)
2016 {
2017 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2018 if (rcStrict != VINF_SUCCESS)
2019 return rcStrict;
2020 uNewESP = uFrame.pu32[0];
2021 uNewSS = (uint16_t)uFrame.pu32[1];
2022 }
2023 else
2024 {
2025 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2026 if (rcStrict != VINF_SUCCESS)
2027 return rcStrict;
2028 uNewESP = uFrame.pu16[0];
2029 uNewSS = uFrame.pu16[1];
2030 }
2031 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2032 if (rcStrict != VINF_SUCCESS)
2033 return rcStrict;
2034
2035 /* Read the SS descriptor. */
2036 if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
2037 {
2038 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2039 return iemRaiseGeneralProtectionFault0(pIemCpu);
2040 }
2041
2042 IEMSELDESC DescSS;
2043 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2044 if (rcStrict != VINF_SUCCESS)
2045 {
2046 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2047 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2048 return rcStrict;
2049 }
2050
2051 /* Privilege checks. */
2052 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2053 {
2054 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2055 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2056 }
2057 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2058 {
2059 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2060 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2061 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2062 }
2063
2064 /* Must be a writeable data segment descriptor. */
2065 if (!DescSS.Legacy.Gen.u1DescType)
2066 {
2067 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2068 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2069 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2070 }
2071 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2072 {
2073 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2074 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2075 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2076 }
2077
2078 /* Present? */
2079 if (!DescSS.Legacy.Gen.u1Present)
2080 {
2081 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2082 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2083 }
2084
2085 uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy);
2086 if (DescSS.Legacy.Gen.u1Granularity)
2087 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2088
2089 /* Check EIP. */
2090 if (uNewEip > cbLimitCS)
2091 {
2092 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2093 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2094 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2095 }
2096
2097 /*
2098 * Commit the changes, marking CS and SS accessed first since
2099 * that may fail.
2100 */
2101 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2102 {
2103 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2104 if (rcStrict != VINF_SUCCESS)
2105 return rcStrict;
2106 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2107 }
2108 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2109 {
2110 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2111 if (rcStrict != VINF_SUCCESS)
2112 return rcStrict;
2113 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2114 }
2115
2116 pCtx->rip = uNewEip;
2117 pCtx->cs = uNewCs;
2118 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2119 pCtx->csHid.u32Limit = cbLimitCS;
2120 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2121 pCtx->rsp = uNewESP;
2122 pCtx->ss = uNewSS;
2123 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
2124 pCtx->ssHid.u32Limit = cbLimitSs;
2125 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
2126
2127 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2128 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2129 if (enmEffOpSize != IEMMODE_16BIT)
2130 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2131 if (pIemCpu->uCpl == 0)
2132 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2133 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2134 fEFlagsMask |= X86_EFL_IF;
2135 pCtx->eflags.u &= ~fEFlagsMask;
2136 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2137
2138 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2139 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
2140 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
2141 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
2142 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
2143
2144 /* Done! */
2145
2146 }
2147 /*
2148 * Return to the same level.
2149 */
2150 else
2151 {
2152 /* Check EIP. */
2153 if (uNewEip > cbLimitCS)
2154 {
2155 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2156 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2157 }
2158
2159 /*
2160 * Commit the changes, marking CS first since it may fail.
2161 */
2162 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2163 {
2164 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2165 if (rcStrict != VINF_SUCCESS)
2166 return rcStrict;
2167 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2168 }
2169
2170 pCtx->rip = uNewEip;
2171 pCtx->cs = uNewCs;
2172 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2173 pCtx->csHid.u32Limit = cbLimitCS;
2174 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2175 pCtx->rsp = uNewRsp;
2176
2177 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2178 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2179 if (enmEffOpSize != IEMMODE_16BIT)
2180 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2181 if (pIemCpu->uCpl == 0)
2182 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2183 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2184 fEFlagsMask |= X86_EFL_IF;
2185 pCtx->eflags.u &= ~fEFlagsMask;
2186 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2187 /* Done! */
2188 }
2189 }
2190 }
2191
2192 return VINF_SUCCESS;
2193}
2194
2195
2196/**
2197 * Implements iret for long mode
2198 *
2199 * @param enmEffOpSize The effective operand size.
2200 */
2201IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2202{
2203 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2204 //VBOXSTRICTRC rcStrict;
2205 //uint64_t uNewRsp;
2206
2207 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2208 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2209}
2210
2211
2212/**
2213 * Implements iret.
2214 *
2215 * @param enmEffOpSize The effective operand size.
2216 */
2217IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2218{
2219 /*
2220 * Call a mode specific worker.
2221 */
2222 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2223 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2224 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2225 if (IEM_IS_LONG_MODE(pIemCpu))
2226 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2227
2228 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2229}
2230
2231
2232/**
2233 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2234 *
2235 * @param iSegReg The segment register number (valid).
2236 * @param uSel The new selector value.
2237 */
2238IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2239{
2240 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2241 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2242 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2243
2244 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2245
2246 /*
2247 * Real mode and V8086 mode are easy.
2248 */
2249 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2250 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2251 {
2252 *pSel = uSel;
2253 pHid->u64Base = (uint32_t)uSel << 4;
2254 /** @todo Does the CPU actually load limits and attributes in the
2255 * real/V8086 mode segment load case? It doesn't for CS in far
2256 * jumps... Affects unreal mode. */
2257 pHid->u32Limit = 0xffff;
2258 pHid->Attr.u = 0;
2259 pHid->Attr.n.u1Present = 1;
2260 pHid->Attr.n.u1DescType = 1;
2261 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2262 ? X86_SEL_TYPE_RW
2263 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2264
2265 iemRegAddToRip(pIemCpu, cbInstr);
2266 return VINF_SUCCESS;
2267 }
2268
2269 /*
2270 * Protected mode.
2271 *
2272 * Check if it's a null segment selector value first, that's OK for DS, ES,
2273 * FS and GS. If not null, then we have to load and parse the descriptor.
2274 */
2275 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
2276 {
2277 if (iSegReg == X86_SREG_SS)
2278 {
2279 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2280 || pIemCpu->uCpl != 0
2281 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2282 {
2283 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2284 return iemRaiseGeneralProtectionFault0(pIemCpu);
2285 }
2286
2287 /* In 64-bit kernel mode, the stack can be 0 because of the way
2288 interrupts are dispatched when in kernel ctx. Just load the
2289 selector value into the register and leave the hidden bits
2290 as is. */
2291 *pSel = uSel;
2292 iemRegAddToRip(pIemCpu, cbInstr);
2293 return VINF_SUCCESS;
2294 }
2295
2296 *pSel = uSel; /* Not RPL, remember :-) */
2297 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2298 && iSegReg != X86_SREG_FS
2299 && iSegReg != X86_SREG_GS)
2300 {
2301 /** @todo figure out what this actually does, it works. Needs
2302 * testcase! */
2303 pHid->Attr.u = 0;
2304 pHid->Attr.n.u1Present = 1;
2305 pHid->Attr.n.u1Long = 1;
2306 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2307 pHid->Attr.n.u2Dpl = 3;
2308 pHid->u32Limit = 0;
2309 pHid->u64Base = 0;
2310 }
2311 else
2312 {
2313 pHid->Attr.u = 0;
2314 pHid->u32Limit = 0;
2315 pHid->u64Base = 0;
2316 }
2317 iemRegAddToRip(pIemCpu, cbInstr);
2318 return VINF_SUCCESS;
2319 }
2320
2321 /* Fetch the descriptor. */
2322 IEMSELDESC Desc;
2323 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2324 if (rcStrict != VINF_SUCCESS)
2325 return rcStrict;
2326
2327 /* Check GPs first. */
2328 if (!Desc.Legacy.Gen.u1DescType)
2329 {
2330 Log(("load sreg%u, %#x - system selector (type %#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2331 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2332 }
2333 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2334 {
2335 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2336 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2337 {
2338 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2339 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2340 }
2347 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2348 {
2349 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2350 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2351 }
2352 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2353 {
2354 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2355 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2356 }
2357 }
2358 else
2359 {
2360 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2361 {
2362 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2363 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2364 }
2365 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2366 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2367 {
2368#if 0 /* this is what intel says. */
2369 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2370 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2371 {
2372 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2373 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2374 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2375 }
2376#else /* this is what makes more sense. */
2377 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2378 {
2379 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2380 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2381 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2382 }
2383 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2384 {
2385 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2386 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2387 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2388 }
2389#endif
2390 }
2391 }
2392
2393 /* Is it there? */
2394 if (!Desc.Legacy.Gen.u1Present)
2395 {
2396 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2397 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2398 }
2399
2400 /* Fetch the base and limit. */
2401 uint64_t u64Base;
2402 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
2403 if (Desc.Legacy.Gen.u1Granularity)
2404 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2405
2406 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2407 && iSegReg < X86_SREG_FS)
2408 u64Base = 0;
2409 else
2410 u64Base = X86DESC_BASE(Desc.Legacy);
2411
2412 /*
2413 * Ok, everything checked out fine. Now set the accessed bit before
2414 * committing the result into the registers.
2415 */
2416 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2417 {
2418 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2419 if (rcStrict != VINF_SUCCESS)
2420 return rcStrict;
2421 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2422 }
2423
2424 /* commit */
2425 *pSel = uSel;
2426 pHid->Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2427 pHid->u32Limit = cbLimit;
2428 pHid->u64Base = u64Base;
2429
2430 /** @todo check if the hidden bits are loaded correctly for 64-bit
2431 * mode. */
2432
2433 iemRegAddToRip(pIemCpu, cbInstr);
2434 return VINF_SUCCESS;
2435}
2436
2437
2438/**
2439 * Implements 'mov SReg, r/m'.
2440 *
2441 * @param iSegReg The segment register number (valid).
2442 * @param uSel The new selector value.
2443 */
2444IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2445{
2446 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2447 if (rcStrict == VINF_SUCCESS)
2448 {
2449 if (iSegReg == X86_SREG_SS)
2450 {
2451 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2452 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2453 }
2454 }
2455 return rcStrict;
2456}
2457
2458
2459/**
2460 * Implements 'pop SReg'.
2461 *
2462 * @param iSegReg The segment register number (valid).
2463 * @param enmEffOpSize The effective operand size (valid).
2464 */
2465IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2466{
2467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2468 VBOXSTRICTRC rcStrict;
2469
2470 /*
2471 * Read the selector off the stack and join paths with mov ss, reg.
2472 */
2473 RTUINT64U TmpRsp;
2474 TmpRsp.u = pCtx->rsp;
2475 switch (enmEffOpSize)
2476 {
2477 case IEMMODE_16BIT:
2478 {
2479 uint16_t uSel;
2480 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2481 if (rcStrict == VINF_SUCCESS)
2482 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2483 break;
2484 }
2485
2486 case IEMMODE_32BIT:
2487 {
2488 uint32_t u32Value;
2489 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2490 if (rcStrict == VINF_SUCCESS)
2491 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2492 break;
2493 }
2494
2495 case IEMMODE_64BIT:
2496 {
2497 uint64_t u64Value;
2498 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2499 if (rcStrict == VINF_SUCCESS)
2500 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2501 break;
2502 }
2503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2504 }
2505
2506 /*
2507 * Commit the stack on success.
2508 */
2509 if (rcStrict == VINF_SUCCESS)
2510 {
2511 pCtx->rsp = TmpRsp.u;
2512 if (iSegReg == X86_SREG_SS)
2513 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2514 }
2515 return rcStrict;
2516}
2517
2518
2519/**
2520 * Implements lgs, lfs, les, lds & lss.
2521 */
2522IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2523 uint16_t, uSel,
2524 uint64_t, offSeg,
2525 uint8_t, iSegReg,
2526 uint8_t, iGReg,
2527 IEMMODE, enmEffOpSize)
2528{
2529 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2530 VBOXSTRICTRC rcStrict;
2531
2532 /*
2533 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2534 */
2535 /** @todo verify and test that mov, pop and lXs works the segment
2536 * register loading in the exact same way. */
2537 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2538 if (rcStrict == VINF_SUCCESS)
2539 {
2540 switch (enmEffOpSize)
2541 {
2542 case IEMMODE_16BIT:
2543 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2544 break;
2545 case IEMMODE_32BIT:
2546 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2547 break;
2548 case IEMMODE_64BIT:
2549 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2550 break;
2551 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2552 }
2553 }
2554
2555 return rcStrict;
2556}
2557
2558
2559/**
2560 * Implements lgdt.
2561 *
2562 * @param iEffSeg The segment of the new gdtr contents.
2563 * @param GCPtrEffSrc The address of the new gdtr contents.
2564 * @param enmEffOpSize The effective operand size.
2565 */
2566IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2567{
2568 if (pIemCpu->uCpl != 0)
2569 return iemRaiseGeneralProtectionFault0(pIemCpu);
2570 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2571
2572 /*
2573 * Fetch the limit and base address.
2574 */
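    /* The memory operand is a pseudo-descriptor: a 16-bit limit followed by
       the linear base address (32-bit, or 64-bit in long mode), which
       iemMemFetchDataXdtr decodes according to the effective operand size. */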
2575 uint16_t cbLimit;
2576 RTGCPTR GCPtrBase;
2577 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2578 if (rcStrict == VINF_SUCCESS)
2579 {
2580 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2581 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2582 else
2583 {
2584 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2585 pCtx->gdtr.cbGdt = cbLimit;
2586 pCtx->gdtr.pGdt = GCPtrBase;
2587 }
2588 if (rcStrict == VINF_SUCCESS)
2589 iemRegAddToRip(pIemCpu, cbInstr);
2590 }
2591 return rcStrict;
2592}
2593
2594
2595/**
2596 * Implements lidt.
2597 *
2598 * @param iEffSeg The segment of the new idtr contents.
2599 * @param GCPtrEffSrc The address of the new idtr contents.
2600 * @param enmEffOpSize The effective operand size.
2601 */
2602IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2603{
2604 if (pIemCpu->uCpl != 0)
2605 return iemRaiseGeneralProtectionFault0(pIemCpu);
2606 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2607
2608 /*
2609 * Fetch the limit and base address.
2610 */
2611 uint16_t cbLimit;
2612 RTGCPTR GCPtrBase;
2613 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2614 if (rcStrict == VINF_SUCCESS)
2615 {
2616 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2617 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2618 else
2619 {
2620 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2621 pCtx->idtr.cbIdt = cbLimit;
2622 pCtx->idtr.pIdt = GCPtrBase;
2623 }
2624 if (rcStrict == VINF_SUCCESS)
2625 iemRegAddToRip(pIemCpu, cbInstr);
2626 }
2627 return rcStrict;
2628}
2629
2630
2631/**
2632 * Implements lldt.
2633 *
2634 * @param uNewLdt The new LDT selector value.
2635 */
2636IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2637{
2638 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2639
2640 /*
2641 * Check preconditions.
2642 */
2643 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2644 {
2645 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2646 return iemRaiseUndefinedOpcode(pIemCpu);
2647 }
2648 if (pIemCpu->uCpl != 0)
2649 {
2650 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2651 return iemRaiseGeneralProtectionFault0(pIemCpu);
2652 }
2653 if (uNewLdt & X86_SEL_LDT)
2654 {
2655 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2656 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2657 }
2658
2659 /*
2660 * Now, loading a NULL selector is easy.
2661 */
2662 if ((uNewLdt & X86_SEL_MASK) == 0)
2663 {
2664 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2665 /** @todo check if the actual value is loaded or if it's always 0. */
2666 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2667 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
2668 else
2669 pCtx->ldtr = 0;
2670 pCtx->ldtrHid.Attr.u = 0;
2671 pCtx->ldtrHid.u64Base = 0;
2672 pCtx->ldtrHid.u32Limit = 0;
2673
2674 iemRegAddToRip(pIemCpu, cbInstr);
2675 return VINF_SUCCESS;
2676 }
2677
2678 /*
2679 * Read the descriptor.
2680 */
2681 IEMSELDESC Desc;
2682 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2683 if (rcStrict != VINF_SUCCESS)
2684 return rcStrict;
2685
2686 /* Check GPs first. */
2687 if (Desc.Legacy.Gen.u1DescType)
2688 {
2689 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2690 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2691 }
2692 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2693 {
2694 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2695 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2696 }
2697 uint64_t u64Base;
2698 if (!IEM_IS_LONG_MODE(pIemCpu))
2699 u64Base = X86DESC_BASE(Desc.Legacy);
2700 else
2701 {
2702 if (Desc.Long.Gen.u5Zeros)
2703 {
2704 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2705 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2706 }
2707
2708 u64Base = X86DESC64_BASE(Desc.Long);
2709 if (!IEM_IS_CANONICAL(u64Base))
2710 {
2711 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2712 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2713 }
2714 }
2715
2716 /* NP */
2717 if (!Desc.Legacy.Gen.u1Present)
2718 {
2719 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2720 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2721 }
2722
2723 /*
2724 * It checks out alright, update the registers.
2725 */
2726/** @todo check if the actual value is loaded or if the RPL is dropped */
2727 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2728 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
2729 else
2730 pCtx->ldtr = uNewLdt & X86_SEL_MASK;
2731 pCtx->ldtrHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2732 pCtx->ldtrHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2733 pCtx->ldtrHid.u64Base = u64Base;
2734
2735 iemRegAddToRip(pIemCpu, cbInstr);
2736 return VINF_SUCCESS;
2737}
2738
2739
2740/**
2741 * Implements ltr.
2742 *
2743 * @param uNewTr The new task register (TR) selector value.
2744 */
2745IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2746{
2747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2748
2749 /*
2750 * Check preconditions.
2751 */
2752 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2753 {
2754 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2755 return iemRaiseUndefinedOpcode(pIemCpu);
2756 }
2757 if (pIemCpu->uCpl != 0)
2758 {
2759 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2760 return iemRaiseGeneralProtectionFault0(pIemCpu);
2761 }
2762 if (uNewTr & X86_SEL_LDT)
2763 {
2764 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2765 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2766 }
2767 if ((uNewTr & X86_SEL_MASK) == 0)
2768 {
2769 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2770 return iemRaiseGeneralProtectionFault0(pIemCpu);
2771 }
2772
2773 /*
2774 * Read the descriptor.
2775 */
2776 IEMSELDESC Desc;
2777 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2778 if (rcStrict != VINF_SUCCESS)
2779 return rcStrict;
2780
2781 /* Check GPs first. */
2782 if (Desc.Legacy.Gen.u1DescType)
2783 {
2784 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2785 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2786 }
2787 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2788 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2789 || IEM_IS_LONG_MODE(pIemCpu)) )
2790 {
2791 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2792 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2793 }
2794 uint64_t u64Base;
2795 if (!IEM_IS_LONG_MODE(pIemCpu))
2796 u64Base = X86DESC_BASE(Desc.Legacy);
2797 else
2798 {
2799 if (Desc.Long.Gen.u5Zeros)
2800 {
2801 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2802 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2803 }
2804
2805 u64Base = X86DESC64_BASE(Desc.Long);
2806 if (!IEM_IS_CANONICAL(u64Base))
2807 {
2808 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2809 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2810 }
2811 }
2812
2813 /* NP */
2814 if (!Desc.Legacy.Gen.u1Present)
2815 {
2816 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2817 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2818 }
2819
2820 /*
2821 * Set it busy.
2822 * Note! Intel says this should lock down the whole descriptor, but we'll
2823 * restrict ourselves to 32-bit for now due to lack of inline
2824 * assembly and such.
2825 */
2826 void *pvDesc;
2827 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
2828 if (rcStrict != VINF_SUCCESS)
2829 return rcStrict;
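    /* The busy bit is bit 1 of the descriptor type field, i.e. bit 40 + 1 of
       the 8-byte descriptor; the switch below compensates for the alignment
       of the mapping so ASMAtomicBitSet gets a dword aligned pointer. */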
2830 switch ((uintptr_t)pvDesc & 3)
2831 {
2832 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2833 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2834 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2835 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2836 }
2837 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2838 if (rcStrict != VINF_SUCCESS)
2839 return rcStrict;
2840 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2841
2842 /*
2843 * It checks out alright, update the registers.
2844 */
2845/** @todo check if the actual value is loaded or if the RPL is dropped */
2846 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2847 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
2848 else
2849 pCtx->tr = uNewTr & X86_SEL_MASK;
2850 pCtx->trHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2851 pCtx->trHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2852 pCtx->trHid.u64Base = u64Base;
2853
2854 iemRegAddToRip(pIemCpu, cbInstr);
2855 return VINF_SUCCESS;
2856}
2857
2858
2859/**
2860 * Implements mov GReg,CRx.
2861 *
2862 * @param iGReg The general register to store the CRx value in.
2863 * @param iCrReg The CRx register to read (valid).
2864 */
2865IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2866{
2867 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2868 if (pIemCpu->uCpl != 0)
2869 return iemRaiseGeneralProtectionFault0(pIemCpu);
2870 Assert(!pCtx->eflags.Bits.u1VM);
2871
2872 /* read it */
2873 uint64_t crX;
2874 switch (iCrReg)
2875 {
2876 case 0: crX = pCtx->cr0; break;
2877 case 2: crX = pCtx->cr2; break;
2878 case 3: crX = pCtx->cr3; break;
2879 case 4: crX = pCtx->cr4; break;
2880 case 8:
2881 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2882 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2883 else
2884 crX = 0xff;
2885 break;
2886 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2887 }
2888
2889 /* store it */
2890 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2891 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2892 else
2893 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2894
2895 iemRegAddToRip(pIemCpu, cbInstr);
2896 return VINF_SUCCESS;
2897}
2898
2899
2900/**
2901 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2902 *
2903 * @param iCrReg The CRx register to write (valid).
2904 * @param uNewCrX The new value.
2905 */
2906IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2907{
2908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2909 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2910 VBOXSTRICTRC rcStrict;
2911 int rc;
2912
2913 /*
2914 * Try store it.
2915 * Unfortunately, CPUM only does a tiny bit of the work.
2916 */
2917 switch (iCrReg)
2918 {
2919 case 0:
2920 {
2921 /*
2922 * Perform checks.
2923 */
2924 uint64_t const uOldCrX = pCtx->cr0;
2925 uNewCrX |= X86_CR0_ET; /* hardcoded */
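            /* CR0.ET has been hardwired to 1 since the 486, so writes to it are
               silently ignored rather than faulting. */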
2926
2927 /* Check for reserved bits. */
2928 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2929 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2930 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2931 if (uNewCrX & ~(uint64_t)fValid)
2932 {
2933 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2934 return iemRaiseGeneralProtectionFault0(pIemCpu);
2935 }
2936
2937 /* Check for invalid combinations. */
2938 if ( (uNewCrX & X86_CR0_PG)
2939 && !(uNewCrX & X86_CR0_PE) )
2940 {
2941 Log(("Trying to set CR0.PG without CR0.PE\n"));
2942 return iemRaiseGeneralProtectionFault0(pIemCpu);
2943 }
2944
2945 if ( !(uNewCrX & X86_CR0_CD)
2946 && (uNewCrX & X86_CR0_NW) )
2947 {
2948 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2949 return iemRaiseGeneralProtectionFault0(pIemCpu);
2950 }
2951
2952 /* Long mode consistency checks. */
2953 if ( (uNewCrX & X86_CR0_PG)
2954 && !(uOldCrX & X86_CR0_PG)
2955 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2956 {
2957 if (!(pCtx->cr4 & X86_CR4_PAE))
2958 {
2959 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
2960 return iemRaiseGeneralProtectionFault0(pIemCpu);
2961 }
2962 if (pCtx->csHid.Attr.n.u1Long)
2963 {
2964 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
2965 return iemRaiseGeneralProtectionFault0(pIemCpu);
2966 }
2967 }
2968
2969 /** @todo check reserved PDPTR bits as AMD states. */
2970
2971 /*
2972 * Change CR0.
2973 */
2974 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2975 {
2976 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
2977 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
2978 }
2979 else
2980 pCtx->cr0 = uNewCrX;
2981 Assert(pCtx->cr0 == uNewCrX);
2982
2983 /*
2984 * Change EFER.LMA if entering or leaving long mode.
2985 */
2986 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
2987 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2988 {
2989 uint64_t NewEFER = pCtx->msrEFER;
2990 if (uNewCrX & X86_CR0_PG)
2991 NewEFER |= MSR_K6_EFER_LME;
2992 else
2993 NewEFER &= ~MSR_K6_EFER_LME;
2994
2995 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2996 CPUMSetGuestEFER(pVCpu, NewEFER);
2997 else
2998 pCtx->msrEFER = NewEFER;
2999 Assert(pCtx->msrEFER == NewEFER);
3000 }
3001
3002 /*
3003 * Inform PGM.
3004 */
3005 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3006 {
3007 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3008 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3009 {
3010 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3011 AssertRCReturn(rc, rc);
3012 /* ignore informational status codes */
3013 }
3014 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3015 /** @todo Status code management. */
3016 }
3017 else
3018 rcStrict = VINF_SUCCESS;
3019 break;
3020 }
3021
3022 /*
3023 * CR2 can be changed without any restrictions.
3024 */
3025 case 2:
3026 pCtx->cr2 = uNewCrX;
3027 rcStrict = VINF_SUCCESS;
3028 break;
3029
3030 /*
3031 * CR3 is relatively simple, although AMD and Intel have different
3032 * accounts of how setting reserved bits is handled. We take Intel's
3033 * word for the lower bits and AMD's for the high bits (63:52).
3034 */
3035 /** @todo Testcase: Setting reserved bits in CR3, especially before
3036 * enabling paging. */
3037 case 3:
3038 {
3039 /* check / mask the value. */
3040 if (uNewCrX & UINT64_C(0xfff0000000000000))
3041 {
3042 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3043 return iemRaiseGeneralProtectionFault0(pIemCpu);
3044 }
3045
3046 uint64_t fValid;
3047 if ( (pCtx->cr4 & X86_CR4_PAE)
3048 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3049 fValid = UINT64_C(0x000ffffffffff014);
3050 else if (pCtx->cr4 & X86_CR4_PAE)
3051 fValid = UINT64_C(0xfffffff4);
3052 else
3053 fValid = UINT64_C(0xfffff014);
3054 if (uNewCrX & ~fValid)
3055 {
3056 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3057 uNewCrX, uNewCrX & ~fValid));
3058 uNewCrX &= fValid;
3059 }
3060
3061 /** @todo If we're in PAE mode we should check the PDPTRs for
3062 * invalid bits. */
3063
3064 /* Make the change. */
3065 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3066 {
3067 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3068 AssertRCSuccessReturn(rc, rc);
3069 }
3070 else
3071 pCtx->cr3 = uNewCrX;
3072
3073 /* Inform PGM. */
3074 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3075 {
3076 if (pCtx->cr0 & X86_CR0_PG)
3077 {
3078 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3079 AssertRCReturn(rc, rc);
3080 /* ignore informational status codes */
3081 /** @todo status code management */
3082 }
3083 }
3084 rcStrict = VINF_SUCCESS;
3085 break;
3086 }
3087
3088 /*
3089 * CR4 is a bit more tedious as there are bits which cannot be cleared
3090 * under some circumstances and such.
3091 */
3092 case 4:
3093 {
3094 uint64_t const uOldCrX = pCtx->cr4;
3095
3096 /* reserved bits */
3097 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3098 | X86_CR4_TSD | X86_CR4_DE
3099 | X86_CR4_PSE | X86_CR4_PAE
3100 | X86_CR4_MCE | X86_CR4_PGE
3101 | X86_CR4_PCE | X86_CR4_OSFSXR
3102 | X86_CR4_OSXMMEEXCPT;
3103 //if (xxx)
3104 // fValid |= X86_CR4_VMXE;
3105 //if (xxx)
3106 // fValid |= X86_CR4_OSXSAVE;
3107 if (uNewCrX & ~(uint64_t)fValid)
3108 {
3109 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3110 return iemRaiseGeneralProtectionFault0(pIemCpu);
3111 }
3112
3113 /* long mode checks. */
3114 if ( (uOldCrX & X86_CR4_PAE)
3115 && !(uNewCrX & X86_CR4_PAE)
3116 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3117 {
3118 Log(("Trying to clear CR4.PAE while long mode is active\n"));
3119 return iemRaiseGeneralProtectionFault0(pIemCpu);
3120 }
3121
3122
3123 /*
3124 * Change it.
3125 */
3126 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3127 {
3128 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3129 AssertRCSuccessReturn(rc, rc);
3130 }
3131 else
3132 pCtx->cr4 = uNewCrX;
3133 Assert(pCtx->cr4 == uNewCrX);
3134
3135 /*
3136 * Notify SELM and PGM.
3137 */
3138 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3139 {
3140 /* SELM - VME may change things wrt to the TSS shadowing. */
3141 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3142 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3143
3144 /* PGM - flushing and mode. */
3145 if ( (uNewCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
3146 != (uOldCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)) )
3147 {
3148 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3149 AssertRCReturn(rc, rc);
3150 /* ignore informational status codes */
3151 }
3152 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3153 /** @todo Status code management. */
3154 }
3155 else
3156 rcStrict = VINF_SUCCESS;
3157 break;
3158 }
3159
3160 /*
3161 * CR8 maps to the APIC TPR.
3162 */
3163 case 8:
3164 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3165 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
3166 else
3167 rcStrict = VINF_SUCCESS;
3168 break;
3169
3170 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3171 }
3172
3173 /*
3174 * Advance the RIP on success.
3175 */
3176 /** @todo Status code management. */
3177 if (rcStrict == VINF_SUCCESS)
3178 iemRegAddToRip(pIemCpu, cbInstr);
3179 return rcStrict;
3180
3181}
3182
3183
3184/**
3185 * Implements mov CRx,GReg.
3186 *
3187 * @param iCrReg The CRx register to write (valid).
3188 * @param iGReg The general register to load the CRx value from.
3189 */
3190IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3191{
3192 if (pIemCpu->uCpl != 0)
3193 return iemRaiseGeneralProtectionFault0(pIemCpu);
3194 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3195
3196 /*
3197 * Read the new value from the source register and call common worker.
3198 */
3199 uint64_t uNewCrX;
3200 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3201 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3202 else
3203 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3204 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3205}
3206
3207
3208/**
3209 * Implements 'LMSW r/m16'
3210 *
3211 * @param u16NewMsw The new value.
3212 */
3213IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3214{
3215 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3216
3217 if (pIemCpu->uCpl != 0)
3218 return iemRaiseGeneralProtectionFault0(pIemCpu);
3219 Assert(!pCtx->eflags.Bits.u1VM);
3220
3221 /*
3222 * Compose the new CR0 value and call common worker.
3223 */
3224 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3225 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
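    /* Note that only MP, EM and TS are masked out of the old CR0 above. PE is
       therefore never cleared, so LMSW can set CR0.PE but - as architected -
       never clear it (the time-honoured 286 sequence 'smsw ax / or ax,1 /
       lmsw ax' for entering protected mode depends on LMSW setting PE). */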
3226 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3227}
3228
3229
3230/**
3231 * Implements 'CLTS'.
3232 */
3233IEM_CIMPL_DEF_0(iemCImpl_clts)
3234{
3235 if (pIemCpu->uCpl != 0)
3236 return iemRaiseGeneralProtectionFault0(pIemCpu);
3237
3238 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3239 uint64_t uNewCr0 = pCtx->cr0;
3240 uNewCr0 &= ~X86_CR0_TS;
3241 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3242}
3243
3244
3245/**
3246 * Implements mov GReg,DRx.
3247 *
3248 * @param iGReg The general register to store the DRx value in.
3249 * @param iDrReg The DRx register to read (0-7).
3250 */
3251IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3252{
3253 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3254
3255 /*
3256 * Check preconditions.
3257 */
3258
3259 /* Raise GPs. */
3260 if (pIemCpu->uCpl != 0)
3261 return iemRaiseGeneralProtectionFault0(pIemCpu);
3262 Assert(!pCtx->eflags.Bits.u1VM);
3263
3264 if ( (iDrReg == 4 || iDrReg == 5)
3265 && (pCtx->cr4 & X86_CR4_DE) )
3266 {
3267 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3268 return iemRaiseGeneralProtectionFault0(pIemCpu);
3269 }
3270
3271 /* Raise #DB if general access detect is enabled. */
3272 if (pCtx->dr[7] & X86_DR7_GD)
3273 {
3274 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3275 return iemRaiseDebugException(pIemCpu);
3276 }
3277
3278 /*
3279 * Read the debug register and store it in the specified general register.
3280 */
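    /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear (the CR4.DE=1 case
       was rejected above), hence the shared case labels below. The reserved
       bits of DR6 and DR7 are forced to their architected values. */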
3281 uint64_t drX;
3282 switch (iDrReg)
3283 {
3284 case 0: drX = pCtx->dr[0]; break;
3285 case 1: drX = pCtx->dr[1]; break;
3286 case 2: drX = pCtx->dr[2]; break;
3287 case 3: drX = pCtx->dr[3]; break;
3288 case 6:
3289 case 4:
3290 drX = pCtx->dr[6];
3291 drX &= ~RT_BIT_32(12);
3292 drX |= UINT32_C(0xffff0ff0);
3293 break;
3294 case 7:
3295 case 5:
3296 drX = pCtx->dr[7];
3297 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3298 drX |= RT_BIT_32(10);
3299 break;
3300 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3301 }
3302
3303 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3304 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3305 else
3306 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3307
3308 iemRegAddToRip(pIemCpu, cbInstr);
3309 return VINF_SUCCESS;
3310}
3311
3312
3313/**
3314 * Implements mov DRx,GReg.
3315 *
3316 * @param iDrReg The DRx register to write (valid).
3317 * @param iGReg The general register to load the DRx value from.
3318 */
3319IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3320{
3321 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3322
3323 /*
3324 * Check preconditions.
3325 */
3326 if (pIemCpu->uCpl != 0)
3327 return iemRaiseGeneralProtectionFault0(pIemCpu);
3328 Assert(!pCtx->eflags.Bits.u1VM);
3329
3330 if ( (iDrReg == 4 || iDrReg == 5)
3331 && (pCtx->cr4 & X86_CR4_DE) )
3332 {
3333 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3334 return iemRaiseGeneralProtectionFault0(pIemCpu);
3335 }
3336
3337 /* Raise #DB if general access detect is enabled. */
3338 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
3339 * \#GP? */
3340 if (pCtx->dr[7] & X86_DR7_GD)
3341 {
3342 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3343 return iemRaiseDebugException(pIemCpu);
3344 }
3345
3346 /*
3347 * Read the new value from the source register.
3348 */
3349 uint64_t uNewDrX;
3350 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3351 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3352 else
3353 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3354
3355 /*
3356 * Adjust it.
3357 */
3358 switch (iDrReg)
3359 {
3360 case 0:
3361 case 1:
3362 case 2:
3363 case 3:
3364 /* nothing to adjust */
3365 break;
3366
3367 case 6:
3368 case 4:
3369 if (uNewDrX & UINT64_C(0xffffffff00000000))
3370 {
3371 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3372 return iemRaiseGeneralProtectionFault0(pIemCpu);
3373 }
3374 uNewDrX &= ~RT_BIT_32(12);
3375 uNewDrX |= UINT32_C(0xffff0ff0);
3376 break;
3377
3378 case 7:
3379 case 5:
3380 if (uNewDrX & UINT64_C(0xffffffff00000000))
3381 {
3382 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3383 return iemRaiseGeneralProtectionFault0(pIemCpu);
3384 }
3385 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3386 uNewDrX |= RT_BIT_32(10);
3387 break;
3388
3389 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3390 }
3391
3392 /*
3393 * Do the actual setting.
3394 */
3395 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3396 {
3397 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3398 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3399 }
3400 else
3401 pCtx->dr[iDrReg] = uNewDrX;
3402
3403 iemRegAddToRip(pIemCpu, cbInstr);
3404 return VINF_SUCCESS;
3405}
3406
3407
3408/**
3409 * Implements 'INVLPG m'.
3410 *
3411 * @param GCPtrPage The effective address of the page to invalidate.
3412 * @remarks Updates the RIP.
3413 */
3414IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3415{
3416 /* ring-0 only. */
3417 if (pIemCpu->uCpl != 0)
3418 return iemRaiseGeneralProtectionFault0(pIemCpu);
3419 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3420
3421 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3422 iemRegAddToRip(pIemCpu, cbInstr);
3423
3424 if ( rc == VINF_SUCCESS
3425 || rc == VINF_PGM_SYNC_CR3)
3426 return VINF_SUCCESS;
3427 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
3428 return rc;
3429}
3430
3431
3432/**
3433 * Implements RDTSC.
3434 */
3435IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3436{
3437 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3438
3439 /*
3440 * Check preconditions.
3441 */
3442 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3443 return iemRaiseUndefinedOpcode(pIemCpu);
3444
3445 if ( (pCtx->cr4 & X86_CR4_TSD)
3446 && pIemCpu->uCpl != 0)
3447 {
3448 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3449 return iemRaiseGeneralProtectionFault0(pIemCpu);
3450 }
3451
3452 /*
3453 * Do the job.
3454 */
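    /* The TSC is returned split across EDX:EAX; assigning the 32-bit halves
       to RAX/RDX below also clears the upper halves in 64-bit mode, as
       architected. */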
3455 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3456 pCtx->rax = (uint32_t)uTicks;
3457 pCtx->rdx = uTicks >> 32;
3458#ifdef IEM_VERIFICATION_MODE
3459 pIemCpu->fIgnoreRaxRdx = true;
3460#endif
3461
3462 iemRegAddToRip(pIemCpu, cbInstr);
3463 return VINF_SUCCESS;
3464}
3465
3466
3467/**
3468 * Implements RDMSR.
3469 */
3470IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3471{
3472 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3473
3474 /*
3475 * Check preconditions.
3476 */
3477 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3478 return iemRaiseUndefinedOpcode(pIemCpu);
3479 if (pIemCpu->uCpl != 0)
3480 return iemRaiseGeneralProtectionFault0(pIemCpu);
3481
3482 /*
3483 * Do the job.
3484 */
3485 RTUINT64U uValue;
3486 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3487 if (rc != VINF_SUCCESS)
3488 {
3489 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3490 return iemRaiseGeneralProtectionFault0(pIemCpu);
3491 }
3492
3493 pCtx->rax = uValue.au32[0];
3494 pCtx->rdx = uValue.au32[1];
3495
3496 iemRegAddToRip(pIemCpu, cbInstr);
3497 return VINF_SUCCESS;
3498}
3499
3500
3501/**
3502 * Implements 'IN eAX, port'.
3503 *
3504 * @param u16Port The source port.
3505 * @param cbReg The register size.
3506 */
3507IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3508{
3509 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3510
3511 /*
3512 * CPL check
3513 */
3514 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3515 if (rcStrict != VINF_SUCCESS)
3516 return rcStrict;
3517
3518 /*
3519 * Perform the I/O.
3520 */
3521 uint32_t u32Value;
3522 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3523 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3524 else
3525 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3526 if (IOM_SUCCESS(rcStrict))
3527 {
3528 switch (cbReg)
3529 {
3530 case 1: pCtx->al = (uint8_t)u32Value; break;
3531 case 2: pCtx->ax = (uint16_t)u32Value; break;
3532 case 4: pCtx->rax = u32Value; break;
3533 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3534 }
3535 iemRegAddToRip(pIemCpu, cbInstr);
3536 pIemCpu->cPotentialExits++;
3537 }
3538 /** @todo massage rcStrict. */
3539 return rcStrict;
3540}
3541
3542
3543/**
3544 * Implements 'IN eAX, DX'.
3545 *
3546 * @param cbReg The register size.
3547 */
3548IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3549{
3550 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3551}
3552
3553
3554/**
3555 * Implements 'OUT port, eAX'.
3556 *
3557 * @param u16Port The destination port.
3558 * @param cbReg The register size.
3559 */
3560IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3561{
3562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3563
3564 /*
3565 * CPL check
3566 */
3567 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3568 if (rcStrict != VINF_SUCCESS)
3569 return rcStrict;
3574
3575 /*
3576 * Perform the I/O.
3577 */
3578 uint32_t u32Value;
3579 switch (cbReg)
3580 {
3581 case 1: u32Value = pCtx->al; break;
3582 case 2: u32Value = pCtx->ax; break;
3583 case 4: u32Value = pCtx->eax; break;
3584 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3585 }
3586 VBOXSTRICTRC rc;
3587 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3588 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3589 else
3590 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3591 if (IOM_SUCCESS(rc))
3592 {
3593 iemRegAddToRip(pIemCpu, cbInstr);
3594 pIemCpu->cPotentialExits++;
3595 /** @todo massage rc. */
3596 }
3597 return rc;
3598}
3599
3600
3601/**
3602 * Implements 'OUT DX, eAX'.
3603 *
3604 * @param cbReg The register size.
3605 */
3606IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3607{
3608 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3609}
3610
3611
3612/**
3613 * Implements 'CLI'.
3614 */
3615IEM_CIMPL_DEF_0(iemCImpl_cli)
3616{
3617 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3618
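    /* Summary of the rules below: in protected mode CPL <= IOPL may clear IF,
       and CPL 3 with CR4.PVI set may only clear VIF; in V8086 mode IOPL 3
       clears IF and IOPL < 3 with CR4.VME clears VIF; anything else raises
       #GP(0). Real mode always clears IF. */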
3619 if (pCtx->cr0 & X86_CR0_PE)
3620 {
3621 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3622 if (!pCtx->eflags.Bits.u1VM)
3623 {
3624 if (pIemCpu->uCpl <= uIopl)
3625 pCtx->eflags.Bits.u1IF = 0;
3626 else if ( pIemCpu->uCpl == 3
3627 && (pCtx->cr4 & X86_CR4_PVI) )
3628 pCtx->eflags.Bits.u1VIF = 0;
3629 else
3630 return iemRaiseGeneralProtectionFault0(pIemCpu);
3631 }
3632 /* V8086 */
3633 else if (uIopl == 3)
3634 pCtx->eflags.Bits.u1IF = 0;
3635 else if ( uIopl < 3
3636 && (pCtx->cr4 & X86_CR4_VME) )
3637 pCtx->eflags.Bits.u1VIF = 0;
3638 else
3639 return iemRaiseGeneralProtectionFault0(pIemCpu);
3640 }
3641 /* real mode */
3642 else
3643 pCtx->eflags.Bits.u1IF = 0;
3644 iemRegAddToRip(pIemCpu, cbInstr);
3645 return VINF_SUCCESS;
3646}
3647
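/*
 * For reference, the privilege matrix implemented by CLI above and STI below
 * (following the Intel SDM descriptions):
 *   - Protected mode, CPL <= IOPL:            IF is changed directly.
 *   - Protected mode, CPL == 3, CR4.PVI set:  VIF is changed instead (STI additionally requires VIP=0).
 *   - V8086 mode, IOPL == 3:                  IF is changed directly.
 *   - V8086 mode, IOPL < 3, CR4.VME set:      VIF is changed instead (STI additionally requires VIP=0).
 *   - Any other protected/V8086 combination:  #GP(0).
 *   - Real mode:                              IF is changed directly.
 */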
3648
3649/**
3650 * Implements 'STI'.
3651 */
3652IEM_CIMPL_DEF_0(iemCImpl_sti)
3653{
3654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3655
3656 if (pCtx->cr0 & X86_CR0_PE)
3657 {
3658 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3659 if (!pCtx->eflags.Bits.u1VM)
3660 {
3661 if (pIemCpu->uCpl <= uIopl)
3662 pCtx->eflags.Bits.u1IF = 1;
3663 else if ( pIemCpu->uCpl == 3
3664 && (pCtx->cr4 & X86_CR4_PVI)
3665 && !pCtx->eflags.Bits.u1VIP )
3666 pCtx->eflags.Bits.u1VIF = 1;
3667 else
3668 return iemRaiseGeneralProtectionFault0(pIemCpu);
3669 }
3670 /* V8086 */
3671 else if (uIopl == 3)
3672 pCtx->eflags.Bits.u1IF = 1;
3673 else if ( uIopl < 3
3674 && (pCtx->cr4 & X86_CR4_VME)
3675 && !pCtx->eflags.Bits.u1VIP )
3676 pCtx->eflags.Bits.u1VIF = 1;
3677 else
3678 return iemRaiseGeneralProtectionFault0(pIemCpu);
3679 }
3680 /* real mode */
3681 else
3682 pCtx->eflags.Bits.u1IF = 1;
3683
3684 iemRegAddToRip(pIemCpu, cbInstr);
3685 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3686 return VINF_SUCCESS;
3687}
3688
3689
3690/**
3691 * Implements 'HLT'.
3692 */
3693IEM_CIMPL_DEF_0(iemCImpl_hlt)
3694{
3695 if (pIemCpu->uCpl != 0)
3696 return iemRaiseGeneralProtectionFault0(pIemCpu);
3697 iemRegAddToRip(pIemCpu, cbInstr);
3698 return VINF_EM_HALT;
3699}
3700
3701
3702/**
3703 * Implements 'CPUID'.
3704 */
3705IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3706{
3707 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3708
3709 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3710 pCtx->rax &= UINT32_C(0xffffffff);
3711 pCtx->rbx &= UINT32_C(0xffffffff);
3712 pCtx->rcx &= UINT32_C(0xffffffff);
3713 pCtx->rdx &= UINT32_C(0xffffffff);
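 /* The explicit masking mimics the architectural zero-extension of the 32-bit
    CPUID results into the full 64-bit registers, since the call above only
    updates the low 32-bit halves of the context fields. */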
3714
3715 iemRegAddToRip(pIemCpu, cbInstr);
3716 return VINF_SUCCESS;
3717}
3718
3719
3720/**
3721 * Implements 'AAD'.
3722 *
3723 * @param bImm The immediate operand (normally 0x0a).
3724 */
3725IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3726{
3727 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3728
3729 uint16_t const ax = pCtx->ax;
3730 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3731 pCtx->ax = al;
3732 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3733 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3734 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3735
3736 iemRegAddToRip(pIemCpu, cbInstr);
3737 return VINF_SUCCESS;
3738}
3739
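#if 0
/*
 * Standalone sketch (not compiled) of the AAD adjustment above: the unpacked
 * BCD digits in AH:AL are folded into a binary value in AL and AH is cleared,
 * e.g. AX=0x0205 with the default immediate 0x0a becomes AX=0x0019 (25).
 */
static uint16_t iemSketchAad(uint16_t ax, uint8_t bImm)
{
    return (uint8_t)((uint8_t)ax + (uint8_t)(ax >> 8) * bImm); /* AH := 0, AL := AL + AH*imm. */
}
#endif /* illustrative only */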
3740
3741/**
3742 * Implements 'AAM'.
3743 *
3744 * @param bImm The immediate operand. Cannot be 0.
3745 */
3746IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3747{
3748 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3749 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3750
3751 uint16_t const ax = pCtx->ax;
3752 uint8_t const al = (uint8_t)ax % bImm;
3753 uint8_t const ah = (uint8_t)ax / bImm;
3754 pCtx->ax = (ah << 8) + al;
3755 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3756 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3757 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3758
3759 iemRegAddToRip(pIemCpu, cbInstr);
3760 return VINF_SUCCESS;
3761}
3762
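#if 0
/*
 * Standalone sketch (not compiled) of the AAM adjustment above, the inverse of
 * AAD: AL is split into AH = AL / imm and AL = AL % imm, e.g. AX=0x0019 (25)
 * with the default immediate 0x0a becomes AX=0x0205.
 */
static uint16_t iemSketchAam(uint16_t ax, uint8_t bImm)
{
    return (uint16_t)((((uint8_t)ax / bImm) << 8) | ((uint8_t)ax % bImm));
}
#endif /* illustrative only */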
3763
3764
3765
3766/*
3767 * Instantiate the various string operation combinations.
3768 */
3769#define OP_SIZE 8
3770#define ADDR_SIZE 16
3771#include "IEMAllCImplStrInstr.cpp.h"
3772#define OP_SIZE 8
3773#define ADDR_SIZE 32
3774#include "IEMAllCImplStrInstr.cpp.h"
3775#define OP_SIZE 8
3776#define ADDR_SIZE 64
3777#include "IEMAllCImplStrInstr.cpp.h"
3778
3779#define OP_SIZE 16
3780#define ADDR_SIZE 16
3781#include "IEMAllCImplStrInstr.cpp.h"
3782#define OP_SIZE 16
3783#define ADDR_SIZE 32
3784#include "IEMAllCImplStrInstr.cpp.h"
3785#define OP_SIZE 16
3786#define ADDR_SIZE 64
3787#include "IEMAllCImplStrInstr.cpp.h"
3788
3789#define OP_SIZE 32
3790#define ADDR_SIZE 16
3791#include "IEMAllCImplStrInstr.cpp.h"
3792#define OP_SIZE 32
3793#define ADDR_SIZE 32
3794#include "IEMAllCImplStrInstr.cpp.h"
3795#define OP_SIZE 32
3796#define ADDR_SIZE 64
3797#include "IEMAllCImplStrInstr.cpp.h"
3798
3799#define OP_SIZE 64
3800#define ADDR_SIZE 32
3801#include "IEMAllCImplStrInstr.cpp.h"
3802#define OP_SIZE 64
3803#define ADDR_SIZE 64
3804#include "IEMAllCImplStrInstr.cpp.h"
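
/* Note that there is deliberately no OP_SIZE 64 / ADDR_SIZE 16 instantiation:
   16-bit addressing cannot be encoded in 64-bit mode, which is the only mode
   where 64-bit operands exist. */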
3805
3806
3807/**
3808 * Implements 'FINIT' and 'FNINIT'.
3809 *
3810 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3811 * not.
3812 */
3813IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3814{
3815 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3816
3817 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3818 return iemRaiseDeviceNotAvailable(pIemCpu);
3819
3820 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
3821 if (fCheckXcpts && TODO )
3822 return iemRaiseMathFault(pIemCpu);
3823 */
3824
3825 if (iemFRegIsFxSaveFormat(pIemCpu))
3826 {
3827 pCtx->fpu.FCW = 0x37f;
3828 pCtx->fpu.FSW = 0;
3829 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
3830 pCtx->fpu.FPUDP = 0;
3831 pCtx->fpu.DS = 0; //??
3832 pCtx->fpu.FPUIP = 0;
3833 pCtx->fpu.CS = 0; //??
3834 pCtx->fpu.FOP = 0;
3835 }
3836 else
3837 {
3838 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3839 pFpu->FCW = 0x37f;
3840 pFpu->FSW = 0;
3841 pFpu->FTW = 0xffff; /* 11 - empty */
3842 pFpu->FPUOO = 0; //??
3843 pFpu->FPUOS = 0; //??
3844 pFpu->FPUIP = 0;
3845 pFpu->CS = 0; //??
3846 pFpu->FOP = 0;
3847 }
3848
3849 iemRegAddToRip(pIemCpu, cbInstr);
3850 return VINF_SUCCESS;
3851}
3852
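/*
 * For reference, the FINIT/FNINIT defaults established above: FCW=0x37F means
 * all exceptions masked, 64-bit precision control and round-to-nearest;
 * FSW=0 clears the exception flags and sets TOP=0; and the tag word marks all
 * registers empty, which is 0x00 in the abridged FXSAVE encoding (1 bit per
 * register) but 0xFFFF in the legacy encoding (2 bits per register, 11b = empty).
 */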
3853
3854/**
3855 * Implements 'FXSAVE'.
3856 *
3857 * @param iEffSeg The effective segment.
3858 * @param GCPtrEff The address of the image.
3859 * @param enmEffOpSize The operand size (only REX.W really matters).
3860 */
3861IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3862{
3863 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3864
3865 /*
3866 * Raise exceptions.
3867 */
3868 if (pCtx->cr0 & X86_CR0_EM)
3869 return iemRaiseUndefinedOpcode(pIemCpu);
3870 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3871 return iemRaiseDeviceNotAvailable(pIemCpu);
3872 if (GCPtrEff & 15)
3873 {
3874 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3875 * all/any misalignment sizes; Intel says it's an implementation detail. */
3876 if ( (pCtx->cr0 & X86_CR0_AM)
3877 && pCtx->eflags.Bits.u1AC
3878 && pIemCpu->uCpl == 3)
3879 return iemRaiseAlignmentCheckException(pIemCpu);
3880 return iemRaiseGeneralProtectionFault0(pIemCpu);
3881 }
3882 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3883
3884 /*
3885 * Access the memory.
3886 */
3887 void *pvMem512;
3888 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3889 if (rcStrict != VINF_SUCCESS)
3890 return rcStrict;
3891 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
3892
3893 /*
3894 * Store the registers.
3895 */
3896 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
3897 * specific whether MXCSR and XMM0-XMM7 are saved. */
3898
3899 /* common for all formats */
3900 pDst->FCW = pCtx->fpu.FCW;
3901 pDst->FSW = pCtx->fpu.FSW;
3902 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
3903 pDst->FOP = pCtx->fpu.FOP;
3904 pDst->MXCSR = pCtx->fpu.MXCSR;
3905 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
3906 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
3907 {
3908 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
3909 * them for now... */
3910 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
3911 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
3912 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
3913 pDst->aRegs[i].au32[3] = 0;
3914 }
3915
3916 /* FPU IP, CS, DP and DS. */
3917 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
3918 * state information. :-/
3919 * Storing zeros now to prevent any potential leakage of host info. */
3920 pDst->FPUIP = 0;
3921 pDst->CS = 0;
3922 pDst->Rsrvd1 = 0;
3923 pDst->FPUDP = 0;
3924 pDst->DS = 0;
3925 pDst->Rsrvd2 = 0;
3926
3927 /* XMM registers. */
3928 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
3929 || pIemCpu->enmCpuMode != IEMMODE_64BIT
3930 || pIemCpu->uCpl != 0)
3931 {
3932 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
3933 for (uint32_t i = 0; i < cXmmRegs; i++)
3934 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
3935 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
3936 * right? */
3937 }
3938
3939 /*
3940 * Commit the memory.
3941 */
3942 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3943 if (rcStrict != VINF_SUCCESS)
3944 return rcStrict;
3945
3946 iemRegAddToRip(pIemCpu, cbInstr);
3947 return VINF_SUCCESS;
3948}
3949
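/*
 * For reference, the 512-byte image produced/consumed by FXSAVE above and
 * FXRSTOR below is laid out as follows (byte offsets):
 *   0x00 FCW, 0x02 FSW, 0x04 abridged FTW, 0x06 FOP,
 *   0x08 FPU IP + CS (or 64-bit FPU IP with REX.W),
 *   0x10 FPU DP + DS (or 64-bit FPU DP with REX.W),
 *   0x18 MXCSR, 0x1c MXCSR_MASK,
 *   0x20 ST0/MM0..ST7/MM7 (16 bytes each, upper 6 bytes reserved),
 *   0xa0 XMM0..XMM15 (XMM8-15 only in 64-bit mode), remainder reserved.
 */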
3950
3951/**
3952 * Implements 'FXRSTOR'.
3953 *
3954 * @param iEffSeg The effective segment.
 * @param GCPtrEff The address of the image.
3955 * @param enmEffOpSize The operand size (only REX.W really matters).
3956 */
3957IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3958{
3959 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3960
3961 /*
3962 * Raise exceptions.
3963 */
3964 if (pCtx->cr0 & X86_CR0_EM)
3965 return iemRaiseUndefinedOpcode(pIemCpu);
3966 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3967 return iemRaiseDeviceNotAvailable(pIemCpu);
3968 if (GCPtrEff & 15)
3969 {
3970 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3971 * all/any misalignment sizes; Intel says it's an implementation detail. */
3972 if ( (pCtx->cr0 & X86_CR0_AM)
3973 && pCtx->eflags.Bits.u1AC
3974 && pIemCpu->uCpl == 3)
3975 return iemRaiseAlignmentCheckException(pIemCpu);
3976 return iemRaiseGeneralProtectionFault0(pIemCpu);
3977 }
3978 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3979
3980 /*
3981 * Access the memory.
3982 */
3983 void *pvMem512;
3984 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
3985 if (rcStrict != VINF_SUCCESS)
3986 return rcStrict;
3987 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
3988
3989 /*
3990 * Check the state for stuff which will GP(0).
3991 */
3992 uint32_t const fMXCSR = pSrc->MXCSR;
3993 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
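 /* 0xffbf (all bits except DAZ, bit 6) is the default mask software is told to
    assume when no MXCSR mask is available, DAZ not being supported by all CPUs
    that implement FXSAVE/FXRSTOR. */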
3994 if (fMXCSR & ~fMXCSR_MASK)
3995 {
3996 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
3997 return iemRaiseGeneralProtectionFault0(pIemCpu);
3998 }
3999
4000 /*
4001 * Load the registers.
4002 */
4003 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4004 * specific whether MXCSR and XMM0-XMM7 are restored. */
4005
4006 /* common for all formats */
4007 pCtx->fpu.FCW = pSrc->FCW;
4008 pCtx->fpu.FSW = pSrc->FSW;
4009 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4010 pCtx->fpu.FOP = pSrc->FOP;
4011 pCtx->fpu.MXCSR = fMXCSR;
4012 /* (MXCSR_MASK is read-only) */
4013 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4014 {
4015 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4016 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4017 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4018 pCtx->fpu.aRegs[i].au32[3] = 0;
4019 }
4020
4021 /* FPU IP, CS, DP and DS. */
4022 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4023 {
4024 pCtx->fpu.FPUIP = pSrc->FPUIP;
4025 pCtx->fpu.CS = pSrc->CS;
4026 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4027 pCtx->fpu.FPUDP = pSrc->FPUDP;
4028 pCtx->fpu.DS = pSrc->DS;
4029 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4030 }
4031 else
4032 {
4033 pCtx->fpu.FPUIP = pSrc->FPUIP;
4034 pCtx->fpu.CS = pSrc->CS;
4035 pCtx->fpu.Rsrvd1 = 0;
4036 pCtx->fpu.FPUDP = pSrc->FPUDP;
4037 pCtx->fpu.DS = pSrc->DS;
4038 pCtx->fpu.Rsrvd2 = 0;
4039 }
4040
4041 /* XMM registers. */
4042 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4043 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4044 || pIemCpu->uCpl != 0)
4045 {
4046 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4047 for (uint32_t i = 0; i < cXmmRegs; i++)
4048 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4049 }
4050
4051 /*
4052 * Commit the memory.
4053 */
4054 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4055 if (rcStrict != VINF_SUCCESS)
4056 return rcStrict;
4057
4058 iemRegAddToRip(pIemCpu, cbInstr);
4059 return VINF_SUCCESS;
4060}
4061
4062/** @} */
4063