VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@40184

Last change on this file since 40184 was 40184, checked in by vboxsync, 13 years ago

a little bug fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 133.3 KB
1/* $Id: IEMAllCImpl.cpp.h 40184 2012-02-20 15:34:02Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
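/*
 * Illustrative sketch only (not built): roughly what the missing I/O permission
 * bitmap check flagged by the @todo above would have to do.  The TSS layout
 * (I/O map base at offset 0x66, one bit per port, set bit = access denied) is
 * architectural; the fetch helper and the exact error handling below are
 * assumptions, not the real IEM API.
 */
#if 0
static VBOXSTRICTRC iemHlpSketchIoBitmapCheck(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
{
    /* The 16-bit I/O map base lives at offset 0x66 of the 32-bit TSS. */
    uint16_t offIoBitmap;
    VBOXSTRICTRC rcStrict = iemSketchFetchGuestU16(pIemCpu, &offIoBitmap, pCtx->trHid.u64Base + 0x66); /* hypothetical helper */
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Read the two bytes covering the access; any set bit among the cbOperand
       consecutive port bits means #GP(0).  Reads beyond TR.limit also fault. */
    uint32_t const offFirstByte = offIoBitmap + (u16Port >> 3);
    if (offFirstByte + 1 > pCtx->trHid.u32Limit)
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    uint16_t u16Bits;
    rcStrict = iemSketchFetchGuestU16(pIemCpu, &u16Bits, pCtx->trHid.u64Base + offFirstByte); /* hypothetical helper */
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const fMask = (uint16_t)(((1U << cbOperand) - 1U) << (u16Port & 7));
    if (u16Bits & fMask)
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    return VINF_SUCCESS;
}
#endif /* illustrative sketch */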
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
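/*
 * Side note (sketch, not used by IEM): the same parity computation can be done
 * without unrolling by XOR-folding the byte; PF is set when the number of set
 * bits in the low byte is even, exactly as in the disabled helper above.
 */
#if 0
static bool iemHlpSketchCalcParityFlag(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;   /* fold the high nibble into the low nibble */
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;   /* bit 0 now holds the parity of all 8 bits */
    return !(u8Result & 1);      /* even number of set bits -> PF set */
}
#endif /* sketch */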
80
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
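/*
 * Usage sketch (hypothetical caller, not part of this file): an instruction
 * that defines SF/ZF/PF from an 8-bit result and leaves OF/AF/CF undefined
 * could invoke the helper like this:
 *
 *     iemHlpUpdateArithEFlagsU8(pIemCpu, u8Al,
 *                               X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
 *                               X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
 */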
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param puSel The selector register.
106 * @param pHid The hidden register part.
107 */
108static void iemHlpLoadNullDataSelectorProt(PRTSEL puSel, PCPUMSELREGHID pHid)
109{
110 /** @todo Testcase: write a testcase checking what happens when loading a NULL
111 * data selector in protected mode. */
112 pHid->u64Base = 0;
113 pHid->u32Limit = 0;
114 pHid->Attr.u = 0;
115 *puSel = 0;
116}
117
118
119/**
120 * Helper used by iret and retf.
121 *
122 * @param uCpl The new CPL.
123 * @param puSel The selector register.
124 * @param pHid The corresponding hidden register.
125 */
126static void iemHlpAdjustSelectorForNewCpl(uint8_t uCpl, PRTSEL puSel, PCPUMSELREGHID pHid)
127{
128 if ( uCpl > pHid->Attr.n.u2Dpl
129 && pHid->Attr.n.u1DescType /* code or data, not system */
130 && (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
131 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
132 iemHlpLoadNullDataSelectorProt(puSel, pHid);
133}
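/*
 * Example of the rule above (illustrative): if iret/retf drops from CPL 0 to
 * CPL 3 while DS still holds a DPL-0, non-conforming data selector, DS is
 * replaced by the NULL selector so the ring-3 code cannot keep using it.
 */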
134
135
136/** @} */
137
138/** @name C Implementations
139 * @{
140 */
141
142/**
143 * Implements a 16-bit popa.
144 */
145IEM_CIMPL_DEF_0(iemCImpl_popa_16)
146{
147 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
148 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
149 RTGCPTR GCPtrLast = GCPtrStart + 15;
150 VBOXSTRICTRC rcStrict;
151
152 /*
153 * The docs are a bit hard to comprehend here, but it looks like we wrap
154 * around in real mode as long as none of the individual "popa" crosses the
155 * end of the stack segment. In protected mode we check the whole access
156 * in one go. For efficiency, only do the word-by-word thing if we're in
157 * danger of wrapping around.
158 */
159 /** @todo do popa boundary / wrap-around checks. */
160 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
161 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
162 {
163 /* word-by-word */
164 RTUINT64U TmpRsp;
165 TmpRsp.u = pCtx->rsp;
166 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
167 if (rcStrict == VINF_SUCCESS)
168 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
169 if (rcStrict == VINF_SUCCESS)
170 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
171 if (rcStrict == VINF_SUCCESS)
172 {
173 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
174 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
175 }
176 if (rcStrict == VINF_SUCCESS)
177 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
178 if (rcStrict == VINF_SUCCESS)
179 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
180 if (rcStrict == VINF_SUCCESS)
181 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
182 if (rcStrict == VINF_SUCCESS)
183 {
184 pCtx->rsp = TmpRsp.u;
185 iemRegAddToRip(pIemCpu, cbInstr);
186 }
187 }
188 else
189 {
190 uint16_t const *pa16Mem = NULL;
191 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
192 if (rcStrict == VINF_SUCCESS)
193 {
194 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
195 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
196 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
197 /* skip sp */
198 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
199 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
200 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
201 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
202 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
203 if (rcStrict == VINF_SUCCESS)
204 {
205 iemRegAddToRsp(pCtx, 16);
206 iemRegAddToRip(pIemCpu, cbInstr);
207 }
208 }
209 }
210 return rcStrict;
211}
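/*
 * Sketch (disabled, illustrative only): why the 7 - X86_GREG_xXX indexing in
 * the block copy above picks the right words.  PUSHA stores AX..DI from high
 * to low addresses, and with the X86_GREG_xAX..X86_GREG_xDI encoding running
 * 0..7 the expression 7 - iReg maps each register to its slot (DI at index 0,
 * AX at index 7).
 */
#if 0
static void iemHlpSketchPopaCopyU16(PCPUMCTX pCtx, uint16_t const *pa16Mem)
{
    uint16_t *apReg[8] =
    {
        &pCtx->ax, &pCtx->cx, &pCtx->dx, &pCtx->bx,
        NULL /* sp is skipped by popa */, &pCtx->bp, &pCtx->si, &pCtx->di
    };
    for (unsigned iReg = 0; iReg < 8; iReg++)
        if (apReg[iReg])
            *apReg[iReg] = pa16Mem[7 - iReg];
}
#endif /* sketch */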
212
213
214/**
215 * Implements a 32-bit popa.
216 */
217IEM_CIMPL_DEF_0(iemCImpl_popa_32)
218{
219 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
220 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
221 RTGCPTR GCPtrLast = GCPtrStart + 31;
222 VBOXSTRICTRC rcStrict;
223
224 /*
225 * The docs are a bit hard to comprehend here, but it looks like we wrap
226 * around in real mode as long as none of the individual "popa" crosses the
227 * end of the stack segment. In protected mode we check the whole access
228 * in one go. For efficiency, only do the word-by-word thing if we're in
229 * danger of wrapping around.
230 */
231 /** @todo do popa boundary / wrap-around checks. */
232 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
233 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
234 {
235 /* word-by-word */
236 RTUINT64U TmpRsp;
237 TmpRsp.u = pCtx->rsp;
238 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
239 if (rcStrict == VINF_SUCCESS)
240 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
241 if (rcStrict == VINF_SUCCESS)
242 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
243 if (rcStrict == VINF_SUCCESS)
244 {
245 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
246 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
247 }
248 if (rcStrict == VINF_SUCCESS)
249 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
250 if (rcStrict == VINF_SUCCESS)
251 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
252 if (rcStrict == VINF_SUCCESS)
253 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
254 if (rcStrict == VINF_SUCCESS)
255 {
256#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
257 pCtx->rdi &= UINT32_MAX;
258 pCtx->rsi &= UINT32_MAX;
259 pCtx->rbp &= UINT32_MAX;
260 pCtx->rbx &= UINT32_MAX;
261 pCtx->rdx &= UINT32_MAX;
262 pCtx->rcx &= UINT32_MAX;
263 pCtx->rax &= UINT32_MAX;
264#endif
265 pCtx->rsp = TmpRsp.u;
266 iemRegAddToRip(pIemCpu, cbInstr);
267 }
268 }
269 else
270 {
271 uint32_t const *pa32Mem;
272 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
273 if (rcStrict == VINF_SUCCESS)
274 {
275 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
276 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
277 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
278 /* skip esp */
279 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
280 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
281 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
282 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
283 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
284 if (rcStrict == VINF_SUCCESS)
285 {
286 iemRegAddToRsp(pCtx, 32);
287 iemRegAddToRip(pIemCpu, cbInstr);
288 }
289 }
290 }
291 return rcStrict;
292}
293
294
295/**
296 * Implements a 16-bit pusha.
297 */
298IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
299{
300 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
301 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
302 RTGCPTR GCPtrBottom = GCPtrTop - 15;
303 VBOXSTRICTRC rcStrict;
304
305 /*
306 * The docs are a bit hard to comprehend here, but it looks like we wrap
307 * around in real mode as long as none of the individual "pusha" crosses the
308 * end of the stack segment. In protected mode we check the whole access
309 * in one go. For efficiency, only do the word-by-word thing if we're in
310 * danger of wrapping around.
311 */
312 /** @todo do pusha boundary / wrap-around checks. */
313 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
314 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
315 {
316 /* word-by-word */
317 RTUINT64U TmpRsp;
318 TmpRsp.u = pCtx->rsp;
319 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
320 if (rcStrict == VINF_SUCCESS)
321 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
322 if (rcStrict == VINF_SUCCESS)
323 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
324 if (rcStrict == VINF_SUCCESS)
325 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
326 if (rcStrict == VINF_SUCCESS)
327 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
328 if (rcStrict == VINF_SUCCESS)
329 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
330 if (rcStrict == VINF_SUCCESS)
331 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
332 if (rcStrict == VINF_SUCCESS)
333 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
334 if (rcStrict == VINF_SUCCESS)
335 {
336 pCtx->rsp = TmpRsp.u;
337 iemRegAddToRip(pIemCpu, cbInstr);
338 }
339 }
340 else
341 {
342 GCPtrBottom--;
343 uint16_t *pa16Mem = NULL;
344 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
345 if (rcStrict == VINF_SUCCESS)
346 {
347 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
348 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
349 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
350 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
351 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
352 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
353 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
354 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
355 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
356 if (rcStrict == VINF_SUCCESS)
357 {
358 iemRegSubFromRsp(pCtx, 16);
359 iemRegAddToRip(pIemCpu, cbInstr);
360 }
361 }
362 }
363 return rcStrict;
364}
365
366
367/**
368 * Implements a 32-bit pusha.
369 */
370IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
371{
372 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
373 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
374 RTGCPTR GCPtrBottom = GCPtrTop - 31;
375 VBOXSTRICTRC rcStrict;
376
377 /*
378 * The docs are a bit hard to comprehend here, but it looks like we wrap
379 * around in real mode as long as none of the individual "pusha" crosses the
380 * end of the stack segment. In protected mode we check the whole access
381 * in one go. For efficiency, only do the word-by-word thing if we're in
382 * danger of wrapping around.
383 */
384 /** @todo do pusha boundary / wrap-around checks. */
385 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
386 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
387 {
388 /* word-by-word */
389 RTUINT64U TmpRsp;
390 TmpRsp.u = pCtx->rsp;
391 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
392 if (rcStrict == VINF_SUCCESS)
393 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
394 if (rcStrict == VINF_SUCCESS)
395 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
396 if (rcStrict == VINF_SUCCESS)
397 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
398 if (rcStrict == VINF_SUCCESS)
399 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
400 if (rcStrict == VINF_SUCCESS)
401 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
402 if (rcStrict == VINF_SUCCESS)
403 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
404 if (rcStrict == VINF_SUCCESS)
405 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
406 if (rcStrict == VINF_SUCCESS)
407 {
408 pCtx->rsp = TmpRsp.u;
409 iemRegAddToRip(pIemCpu, cbInstr);
410 }
411 }
412 else
413 {
414 GCPtrBottom--;
415 uint32_t *pa32Mem;
416 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
417 if (rcStrict == VINF_SUCCESS)
418 {
419 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
420 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
421 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
422 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
423 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
424 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
425 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
426 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
427 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
428 if (rcStrict == VINF_SUCCESS)
429 {
430 iemRegSubFromRsp(pCtx, 32);
431 iemRegAddToRip(pIemCpu, cbInstr);
432 }
433 }
434 }
435 return rcStrict;
436}
437
438
439/**
440 * Implements pushf.
441 *
442 *
443 * @param enmEffOpSize The effective operand size.
444 */
445IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
446{
447 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
448
449 /*
450 * If we're in V8086 mode some care is required (which is why we're
451 * doing this in a C implementation).
452 */
453 uint32_t fEfl = pCtx->eflags.u;
454 if ( (fEfl & X86_EFL_VM)
455 && X86_EFL_GET_IOPL(fEfl) != 3 )
456 {
457 Assert(pCtx->cr0 & X86_CR0_PE);
458 if ( enmEffOpSize != IEMMODE_16BIT
459 || !(pCtx->cr4 & X86_CR4_VME))
460 return iemRaiseGeneralProtectionFault0(pIemCpu);
461 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
462 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
463 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
464 }
465
466 /*
467 * Ok, clear RF and VM and push the flags.
468 */
469 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
470
471 VBOXSTRICTRC rcStrict;
472 switch (enmEffOpSize)
473 {
474 case IEMMODE_16BIT:
475 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
476 break;
477 case IEMMODE_32BIT:
478 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
479 break;
480 case IEMMODE_64BIT:
481 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
482 break;
483 IEM_NOT_REACHED_DEFAULT_CASE_RET();
484 }
485 if (rcStrict != VINF_SUCCESS)
486 return rcStrict;
487
488 iemRegAddToRip(pIemCpu, cbInstr);
489 return VINF_SUCCESS;
490}
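/*
 * Sketch of the bit gymnastics above: X86_EFL_VIF is bit 19 and X86_EFL_IF is
 * bit 9, so shifting right by (19 - 9) projects the virtual interrupt flag
 * onto the IF position of the 16-bit image pushed under CR4.VME; popf/iret do
 * the mirror-image left shift.  The compile-time checks merely document the
 * architectural bit positions.
 */
AssertCompile(X86_EFL_VIF == RT_BIT(19));
AssertCompile(X86_EFL_IF  == RT_BIT(9));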
491
492
493/**
494 * Implements popf.
495 *
496 * @param enmEffOpSize The effective operand size.
497 */
498IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
499{
500 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
501 uint32_t const fEflOld = pCtx->eflags.u;
502 VBOXSTRICTRC rcStrict;
503 uint32_t fEflNew;
504
505 /*
506 * V8086 is special as usual.
507 */
508 if (fEflOld & X86_EFL_VM)
509 {
510 /*
511 * Almost anything goes if IOPL is 3.
512 */
513 if (X86_EFL_GET_IOPL(fEflOld) == 3)
514 {
515 switch (enmEffOpSize)
516 {
517 case IEMMODE_16BIT:
518 {
519 uint16_t u16Value;
520 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
521 if (rcStrict != VINF_SUCCESS)
522 return rcStrict;
523 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
524 break;
525 }
526 case IEMMODE_32BIT:
527 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
528 if (rcStrict != VINF_SUCCESS)
529 return rcStrict;
530 break;
531 IEM_NOT_REACHED_DEFAULT_CASE_RET();
532 }
533
534 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
535 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
536 }
537 /*
538 * Interrupt flag virtualization with CR4.VME=1.
539 */
540 else if ( enmEffOpSize == IEMMODE_16BIT
541 && (pCtx->cr4 & X86_CR4_VME) )
542 {
543 uint16_t u16Value;
544 RTUINT64U TmpRsp;
545 TmpRsp.u = pCtx->rsp;
546 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
547 if (rcStrict != VINF_SUCCESS)
548 return rcStrict;
549
550 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
551 * or before? */
552 if ( ( (u16Value & X86_EFL_IF)
553 && (fEflOld & X86_EFL_VIP))
554 || (u16Value & X86_EFL_TF) )
555 return iemRaiseGeneralProtectionFault0(pIemCpu);
556
557 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
558 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
559 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
560 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
561
562 pCtx->rsp = TmpRsp.u;
563 }
564 else
565 return iemRaiseGeneralProtectionFault0(pIemCpu);
566
567 }
568 /*
569 * Not in V8086 mode.
570 */
571 else
572 {
573 /* Pop the flags. */
574 switch (enmEffOpSize)
575 {
576 case IEMMODE_16BIT:
577 {
578 uint16_t u16Value;
579 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
580 if (rcStrict != VINF_SUCCESS)
581 return rcStrict;
582 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
583 break;
584 }
585 case IEMMODE_32BIT:
586 case IEMMODE_64BIT:
587 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
588 if (rcStrict != VINF_SUCCESS)
589 return rcStrict;
590 break;
591 IEM_NOT_REACHED_DEFAULT_CASE_RET();
592 }
593
594 /* Merge them with the current flags. */
595 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
596 || pIemCpu->uCpl == 0)
597 {
598 fEflNew &= X86_EFL_POPF_BITS;
599 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
600 }
601 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
602 {
603 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
604 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
605 }
606 else
607 {
608 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
609 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
610 }
611 }
612
613 /*
614 * Commit the flags.
615 */
616 Assert(fEflNew & RT_BIT_32(1));
617 pCtx->eflags.u = fEflNew;
618 iemRegAddToRip(pIemCpu, cbInstr);
619
620 return VINF_SUCCESS;
621}
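/*
 * Summary sketch of the protected-mode popf merge above: if CPL is 0, or if
 * neither IOPL nor IF would change, every bit in X86_EFL_POPF_BITS is taken
 * from the stack image; with 0 < CPL <= IOPL the IOPL field is preserved from
 * the old value; with CPL > IOPL both IOPL and IF are preserved.  Bits outside
 * X86_EFL_POPF_BITS always keep their old values.
 */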
622
623
624/**
625 * Implements a 16-bit indirect call.
626 *
627 * @param uNewPC The new program counter (RIP) value (loaded from the
628 * operand).
630 */
631IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
632{
633 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
634 uint16_t uOldPC = pCtx->ip + cbInstr;
635 if (uNewPC > pCtx->csHid.u32Limit)
636 return iemRaiseGeneralProtectionFault0(pIemCpu);
637
638 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
639 if (rcStrict != VINF_SUCCESS)
640 return rcStrict;
641
642 pCtx->rip = uNewPC;
643 return VINF_SUCCESS;
644
645}
646
647
648/**
649 * Implements a 16-bit relative call.
650 *
651 * @param offDisp The displacement offset.
652 */
653IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
654{
655 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
656 uint16_t uOldPC = pCtx->ip + cbInstr;
657 uint16_t uNewPC = uOldPC + offDisp;
658 if (uNewPC > pCtx->csHid.u32Limit)
659 return iemRaiseGeneralProtectionFault0(pIemCpu);
660
661 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
662 if (rcStrict != VINF_SUCCESS)
663 return rcStrict;
664
665 pCtx->rip = uNewPC;
666 return VINF_SUCCESS;
667}
668
669
670/**
671 * Implements a 32-bit indirect call.
672 *
673 * @param uNewPC The new program counter (RIP) value (loaded from the
674 * operand).
676 */
677IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
678{
679 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
680 uint32_t uOldPC = pCtx->eip + cbInstr;
681 if (uNewPC > pCtx->csHid.u32Limit)
682 return iemRaiseGeneralProtectionFault0(pIemCpu);
683
684 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
685 if (rcStrict != VINF_SUCCESS)
686 return rcStrict;
687
688 pCtx->rip = uNewPC;
689 return VINF_SUCCESS;
690
691}
692
693
694/**
695 * Implements a 32-bit relative call.
696 *
697 * @param offDisp The displacement offset.
698 */
699IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
700{
701 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
702 uint32_t uOldPC = pCtx->eip + cbInstr;
703 uint32_t uNewPC = uOldPC + offDisp;
704 if (uNewPC > pCtx->csHid.u32Limit)
705 return iemRaiseGeneralProtectionFault0(pIemCpu);
706
707 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
708 if (rcStrict != VINF_SUCCESS)
709 return rcStrict;
710
711 pCtx->rip = uNewPC;
712 return VINF_SUCCESS;
713}
714
715
716/**
717 * Implements a 64-bit indirect call.
718 *
719 * @param uNewPC The new program counter (RIP) value (loaded from the
720 * operand).
722 */
723IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
724{
725 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
726 uint64_t uOldPC = pCtx->rip + cbInstr;
727 if (!IEM_IS_CANONICAL(uNewPC))
728 return iemRaiseGeneralProtectionFault0(pIemCpu);
729
730 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
731 if (rcStrict != VINF_SUCCESS)
732 return rcStrict;
733
734 pCtx->rip = uNewPC;
735 return VINF_SUCCESS;
736
737}
738
739
740/**
741 * Implements a 64-bit relative call.
742 *
743 * @param offDisp The displacement offset.
744 */
745IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
746{
747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
748 uint64_t uOldPC = pCtx->rip + cbInstr;
749 uint64_t uNewPC = uOldPC + offDisp;
750 if (!IEM_IS_CANONICAL(uNewPC))
751 return iemRaiseNotCanonical(pIemCpu);
752
753 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
754 if (rcStrict != VINF_SUCCESS)
755 return rcStrict;
756
757 pCtx->rip = uNewPC;
758 return VINF_SUCCESS;
759}
760
761
762/**
763 * Implements far jumps and calls thru task segments (TSS).
764 *
765 * @param uSel The selector.
766 * @param enmBranch The kind of branching we're performing.
767 * @param enmEffOpSize The effective operand size.
768 * @param pDesc The descriptor corresponding to @a uSel. The type is
769 * task segment.
770 */
771IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
772{
773 /* Call various functions to do the work. */
774 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
775}
776
777
778/**
779 * Implements far jumps and calls thru task gates.
780 *
781 * @param uSel The selector.
782 * @param enmBranch The kind of branching we're performing.
783 * @param enmEffOpSize The effective operand size.
784 * @param pDesc The descriptor corresponding to @a uSel. The type is
785 * task gate.
786 */
787IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
788{
789 /* Call various functions to do the work. */
790 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
791}
792
793
794/**
795 * Implements far jumps and calls thru call gates.
796 *
797 * @param uSel The selector.
798 * @param enmBranch The kind of branching we're performing.
799 * @param enmEffOpSize The effective operand size.
800 * @param pDesc The descriptor corresponding to @a uSel. The type is
801 * call gate.
802 */
803IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
804{
805 /* Call various functions to do the work. */
806 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
807}
808
809
810/**
811 * Implements far jumps and calls thru system selectors.
812 *
813 * @param uSel The selector.
814 * @param enmBranch The kind of branching we're performing.
815 * @param enmEffOpSize The effective operand size.
816 * @param pDesc The descriptor corresponding to @a uSel.
817 */
818IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
819{
820 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
821 Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
822
823 if (IEM_IS_LONG_MODE(pIemCpu))
824 switch (pDesc->Legacy.Gen.u4Type)
825 {
826 case AMD64_SEL_TYPE_SYS_CALL_GATE:
827 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
828
829 default:
830 case AMD64_SEL_TYPE_SYS_LDT:
831 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
832 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
833 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
834 case AMD64_SEL_TYPE_SYS_INT_GATE:
835 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
836 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
837
838 }
839
840 switch (pDesc->Legacy.Gen.u4Type)
841 {
842 case X86_SEL_TYPE_SYS_286_CALL_GATE:
843 case X86_SEL_TYPE_SYS_386_CALL_GATE:
844 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
845
846 case X86_SEL_TYPE_SYS_TASK_GATE:
847 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
848
849 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
850 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
851 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
852
853 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
854 Log(("branch %04x -> busy 286 TSS\n", uSel));
855 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
856
857 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
858 Log(("branch %04x -> busy 386 TSS\n", uSel));
859 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
860
861 default:
862 case X86_SEL_TYPE_SYS_LDT:
863 case X86_SEL_TYPE_SYS_286_INT_GATE:
864 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
865 case X86_SEL_TYPE_SYS_386_INT_GATE:
866 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
867 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
868 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
869 }
870}
871
872
873/**
874 * Implements far jumps.
875 *
876 * @param uSel The selector.
877 * @param offSeg The segment offset.
878 * @param enmEffOpSize The effective operand size.
879 */
880IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
881{
882 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
883 NOREF(cbInstr);
884 Assert(offSeg <= UINT32_MAX);
885
886 /*
887 * Real mode and V8086 mode are easy. The only snag seems to be that
888 * CS.limit doesn't change and the limit check is done against the current
889 * limit.
890 */
891 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
892 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
893 {
894 if (offSeg > pCtx->csHid.u32Limit)
895 return iemRaiseGeneralProtectionFault0(pIemCpu);
896
897 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
898 pCtx->rip = offSeg;
899 else
900 pCtx->rip = offSeg & UINT16_MAX;
901 pCtx->cs = uSel;
902 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
903 /** @todo REM resets the accessed bit (seen on jmp far16 after disabling
904 * PE). Check with VT-x and AMD-V. */
905#ifdef IEM_VERIFICATION_MODE
906 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
907#endif
908 return VINF_SUCCESS;
909 }
910
911 /*
912 * Protected mode. Need to parse the specified descriptor...
913 */
914 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
915 {
916 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
917 return iemRaiseGeneralProtectionFault0(pIemCpu);
918 }
919
920 /* Fetch the descriptor. */
921 IEMSELDESC Desc;
922 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
923 if (rcStrict != VINF_SUCCESS)
924 return rcStrict;
925
926 /* Is it there? */
927 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
928 {
929 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
930 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
931 }
932
933 /*
934 * Deal with it according to its type. We do the standard code selectors
935 * here and dispatch the system selectors to worker functions.
936 */
937 if (!Desc.Legacy.Gen.u1DescType)
938 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
939
940 /* Only code segments. */
941 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
942 {
943 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
944 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
945 }
946
947 /* L vs D. */
948 if ( Desc.Legacy.Gen.u1Long
949 && Desc.Legacy.Gen.u1DefBig
950 && IEM_IS_LONG_MODE(pIemCpu))
951 {
952 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
953 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
954 }
955
956 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
957 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
958 {
959 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
960 {
961 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
962 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
963 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
964 }
965 }
966 else
967 {
968 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
969 {
970 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
971 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
972 }
973 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
974 {
975 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
976 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
977 }
978 }
979
980 /* Chop the high bits if 16-bit (Intel says so). */
981 if (enmEffOpSize == IEMMODE_16BIT)
982 offSeg &= UINT16_MAX;
983
984 /* Limit check. (Should alternatively check for non-canonical addresses
985 here, but that is ruled out by offSeg being 32-bit, right?) */
986 uint64_t u64Base;
987 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
988 if (Desc.Legacy.Gen.u1Granularity)
989 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
990 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
991 u64Base = 0;
992 else
993 {
994 if (offSeg > cbLimit)
995 {
996 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
997 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
998 }
999 u64Base = X86DESC_BASE(Desc.Legacy);
1000 }
1001
1002 /*
1003 * Ok, everything checked out fine. Now set the accessed bit before
1004 * committing the result into CS, CSHID and RIP.
1005 */
1006 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1007 {
1008 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1009 if (rcStrict != VINF_SUCCESS)
1010 return rcStrict;
1011#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1012 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1013#endif
1014 }
1015
1016 /* commit */
1017 pCtx->rip = offSeg;
1018 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1019 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1020 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1021 pCtx->csHid.u32Limit = cbLimit;
1022 pCtx->csHid.u64Base = u64Base;
1023 /** @todo check if the hidden bits are loaded correctly for 64-bit
1024 * mode. */
1025 return VINF_SUCCESS;
1026}
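/*
 * Worked example for the limit scaling used above and in callf/retf: with a
 * raw 20-bit descriptor limit of 0xFFFFF and G=1,
 * (0xFFFFF << PAGE_SHIFT) | PAGE_OFFSET_MASK = (0xFFFFF << 12) | 0xFFF
 * = 0xFFFFFFFF, i.e. the limit is expressed in 4 KiB pages and expands to
 * cover the full 4 GiB byte range.
 */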
1027
1028
1029/**
1030 * Implements far calls.
1031 *
1032 * This is very similar to iemCImpl_FarJmp.
1033 *
1034 * @param uSel The selector.
1035 * @param offSeg The segment offset.
1036 * @param enmEffOpSize The operand size (in case we need it).
1037 */
1038IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1039{
1040 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1041 VBOXSTRICTRC rcStrict;
1042 uint64_t uNewRsp;
1043 void *pvRet;
1044
1045 /*
1046 * Real mode and V8086 mode are easy. The only snag seems to be that
1047 * CS.limit doesn't change and the limit check is done against the current
1048 * limit.
1049 */
1050 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1051 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1052 {
1053 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1054
1055 /* Check stack first - may #SS(0). */
1056 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1057 &pvRet, &uNewRsp);
1058 if (rcStrict != VINF_SUCCESS)
1059 return rcStrict;
1060
1061 /* Check the target address range. */
1062 if (offSeg > UINT32_MAX)
1063 return iemRaiseGeneralProtectionFault0(pIemCpu);
1064
1065 /* Everything is fine, push the return address. */
1066 if (enmEffOpSize == IEMMODE_16BIT)
1067 {
1068 ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
1069 ((uint16_t *)pvRet)[1] = pCtx->cs;
1070 }
1071 else
1072 {
1073 ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
1074 ((uint16_t *)pvRet)[3] = pCtx->cs;
1075 }
1076 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
1077 if (rcStrict != VINF_SUCCESS)
1078 return rcStrict;
1079
1080 /* Branch. */
1081 pCtx->rip = offSeg;
1082 pCtx->cs = uSel;
1083 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
1084 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
1085 * after disabling PE.) Check with VT-x and AMD-V. */
1086#ifdef IEM_VERIFICATION_MODE
1087 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
1088#endif
1089 return VINF_SUCCESS;
1090 }
1091
1092 /*
1093 * Protected mode. Need to parse the specified descriptor...
1094 */
1095 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1096 {
1097 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1098 return iemRaiseGeneralProtectionFault0(pIemCpu);
1099 }
1100
1101 /* Fetch the descriptor. */
1102 IEMSELDESC Desc;
1103 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1104 if (rcStrict != VINF_SUCCESS)
1105 return rcStrict;
1106
1107 /*
1108 * Deal with it according to its type. We do the standard code selectors
1109 * here and dispatch the system selectors to worker functions.
1110 */
1111 if (!Desc.Legacy.Gen.u1DescType)
1112 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1113
1114 /* Only code segments. */
1115 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1116 {
1117 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1118 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1119 }
1120
1121 /* L vs D. */
1122 if ( Desc.Legacy.Gen.u1Long
1123 && Desc.Legacy.Gen.u1DefBig
1124 && IEM_IS_LONG_MODE(pIemCpu))
1125 {
1126 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1127 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1128 }
1129
1130 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1131 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1132 {
1133 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1134 {
1135 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1136 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1137 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1138 }
1139 }
1140 else
1141 {
1142 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1143 {
1144 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1145 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1146 }
1147 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1148 {
1149 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1150 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1151 }
1152 }
1153
1154 /* Is it there? */
1155 if (!Desc.Legacy.Gen.u1Present)
1156 {
1157 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1158 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1159 }
1160
1161 /* Check stack first - may #SS(0). */
1162 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1163 enmEffOpSize == IEMMODE_64BIT ? 8+2
1164 : enmEffOpSize == IEMMODE_32BIT ? 4+2 : 2+2,
1165 &pvRet, &uNewRsp);
1166 if (rcStrict != VINF_SUCCESS)
1167 return rcStrict;
1168
1169 /* Chop the high bits if 16-bit (Intel says so). */
1170 if (enmEffOpSize == IEMMODE_16BIT)
1171 offSeg &= UINT16_MAX;
1172
1173 /* Limit / canonical check. */
1174 uint64_t u64Base;
1175 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1176 if (Desc.Legacy.Gen.u1Granularity)
1177 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1178
1179 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1180 {
1181 if (!IEM_IS_CANONICAL(offSeg))
1182 {
1183 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1184 return iemRaiseNotCanonical(pIemCpu);
1185 }
1186 u64Base = 0;
1187 }
1188 else
1189 {
1190 if (offSeg > cbLimit)
1191 {
1192 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1193 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1194 }
1195 u64Base = X86DESC_BASE(Desc.Legacy);
1196 }
1197
1198 /*
1199 * Now set the accessed bit before
1200 * writing the return address to the stack and committing the result into
1201 * CS, CSHID and RIP.
1202 */
1203 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1204 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1205 {
1206 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1207 if (rcStrict != VINF_SUCCESS)
1208 return rcStrict;
1209#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1210 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1211#endif
1212 }
1213
1214 /* stack */
1215 if (enmEffOpSize == IEMMODE_16BIT)
1216 {
1217 ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
1218 ((uint16_t *)pvRet)[1] = pCtx->cs;
1219 }
1220 else if (enmEffOpSize == IEMMODE_32BIT)
1221 {
1222 ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
1223 ((uint32_t *)pvRet)[1] = pCtx->cs;
1224 }
1225 else
1226 {
1227 ((uint64_t *)pvRet)[0] = pCtx->rip + cbInstr;
1228 ((uint64_t *)pvRet)[1] = pCtx->cs;
1229 }
1230 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
1231 if (rcStrict != VINF_SUCCESS)
1232 return rcStrict;
1233
1234 /* commit */
1235 pCtx->rip = offSeg;
1236 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1237 pCtx->cs |= pIemCpu->uCpl;
1238 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1239 pCtx->csHid.u32Limit = cbLimit;
1240 pCtx->csHid.u64Base = u64Base;
1241 /** @todo check if the hidden bits are loaded correctly for 64-bit
1242 * mode. */
1243 return VINF_SUCCESS;
1244}
1245
1246
1247/**
1248 * Implements retf.
1249 *
1250 * @param enmEffOpSize The effective operand size.
1251 * @param cbPop The number of bytes of arguments to pop from the
1252 * stack.
1253 */
1254IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1255{
1256 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1257 VBOXSTRICTRC rcStrict;
1258 RTCPTRUNION uPtrFrame;
1259 uint64_t uNewRsp;
1260 uint64_t uNewRip;
1261 uint16_t uNewCs;
1262 NOREF(cbInstr);
1263
1264 /*
1265 * Read the stack values first.
1266 */
1267 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1268 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1269 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1270 if (rcStrict != VINF_SUCCESS)
1271 return rcStrict;
1272 if (enmEffOpSize == IEMMODE_16BIT)
1273 {
1274 uNewRip = uPtrFrame.pu16[0];
1275 uNewCs = uPtrFrame.pu16[1];
1276 }
1277 else if (enmEffOpSize == IEMMODE_32BIT)
1278 {
1279 uNewRip = uPtrFrame.pu32[0];
1280 uNewCs = uPtrFrame.pu16[2];
1281 }
1282 else
1283 {
1284 uNewRip = uPtrFrame.pu64[0];
1285 uNewCs = uPtrFrame.pu16[4];
1286 }
1287
1288 /*
1289 * Real mode and V8086 mode are easy.
1290 */
1291 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1292 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1293 {
1294 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1295 /** @todo check how this is supposed to work if sp=0xfffe. */
1296
1297 /* Check the limit of the new EIP. */
1298 /** @todo Intel pseudo code only does the limit check for 16-bit
1299 * operands, AMD does not make any distinction. What is right? */
1300 if (uNewRip > pCtx->csHid.u32Limit)
1301 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1302
1303 /* commit the operation. */
1304 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1305 if (rcStrict != VINF_SUCCESS)
1306 return rcStrict;
1307 pCtx->rip = uNewRip;
1308 pCtx->cs = uNewCs;
1309 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1310 /** @todo do we load attribs and limit as well? */
1311 if (cbPop)
1312 iemRegAddToRsp(pCtx, cbPop);
1313 return VINF_SUCCESS;
1314 }
1315
1316 /*
1317 * Protected mode is complicated, of course.
1318 */
1319 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1320 {
1321 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1322 return iemRaiseGeneralProtectionFault0(pIemCpu);
1323 }
1324
1325 /* Fetch the descriptor. */
1326 IEMSELDESC DescCs;
1327 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1328 if (rcStrict != VINF_SUCCESS)
1329 return rcStrict;
1330
1331 /* Can only return to a code selector. */
1332 if ( !DescCs.Legacy.Gen.u1DescType
1333 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1334 {
1335 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1336 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1337 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1338 }
1339
1340 /* L vs D. */
1341 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1342 && DescCs.Legacy.Gen.u1DefBig
1343 && IEM_IS_LONG_MODE(pIemCpu))
1344 {
1345 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1346 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1347 }
1348
1349 /* DPL/RPL/CPL checks. */
1350 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1351 {
1352 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1353 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1354 }
1355
1356 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1357 {
1358 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1359 {
1360 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1361 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1362 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1363 }
1364 }
1365 else
1366 {
1367 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1368 {
1369 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1370 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1371 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1372 }
1373 }
1374
1375 /* Is it there? */
1376 if (!DescCs.Legacy.Gen.u1Present)
1377 {
1378 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1379 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1380 }
1381
1382 /*
1383 * Return to outer privilege? (We'll typically have entered via a call gate.)
1384 */
1385 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1386 {
1387 /* Read the return pointer, it comes before the parameters. */
1388 RTCPTRUNION uPtrStack;
1389 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1390 if (rcStrict != VINF_SUCCESS)
1391 return rcStrict;
1392 uint16_t uNewOuterSs;
1393 uint64_t uNewOuterRsp;
1394 if (enmEffOpSize == IEMMODE_16BIT)
1395 {
1396 uNewOuterRsp = uPtrFrame.pu16[0];
1397 uNewOuterSs = uPtrFrame.pu16[1];
1398 }
1399 else if (enmEffOpSize == IEMMODE_32BIT)
1400 {
1401 uNewOuterRsp = uPtrFrame.pu32[0];
1402 uNewOuterSs = uPtrFrame.pu16[2];
1403 }
1404 else
1405 {
1406 uNewOuterRsp = uPtrFrame.pu64[0];
1407 uNewOuterSs = uPtrFrame.pu16[4];
1408 }
1409
1410 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1411 and read the selector. */
1412 IEMSELDESC DescSs;
1413 if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
1414 {
1415 if ( !DescCs.Legacy.Gen.u1Long
1416 || (uNewOuterSs & X86_SEL_RPL) == 3)
1417 {
1418 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1419 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1420 return iemRaiseGeneralProtectionFault0(pIemCpu);
1421 }
1422 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1423 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1424 }
1425 else
1426 {
1427 /* Fetch the descriptor for the new stack segment. */
1428 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1429 if (rcStrict != VINF_SUCCESS)
1430 return rcStrict;
1431 }
1432
1433 /* Check that RPL of stack and code selectors match. */
1434 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1435 {
1436 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1437 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1438 }
1439
1440 /* Must be a writable data segment. */
1441 if ( !DescSs.Legacy.Gen.u1DescType
1442 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1443 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1444 {
1445 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1446 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1447 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1448 }
1449
1450 /* L vs D. (Not mentioned by intel.) */
1451 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1452 && DescSs.Legacy.Gen.u1DefBig
1453 && IEM_IS_LONG_MODE(pIemCpu))
1454 {
1455 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1456 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1457 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1458 }
1459
1460 /* DPL/RPL/CPL checks. */
1461 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1462 {
1463 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1464 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1465 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1466 }
1467
1468 /* Is it there? */
1469 if (!DescSs.Legacy.Gen.u1Present)
1470 {
1471 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1472 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1473 }
1474
1475 /* Calc SS limit.*/
1476 uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy);
1477 if (DescSs.Legacy.Gen.u1Granularity)
1478 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1479
1480
1481 /* Is RIP canonical or within CS.limit? */
1482 uint64_t u64Base;
1483 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1484 if (DescCs.Legacy.Gen.u1Granularity)
1485 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1486
1487 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1488 {
1489 if (!IEM_IS_CANONICAL(uNewRip))
1490 {
1491 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1492 return iemRaiseNotCanonical(pIemCpu);
1493 }
1494 u64Base = 0;
1495 }
1496 else
1497 {
1498 if (uNewRip > cbLimitCs)
1499 {
1500 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1501 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1502 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1503 }
1504 u64Base = X86DESC_BASE(DescCs.Legacy);
1505 }
1506
1507 /*
1508 * Now set the accessed bit before
1509 * writing the return address to the stack and committing the result into
1510 * CS, CSHID and RIP.
1511 */
1512 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1513 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1514 {
1515 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1516 if (rcStrict != VINF_SUCCESS)
1517 return rcStrict;
1518#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1519 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1520#endif
1521 }
1522 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1523 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1524 {
1525 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1526 if (rcStrict != VINF_SUCCESS)
1527 return rcStrict;
1528#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1529 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1530#endif
1531 }
1532
1533 /* commit */
1534 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1535 if (rcStrict != VINF_SUCCESS)
1536 return rcStrict;
1537 if (enmEffOpSize == IEMMODE_16BIT)
1538 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1539 else
1540 pCtx->rip = uNewRip;
1541 pCtx->cs = uNewCs;
1542 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1543 pCtx->csHid.u32Limit = cbLimitCs;
1544 pCtx->csHid.u64Base = u64Base;
1545 pCtx->rsp = uNewRsp;
1546 pCtx->ss = uNewOuterSs;
1547 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSs.Legacy);
1548 pCtx->ssHid.u32Limit = cbLimitSs;
1549 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1550 pCtx->ssHid.u64Base = 0;
1551 else
1552 pCtx->ssHid.u64Base = X86DESC_BASE(DescSs.Legacy);
1553
1554 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1555 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
1556 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
1557 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
1558 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
1559
1560 /** @todo check if the hidden bits are loaded correctly for 64-bit
1561 * mode. */
1562
1563 if (cbPop)
1564 iemRegAddToRsp(pCtx, cbPop);
1565
1566 /* Done! */
1567 }
1568 /*
1569 * Return to the same privilege level
1570 */
1571 else
1572 {
1573 /* Limit / canonical check. */
1574 uint64_t u64Base;
1575 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
1576 if (DescCs.Legacy.Gen.u1Granularity)
1577 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1578
1579 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1580 {
1581 if (!IEM_IS_CANONICAL(uNewRip))
1582 {
1583 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1584 return iemRaiseNotCanonical(pIemCpu);
1585 }
1586 u64Base = 0;
1587 }
1588 else
1589 {
1590 if (uNewRip > cbLimitCs)
1591 {
1592 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1593 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1594 }
1595 u64Base = X86DESC_BASE(DescCs.Legacy);
1596 }
1597
1598 /*
1599 * Now set the accessed bit before
1600 * writing the return address to the stack and committing the result into
1601 * CS, CSHID and RIP.
1602 */
1603 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1604 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1605 {
1606 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1607 if (rcStrict != VINF_SUCCESS)
1608 return rcStrict;
1609#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1610 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1611#endif
1612 }
1613
1614 /* commit */
1615 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1616 if (rcStrict != VINF_SUCCESS)
1617 return rcStrict;
1618 if (enmEffOpSize == IEMMODE_16BIT)
1619 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1620 else
1621 pCtx->rip = uNewRip;
1622 pCtx->cs = uNewCs;
1623 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCs.Legacy);
1624 pCtx->csHid.u32Limit = cbLimitCs;
1625 pCtx->csHid.u64Base = u64Base;
1626 /** @todo check if the hidden bits are loaded correctly for 64-bit
1627 * mode. */
1628 if (cbPop)
1629 iemRegAddToRsp(pCtx, cbPop);
1630 }
1631 return VINF_SUCCESS;
1632}
1633
1634
1635/**
1636 * Implements retn.
1637 *
1638 * We're doing this in C because of the \#GP that might be raised if the popped
1639 * program counter is out of bounds.
1640 *
1641 * @param enmEffOpSize The effective operand size.
1642 * @param cbPop The number of bytes of arguments to pop from the
1643 * stack.
1644 */
1645IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1646{
1647 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1648 NOREF(cbInstr);
1649
1650 /* Fetch the new RIP from the stack. */
1651 VBOXSTRICTRC rcStrict;
1652 RTUINT64U NewRip;
1653 RTUINT64U NewRsp;
1654 NewRsp.u = pCtx->rsp;
1655 switch (enmEffOpSize)
1656 {
1657 case IEMMODE_16BIT:
1658 NewRip.u = 0;
1659 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1660 break;
1661 case IEMMODE_32BIT:
1662 NewRip.u = 0;
1663 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1664 break;
1665 case IEMMODE_64BIT:
1666 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1667 break;
1668 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1669 }
1670 if (rcStrict != VINF_SUCCESS)
1671 return rcStrict;
1672
1673 /* Check the new RIP before loading it. */
1674 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1675 * of it. The canonical test is performed here and for call. */
1676 if (enmEffOpSize != IEMMODE_64BIT)
1677 {
1678 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
1679 {
1680 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
1681 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1682 }
1683 }
1684 else
1685 {
1686 if (!IEM_IS_CANONICAL(NewRip.u))
1687 {
1688 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1689 return iemRaiseNotCanonical(pIemCpu);
1690 }
1691 }
1692
1693 /* Commit it. */
1694 pCtx->rip = NewRip.u;
1695 pCtx->rsp = NewRsp.u;
1696 if (cbPop)
1697 iemRegAddToRsp(pCtx, cbPop);
1698
1699 return VINF_SUCCESS;
1700}
1701
1702
1703/**
1704 * Implements leave.
1705 *
1706 * We're doing this in C because messing with the stack registers is annoying
1707 * since they depend on SS attributes.
1708 *
1709 * @param enmEffOpSize The effective operand size.
1710 */
1711IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1712{
1713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1714
1715 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1716 RTUINT64U NewRsp;
1717 if (pCtx->ssHid.Attr.n.u1Long)
1718 {
1719 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1720 NewRsp.u = pCtx->rsp;
1721 NewRsp.Words.w0 = pCtx->bp;
1722 }
1723 else if (pCtx->ssHid.Attr.n.u1DefBig)
1724 NewRsp.u = pCtx->ebp;
1725 else
1726 NewRsp.u = pCtx->rbp;
1727
1728 /* Pop RBP according to the operand size. */
1729 VBOXSTRICTRC rcStrict;
1730 RTUINT64U NewRbp;
1731 switch (enmEffOpSize)
1732 {
1733 case IEMMODE_16BIT:
1734 NewRbp.u = pCtx->rbp;
1735 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1736 break;
1737 case IEMMODE_32BIT:
1738 NewRbp.u = 0;
1739 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1740 break;
1741 case IEMMODE_64BIT:
1742 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1743 break;
1744 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1745 }
1746 if (rcStrict != VINF_SUCCESS)
1747 return rcStrict;
1748
1749
1750 /* Commit it. */
1751 pCtx->rbp = NewRbp.u;
1752 pCtx->rsp = NewRsp.u;
1753 iemRegAddToRip(pIemCpu, cbInstr);
1754
1755 return VINF_SUCCESS;
1756}
1757
1758
1759/**
1760 * Implements int3 and int XX.
1761 *
1762 * @param u8Int The interrupt vector number.
1763 * @param fIsBpInstr Whether it is the int3 breakpoint instruction.
1764 */
1765IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1766{
1767 Assert(pIemCpu->cXcptRecursions == 0);
1768 return iemRaiseXcptOrInt(pIemCpu,
1769 cbInstr,
1770 u8Int,
1771 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1772 0,
1773 0);
1774}
1775
1776
1777/**
1778 * Implements iret for real mode and V8086 mode.
1779 *
1780 * @param enmEffOpSize The effective operand size.
1781 */
1782IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1783{
1784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1785 NOREF(cbInstr);
1786
1787 /*
1788 * iret throws an exception if VME isn't enabled.
1789 */
1790 if ( pCtx->eflags.Bits.u1VM
1791 && !(pCtx->cr4 & X86_CR4_VME))
1792 return iemRaiseGeneralProtectionFault0(pIemCpu);
1793
1794 /*
1795 * Do the stack bits, but don't commit RSP before everything checks
1796 * out right.
1797 */
1798 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1799 VBOXSTRICTRC rcStrict;
1800 RTCPTRUNION uFrame;
1801 uint16_t uNewCs;
1802 uint32_t uNewEip;
1803 uint32_t uNewFlags;
1804 uint64_t uNewRsp;
1805 if (enmEffOpSize == IEMMODE_32BIT)
1806 {
1807 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1808 if (rcStrict != VINF_SUCCESS)
1809 return rcStrict;
1810 uNewEip = uFrame.pu32[0];
1811 uNewCs = (uint16_t)uFrame.pu32[1];
1812 uNewFlags = uFrame.pu32[2];
1813 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1814 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1815 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1816 | X86_EFL_ID;
1817 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1818 }
1819 else
1820 {
1821 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1822 if (rcStrict != VINF_SUCCESS)
1823 return rcStrict;
1824 uNewEip = uFrame.pu16[0];
1825 uNewCs = uFrame.pu16[1];
1826 uNewFlags = uFrame.pu16[2];
1827 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1828 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1829 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1830 /** @todo The intel pseudo code does not indicate what happens to
1831 * reserved flags. We just ignore them. */
1832 }
1833 /** @todo Check how this is supposed to work if sp=0xfffe. */
1834
1835 /*
1836 * Check the limit of the new EIP.
1837 */
1838 /** @todo Only the AMD pseudo code checks the limit here; what's
1839 * right? */
1840 if (uNewEip > pCtx->csHid.u32Limit)
1841 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1842
1843 /*
1844 * V8086 checks and flag adjustments
1845 */
1846 if (pCtx->eflags.Bits.u1VM)
1847 {
1848 if (pCtx->eflags.Bits.u2IOPL == 3)
1849 {
1850 /* Preserve IOPL and clear RF. */
1851 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1852 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1853 }
1854 else if ( enmEffOpSize == IEMMODE_16BIT
1855 && ( !(uNewFlags & X86_EFL_IF)
1856 || !pCtx->eflags.Bits.u1VIP )
1857 && !(uNewFlags & X86_EFL_TF) )
1858 {
1859 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1860 uNewFlags &= ~X86_EFL_VIF;
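/* EFLAGS.IF is bit 9 and EFLAGS.VIF is bit 19, hence the (19 - 9) shift below. */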
1861 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1862 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1863 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1864 }
1865 else
1866 return iemRaiseGeneralProtectionFault0(pIemCpu);
1867 }
1868
1869 /*
1870 * Commit the operation.
1871 */
1872 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1873 if (rcStrict != VINF_SUCCESS)
1874 return rcStrict;
1875 pCtx->rip = uNewEip;
1876 pCtx->cs = uNewCs;
1877 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1878 /** @todo do we load attribs and limit as well? */
1879 Assert(uNewFlags & X86_EFL_1);
1880 pCtx->eflags.u = uNewFlags;
1881
1882 return VINF_SUCCESS;
1883}
1884
1885
1886/**
1887 * Implements iret for protected mode
1888 *
1889 * @param enmEffOpSize The effective operand size.
1890 */
1891IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
1892{
1893 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1894 NOREF(cbInstr);
1895
1896 /*
1897 * Nested task return.
1898 */
1899 if (pCtx->eflags.Bits.u1NT)
1900 {
1901 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1902 }
1903 /*
1904 * Normal return.
1905 */
1906 else
1907 {
1908 /*
1909 * Do the stack bits, but don't commit RSP before everything checks
1910 * out right.
1911 */
1912 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1913 VBOXSTRICTRC rcStrict;
1914 RTCPTRUNION uFrame;
1915 uint16_t uNewCs;
1916 uint32_t uNewEip;
1917 uint32_t uNewFlags;
1918 uint64_t uNewRsp;
1919 if (enmEffOpSize == IEMMODE_32BIT)
1920 {
1921 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1922 if (rcStrict != VINF_SUCCESS)
1923 return rcStrict;
1924 uNewEip = uFrame.pu32[0];
1925 uNewCs = (uint16_t)uFrame.pu32[1];
1926 uNewFlags = uFrame.pu32[2];
1927 }
1928 else
1929 {
1930 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1931 if (rcStrict != VINF_SUCCESS)
1932 return rcStrict;
1933 uNewEip = uFrame.pu16[0];
1934 uNewCs = uFrame.pu16[1];
1935 uNewFlags = uFrame.pu16[2];
1936 }
1937 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
1938 if (rcStrict != VINF_SUCCESS)
1939 return rcStrict;
1940
1941 /*
1942 * What are we returning to?
1943 */
1944 if ( (uNewFlags & X86_EFL_VM)
1945 && pIemCpu->uCpl == 0)
1946 {
1947 /* V8086 mode! */
1948 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1949 }
1950 else
1951 {
1952 /*
1953 * Protected mode.
1954 */
1955 /* Read the CS descriptor. */
1956 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1957 {
1958 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
1959 return iemRaiseGeneralProtectionFault0(pIemCpu);
1960 }
1961
1962 IEMSELDESC DescCS;
1963 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
1964 if (rcStrict != VINF_SUCCESS)
1965 {
1966 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
1967 return rcStrict;
1968 }
1969
1970 /* Must be a code descriptor. */
1971 if (!DescCS.Legacy.Gen.u1DescType)
1972 {
1973 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1974 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1975 }
1976 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1977 {
1978 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1979 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1980 }
1981
1982 /* Privilege checks. */
1983 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1984 {
1985 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
1986 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1987 }
1988 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1989 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
1990 {
1991 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
1992 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1993 }
1994
1995 /* Present? */
1996 if (!DescCS.Legacy.Gen.u1Present)
1997 {
1998 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
1999 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2000 }
2001
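/* A set granularity bit means the limit is given in 4 KB units; scale it to bytes, filling in the low 12 bits. */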
2002 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
2003 if (DescCS.Legacy.Gen.u1Granularity)
2004 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2005
2006 /*
2007 * Return to outer level?
2008 */
2009 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2010 {
2011 uint16_t uNewSS;
2012 uint32_t uNewESP;
2013 if (enmEffOpSize == IEMMODE_32BIT)
2014 {
2015 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2016 if (rcStrict != VINF_SUCCESS)
2017 return rcStrict;
2018 uNewESP = uFrame.pu32[0];
2019 uNewSS = (uint16_t)uFrame.pu32[1];
2020 }
2021 else
2022 {
2023 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2024 if (rcStrict != VINF_SUCCESS)
2025 return rcStrict;
2026 uNewESP = uFrame.pu16[0];
2027 uNewSS = uFrame.pu16[1];
2028 }
2029 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2030 if (rcStrict != VINF_SUCCESS)
2031 return rcStrict;
2032
2033 /* Read the SS descriptor. */
2034 if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
2035 {
2036 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2037 return iemRaiseGeneralProtectionFault0(pIemCpu);
2038 }
2039
2040 IEMSELDESC DescSS;
2041 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2042 if (rcStrict != VINF_SUCCESS)
2043 {
2044 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2045 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2046 return rcStrict;
2047 }
2048
2049 /* Privilege checks. */
2050 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2051 {
2052 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2053 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2054 }
2055 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2056 {
2057 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2058 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2059 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2060 }
2061
2062 /* Must be a writeable data segment descriptor. */
2063 if (!DescSS.Legacy.Gen.u1DescType)
2064 {
2065 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2066 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2067 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2068 }
2069 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2070 {
2071 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2072 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2073 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2074 }
2075
2076 /* Present? */
2077 if (!DescSS.Legacy.Gen.u1Present)
2078 {
2079 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2080 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2081 }
2082
2083 uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy);
2084 if (DescSS.Legacy.Gen.u1Granularity)
2085 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2086
2087 /* Check EIP. */
2088 if (uNewEip > cbLimitCS)
2089 {
2090 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2091 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2092 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2093 }
2094
2095 /*
2096 * Commit the changes, marking CS and SS accessed first since
2097 * that may fail.
2098 */
2099 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2100 {
2101 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2102 if (rcStrict != VINF_SUCCESS)
2103 return rcStrict;
2104 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2105 }
2106 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2107 {
2108 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2109 if (rcStrict != VINF_SUCCESS)
2110 return rcStrict;
2111 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2112 }
2113
2114 pCtx->rip = uNewEip;
2115 pCtx->cs = uNewCs;
2116 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2117 pCtx->csHid.u32Limit = cbLimitCS;
2118 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2119 pCtx->rsp = uNewESP;
2120 pCtx->ss = uNewSS;
2121 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
2122 pCtx->ssHid.u32Limit = cbLimitSs;
2123 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
2124
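/* Only a subset of the flags may be restored: RF, AC and ID require a 32-bit frame, IF requires CPL <= IOPL, and IOPL/VIF/VIP require CPL 0 (see the mask construction below). */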
2125 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2126 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2127 if (enmEffOpSize != IEMMODE_16BIT)
2128 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2129 if (pIemCpu->uCpl == 0)
2130 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2131 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2132 fEFlagsMask |= X86_EFL_IF;
2133 pCtx->eflags.u &= ~fEFlagsMask;
2134 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2135
2136 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2137 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
2138 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
2139 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
2140 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
2141
2142 /* Done! */
2143
2144 }
2145 /*
2146 * Return to the same level.
2147 */
2148 else
2149 {
2150 /* Check EIP. */
2151 if (uNewEip > cbLimitCS)
2152 {
2153 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2154 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2155 }
2156
2157 /*
2158 * Commit the changes, marking CS first since it may fail.
2159 */
2160 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2161 {
2162 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2163 if (rcStrict != VINF_SUCCESS)
2164 return rcStrict;
2165 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2166 }
2167
2168 pCtx->rip = uNewEip;
2169 pCtx->cs = uNewCs;
2170 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2171 pCtx->csHid.u32Limit = cbLimitCS;
2172 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2173 pCtx->rsp = uNewRsp;
2174
2175 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2176 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2177 if (enmEffOpSize != IEMMODE_16BIT)
2178 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2179 if (pIemCpu->uCpl == 0)
2180 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2181 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2182 fEFlagsMask |= X86_EFL_IF;
2183 pCtx->eflags.u &= ~fEFlagsMask;
2184 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2185 /* Done! */
2186 }
2187 }
2188 }
2189
2190 return VINF_SUCCESS;
2191}
2192
2193
2194/**
2195 * Implements iret for long mode
2196 *
2197 * @param enmEffOpSize The effective operand size.
2198 */
2199IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2200{
2201 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2202 //VBOXSTRICTRC rcStrict;
2203 //uint64_t uNewRsp;
2204
2205 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2206 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2207}
2208
2209
2210/**
2211 * Implements iret.
2212 *
2213 * @param enmEffOpSize The effective operand size.
2214 */
2215IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2216{
2217 /*
2218 * Call a mode specific worker.
2219 */
2220 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2221 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2222 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2223 if (IEM_IS_LONG_MODE(pIemCpu))
2224 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2225
2226 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2227}
2228
2229
2230/**
2231 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2232 *
2233 * @param iSegReg The segment register number (valid).
2234 * @param uSel The new selector value.
2235 */
2236IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2237{
2238 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2239 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2240 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2241
2242 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2243
2244 /*
2245 * Real mode and V8086 mode are easy.
2246 */
2247 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2248 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2249 {
2250 *pSel = uSel;
2251 pHid->u64Base = (uint32_t)uSel << 4;
2252 /** @todo Does the CPU actually load limits and attributes in the
2253 * real/V8086 mode segment load case? It doesn't for CS in far
2254 * jumps... Affects unreal mode. */
2255 pHid->u32Limit = 0xffff;
2256 pHid->Attr.u = 0;
2257 pHid->Attr.n.u1Present = 1;
2258 pHid->Attr.n.u1DescType = 1;
2259 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2260 ? X86_SEL_TYPE_RW
2261 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2262
2263 iemRegAddToRip(pIemCpu, cbInstr);
2264 return VINF_SUCCESS;
2265 }
2266
2267 /*
2268 * Protected mode.
2269 *
2270 * Check if it's a null segment selector value first, that's OK for DS, ES,
2271 * FS and GS. If not null, then we have to load and parse the descriptor.
2272 */
2273 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
2274 {
2275 if (iSegReg == X86_SREG_SS)
2276 {
2277 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2278 || pIemCpu->uCpl != 0
2279 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2280 {
2281 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2282 return iemRaiseGeneralProtectionFault0(pIemCpu);
2283 }
2284
2285 /* In 64-bit kernel mode, the stack can be 0 because of the way
2286 interrupts are dispatched when in kernel ctx. Just load the
2287 selector value into the register and leave the hidden bits
2288 as is. */
2289 *pSel = uSel;
2290 iemRegAddToRip(pIemCpu, cbInstr);
2291 return VINF_SUCCESS;
2292 }
2293
2294 *pSel = uSel; /* Not RPL, remember :-) */
2295 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2296 && iSegReg != X86_SREG_FS
2297 && iSegReg != X86_SREG_GS)
2298 {
2299 /** @todo figure out what this actually does; it works. Needs
2300 * testcase! */
2301 pHid->Attr.u = 0;
2302 pHid->Attr.n.u1Present = 1;
2303 pHid->Attr.n.u1Long = 1;
2304 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2305 pHid->Attr.n.u2Dpl = 3;
2306 pHid->u32Limit = 0;
2307 pHid->u64Base = 0;
2308 }
2309 else
2310 {
2311 pHid->Attr.u = 0;
2312 pHid->u32Limit = 0;
2313 pHid->u64Base = 0;
2314 }
2315 iemRegAddToRip(pIemCpu, cbInstr);
2316 return VINF_SUCCESS;
2317 }
2318
2319 /* Fetch the descriptor. */
2320 IEMSELDESC Desc;
2321 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2322 if (rcStrict != VINF_SUCCESS)
2323 return rcStrict;
2324
2325 /* Check GPs first. */
2326 if (!Desc.Legacy.Gen.u1DescType)
2327 {
2328 Log(("load sreg %d, %#x - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2329 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2330 }
2331 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2332 {
2333 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2334 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2335 {
2336 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2337 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2338 }
2345 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2346 {
2347 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2348 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2349 }
2350 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2351 {
2352 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2353 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2354 }
2355 }
2356 else
2357 {
2358 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2359 {
2360 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2361 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2362 }
2363 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2364 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2365 {
2366#if 0 /* this is what intel says. */
2367 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2368 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2369 {
2370 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2371 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2372 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2373 }
2374#else /* this is what makes more sense. */
2375 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2376 {
2377 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2378 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2379 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2380 }
2381 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2382 {
2383 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2384 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2385 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2386 }
2387#endif
2388 }
2389 }
2390
2391 /* Is it there? */
2392 if (!Desc.Legacy.Gen.u1Present)
2393 {
2394 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2395 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2396 }
2397
2398 /* Get the base and limit. */
2399 uint64_t u64Base;
2400 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
2401 if (Desc.Legacy.Gen.u1Granularity)
2402 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
2403
2404 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2405 && iSegReg < X86_SREG_FS)
2406 u64Base = 0;
2407 else
2408 u64Base = X86DESC_BASE(Desc.Legacy);
2409
2410 /*
2411 * Ok, everything checked out fine. Now set the accessed bit before
2412 * committing the result into the registers.
2413 */
2414 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2415 {
2416 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2417 if (rcStrict != VINF_SUCCESS)
2418 return rcStrict;
2419 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2420 }
2421
2422 /* commit */
2423 *pSel = uSel;
2424 pHid->Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2425 pHid->u32Limit = cbLimit;
2426 pHid->u64Base = u64Base;
2427
2428 /** @todo check if the hidden bits are loaded correctly for 64-bit
2429 * mode. */
2430
2431 iemRegAddToRip(pIemCpu, cbInstr);
2432 return VINF_SUCCESS;
2433}
2434
2435
2436/**
2437 * Implements 'mov SReg, r/m'.
2438 *
2439 * @param iSegReg The segment register number (valid).
2440 * @param uSel The new selector value.
2441 */
2442IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2443{
2444 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2445 if (rcStrict == VINF_SUCCESS)
2446 {
2447 if (iSegReg == X86_SREG_SS)
2448 {
2449 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2450 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2451 }
2452 }
2453 return rcStrict;
2454}
2455
2456
2457/**
2458 * Implements 'pop SReg'.
2459 *
2460 * @param iSegReg The segment register number (valid).
2461 * @param enmEffOpSize The effective operand size (valid).
2462 */
2463IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2464{
2465 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2466 VBOXSTRICTRC rcStrict;
2467
2468 /*
2469 * Read the selector off the stack and join paths with mov ss, reg.
2470 */
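/* Work on a copy of RSP so the stack pointer is only committed once the segment load has succeeded. */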
2471 RTUINT64U TmpRsp;
2472 TmpRsp.u = pCtx->rsp;
2473 switch (enmEffOpSize)
2474 {
2475 case IEMMODE_16BIT:
2476 {
2477 uint16_t uSel;
2478 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2479 if (rcStrict == VINF_SUCCESS)
2480 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2481 break;
2482 }
2483
2484 case IEMMODE_32BIT:
2485 {
2486 uint32_t u32Value;
2487 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2488 if (rcStrict == VINF_SUCCESS)
2489 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2490 break;
2491 }
2492
2493 case IEMMODE_64BIT:
2494 {
2495 uint64_t u64Value;
2496 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2497 if (rcStrict == VINF_SUCCESS)
2498 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2499 break;
2500 }
2501 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2502 }
2503
2504 /*
2505 * Commit the stack on success.
2506 */
2507 if (rcStrict == VINF_SUCCESS)
2508 {
2509 pCtx->rsp = TmpRsp.u;
2510 if (iSegReg == X86_SREG_SS)
2511 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2512 }
2513 return rcStrict;
2514}
2515
2516
2517/**
2518 * Implements lgs, lfs, les, lds & lss.
2519 */
2520IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2521 uint16_t, uSel,
2522 uint64_t, offSeg,
2523 uint8_t, iSegReg,
2524 uint8_t, iGReg,
2525 IEMMODE, enmEffOpSize)
2526{
2527 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2528 VBOXSTRICTRC rcStrict;
2529
2530 /*
2531 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2532 */
2533 /** @todo verify and test that mov, pop and lXs all do the segment
2534 * register loading in the exact same way. */
2535 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2536 if (rcStrict == VINF_SUCCESS)
2537 {
2538 switch (enmEffOpSize)
2539 {
2540 case IEMMODE_16BIT:
2541 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2542 break;
2543 case IEMMODE_32BIT:
2544 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2545 break;
2546 case IEMMODE_64BIT:
2547 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2548 break;
2549 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2550 }
2551 }
2552
2553 return rcStrict;
2554}
2555
2556
2557/**
2558 * Implements lgdt.
2559 *
2560 * @param iEffSeg The segment of the new gdtr contents.
2561 * @param GCPtrEffSrc The address of the new gdtr contents.
2562 * @param enmEffOpSize The effective operand size.
2563 */
2564IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2565{
2566 if (pIemCpu->uCpl != 0)
2567 return iemRaiseGeneralProtectionFault0(pIemCpu);
2568 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2569
2570 /*
2571 * Fetch the limit and base address.
2572 */
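/* The memory operand is a pseudo-descriptor: a 16-bit limit followed by a 32-bit (64-bit in long mode) linear base address. */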
2573 uint16_t cbLimit;
2574 RTGCPTR GCPtrBase;
2575 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2576 if (rcStrict == VINF_SUCCESS)
2577 {
2578 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2579 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2580 else
2581 {
2582 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2583 pCtx->gdtr.cbGdt = cbLimit;
2584 pCtx->gdtr.pGdt = GCPtrBase;
2585 }
2586 if (rcStrict == VINF_SUCCESS)
2587 iemRegAddToRip(pIemCpu, cbInstr);
2588 }
2589 return rcStrict;
2590}
2591
2592
2593/**
2594 * Implements lidt.
2595 *
2596 * @param iEffSeg The segment of the new idtr contents.
2597 * @param GCPtrEffSrc The address of the new idtr contents.
2598 * @param enmEffOpSize The effective operand size.
2599 */
2600IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2601{
2602 if (pIemCpu->uCpl != 0)
2603 return iemRaiseGeneralProtectionFault0(pIemCpu);
2604 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2605
2606 /*
2607 * Fetch the limit and base address.
2608 */
2609 uint16_t cbLimit;
2610 RTGCPTR GCPtrBase;
2611 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2612 if (rcStrict == VINF_SUCCESS)
2613 {
2614 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2615 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2616 else
2617 {
2618 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2619 pCtx->idtr.cbIdt = cbLimit;
2620 pCtx->idtr.pIdt = GCPtrBase;
2621 }
2622 if (rcStrict == VINF_SUCCESS)
2623 iemRegAddToRip(pIemCpu, cbInstr);
2624 }
2625 return rcStrict;
2626}
2627
2628
2629/**
2630 * Implements lldt.
2631 *
2632 * @param uNewLdt The new LDT selector value.
2633 */
2634IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2635{
2636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2637
2638 /*
2639 * Check preconditions.
2640 */
2641 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2642 {
2643 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2644 return iemRaiseUndefinedOpcode(pIemCpu);
2645 }
2646 if (pIemCpu->uCpl != 0)
2647 {
2648 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2649 return iemRaiseGeneralProtectionFault0(pIemCpu);
2650 }
2651 if (uNewLdt & X86_SEL_LDT)
2652 {
2653 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2654 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2655 }
2656
2657 /*
2658 * Now, loading a NULL selector is easy.
2659 */
2660 if ((uNewLdt & X86_SEL_MASK) == 0)
2661 {
2662 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2663 /** @todo check if the actual value is loaded or if it's always 0. */
2664 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2665 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
2666 else
2667 pCtx->ldtr = 0;
2668 pCtx->ldtrHid.Attr.u = 0;
2669 pCtx->ldtrHid.u64Base = 0;
2670 pCtx->ldtrHid.u32Limit = 0;
2671
2672 iemRegAddToRip(pIemCpu, cbInstr);
2673 return VINF_SUCCESS;
2674 }
2675
2676 /*
2677 * Read the descriptor.
2678 */
2679 IEMSELDESC Desc;
2680 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2681 if (rcStrict != VINF_SUCCESS)
2682 return rcStrict;
2683
2684 /* Check GPs first. */
2685 if (Desc.Legacy.Gen.u1DescType)
2686 {
2687 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2688 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2689 }
2690 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2691 {
2692 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2693 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2694 }
2695 uint64_t u64Base;
2696 if (!IEM_IS_LONG_MODE(pIemCpu))
2697 u64Base = X86DESC_BASE(Desc.Legacy);
2698 else
2699 {
2700 if (Desc.Long.Gen.u5Zeros)
2701 {
2702 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2703 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2704 }
2705
2706 u64Base = X86DESC64_BASE(Desc.Long);
2707 if (!IEM_IS_CANONICAL(u64Base))
2708 {
2709 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2710 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2711 }
2712 }
2713
2714 /* NP */
2715 if (!Desc.Legacy.Gen.u1Present)
2716 {
2717 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2718 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2719 }
2720
2721 /*
2722 * It checks out alright, update the registers.
2723 */
2724/** @todo check if the actual value is loaded or if the RPL is dropped */
2725 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2726 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
2727 else
2728 pCtx->ldtr = uNewLdt & X86_SEL_MASK;
2729 pCtx->ldtrHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2730 pCtx->ldtrHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2731 pCtx->ldtrHid.u64Base = u64Base;
2732
2733 iemRegAddToRip(pIemCpu, cbInstr);
2734 return VINF_SUCCESS;
2735}
2736
2737
2738/**
2739 * Implements ltr.
2740 *
2741 * @param uNewTr The new TSS selector value.
2742 */
2743IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2744{
2745 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2746
2747 /*
2748 * Check preconditions.
2749 */
2750 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2751 {
2752 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2753 return iemRaiseUndefinedOpcode(pIemCpu);
2754 }
2755 if (pIemCpu->uCpl != 0)
2756 {
2757 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2758 return iemRaiseGeneralProtectionFault0(pIemCpu);
2759 }
2760 if (uNewTr & X86_SEL_LDT)
2761 {
2762 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2763 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2764 }
2765 if ((uNewTr & X86_SEL_MASK) == 0)
2766 {
2767 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2768 return iemRaiseGeneralProtectionFault0(pIemCpu);
2769 }
2770
2771 /*
2772 * Read the descriptor.
2773 */
2774 IEMSELDESC Desc;
2775 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2776 if (rcStrict != VINF_SUCCESS)
2777 return rcStrict;
2778
2779 /* Check GPs first. */
2780 if (Desc.Legacy.Gen.u1DescType)
2781 {
2782 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2783 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2784 }
2785 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2786 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2787 || IEM_IS_LONG_MODE(pIemCpu)) )
2788 {
2789 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2790 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2791 }
2792 uint64_t u64Base;
2793 if (!IEM_IS_LONG_MODE(pIemCpu))
2794 u64Base = X86DESC_BASE(Desc.Legacy);
2795 else
2796 {
2797 if (Desc.Long.Gen.u5Zeros)
2798 {
2799 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2800 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2801 }
2802
2803 u64Base = X86DESC64_BASE(Desc.Long);
2804 if (!IEM_IS_CANONICAL(u64Base))
2805 {
2806 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2807 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2808 }
2809 }
2810
2811 /* NP */
2812 if (!Desc.Legacy.Gen.u1Present)
2813 {
2814 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2815 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2816 }
2817
2818 /*
2819 * Set it busy.
2820 * Note! Intel says this should lock down the whole descriptor, but we'll
2821 * restrict ourselves to 32-bit for now due to lack of inline
2822 * assembly and such.
2823 */
2824 void *pvDesc;
2825 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
2826 if (rcStrict != VINF_SUCCESS)
2827 return rcStrict;
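/* The busy bit is bit 1 of the type field, i.e. bit 41 of the descriptor. Every case below targets that same bit: advancing the byte pointer by N bytes lowers the bit index by 8*N, presumably to keep the dword touched by the atomic bit-set naturally aligned. */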
2828 switch ((uintptr_t)pvDesc & 3)
2829 {
2830 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2831 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2832 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2833 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2834 }
2835 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2836 if (rcStrict != VINF_SUCCESS)
2837 return rcStrict;
2838 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2839
2840 /*
2841 * It checks out alright, update the registers.
2842 */
2843/** @todo check if the actual value is loaded or if the RPL is dropped */
2844 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2845 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
2846 else
2847 pCtx->tr = uNewTr & X86_SEL_MASK;
2848 pCtx->trHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2849 pCtx->trHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2850 pCtx->trHid.u64Base = u64Base;
2851
2852 iemRegAddToRip(pIemCpu, cbInstr);
2853 return VINF_SUCCESS;
2854}
2855
2856
2857/**
2858 * Implements mov GReg,CRx.
2859 *
2860 * @param iGReg The general register to store the CRx value in.
2861 * @param iCrReg The CRx register to read (valid).
2862 */
2863IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2864{
2865 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2866 if (pIemCpu->uCpl != 0)
2867 return iemRaiseGeneralProtectionFault0(pIemCpu);
2868 Assert(!pCtx->eflags.Bits.u1VM);
2869
2870 /* read it */
2871 uint64_t crX;
2872 switch (iCrReg)
2873 {
2874 case 0: crX = pCtx->cr0; break;
2875 case 2: crX = pCtx->cr2; break;
2876 case 3: crX = pCtx->cr3; break;
2877 case 4: crX = pCtx->cr4; break;
2878 case 8:
2879 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2880 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2881 else
2882 crX = 0xff;
2883 break;
2884 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2885 }
2886
2887 /* store it */
2888 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2889 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2890 else
2891 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2892
2893 iemRegAddToRip(pIemCpu, cbInstr);
2894 return VINF_SUCCESS;
2895}
2896
2897
2898/**
2899 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2900 *
2901 * @param iCrReg The CRx register to write (valid).
2902 * @param uNewCrX The new value.
2903 */
2904IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2905{
2906 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2907 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2908 VBOXSTRICTRC rcStrict;
2909 int rc;
2910
2911 /*
2912 * Try to store it.
2913 * Unfortunately, CPUM only does a tiny bit of the work.
2914 */
2915 switch (iCrReg)
2916 {
2917 case 0:
2918 {
2919 /*
2920 * Perform checks.
2921 */
2922 uint64_t const uOldCrX = pCtx->cr0;
2923 uNewCrX |= X86_CR0_ET; /* hardcoded */
2924
2925 /* Check for reserved bits. */
2926 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2927 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2928 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2929 if (uNewCrX & ~(uint64_t)fValid)
2930 {
2931 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2932 return iemRaiseGeneralProtectionFault0(pIemCpu);
2933 }
2934
2935 /* Check for invalid combinations. */
2936 if ( (uNewCrX & X86_CR0_PG)
2937 && !(uNewCrX & X86_CR0_PE) )
2938 {
2939 Log(("Trying to set CR0.PG without CR0.PE\n"));
2940 return iemRaiseGeneralProtectionFault0(pIemCpu);
2941 }
2942
2943 if ( !(uNewCrX & X86_CR0_CD)
2944 && (uNewCrX & X86_CR0_NW) )
2945 {
2946 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2947 return iemRaiseGeneralProtectionFault0(pIemCpu);
2948 }
2949
2950 /* Long mode consistency checks. */
2951 if ( (uNewCrX & X86_CR0_PG)
2952 && !(uOldCrX & X86_CR0_PG)
2953 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2954 {
2955 if (!(pCtx->cr4 & X86_CR4_PAE))
2956 {
2957 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
2958 return iemRaiseGeneralProtectionFault0(pIemCpu);
2959 }
2960 if (pCtx->csHid.Attr.n.u1Long)
2961 {
2962 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
2963 return iemRaiseGeneralProtectionFault0(pIemCpu);
2964 }
2965 }
2966
2967 /** @todo check reserved PDPTR bits as AMD states. */
2968
2969 /*
2970 * Change CR0.
2971 */
2972 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2973 {
2974 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
2975 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
2976 }
2977 else
2978 pCtx->cr0 = uNewCrX;
2979 Assert(pCtx->cr0 == uNewCrX);
2980
2981 /*
2982 * Change EFER.LMA if entering or leaving long mode.
2983 */
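/* EFER.LMA is the read-only "long mode active" indicator: it becomes set when paging is enabled with EFER.LME set, and is cleared again when paging is disabled. */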
2984 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
2985 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2986 {
2987 uint64_t NewEFER = pCtx->msrEFER;
2988 if (uNewCrX & X86_CR0_PG)
2989 NewEFER |= MSR_K6_EFER_LMA;
2990 else
2991 NewEFER &= ~MSR_K6_EFER_LMA;
2992
2993 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2994 CPUMSetGuestEFER(pVCpu, NewEFER);
2995 else
2996 pCtx->msrEFER = NewEFER;
2997 Assert(pCtx->msrEFER == NewEFER);
2998 }
2999
3000 /*
3001 * Inform PGM.
3002 */
3003 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3004 {
3005 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3006 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3007 {
3008 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3009 AssertRCReturn(rc, rc);
3010 /* ignore informational status codes */
3011 }
3012 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3013 /** @todo Status code management. */
3014 }
3015 else
3016 rcStrict = VINF_SUCCESS;
3017 break;
3018 }
3019
3020 /*
3021 * CR2 can be changed without any restrictions.
3022 */
3023 case 2:
3024 pCtx->cr2 = uNewCrX;
3025 rcStrict = VINF_SUCCESS;
3026 break;
3027
3028 /*
3029 * CR3 is relatively simple, although AMD and Intel have different
3030 * accounts of how setting reserved bits is handled. We take Intel's
3031 * word for the lower bits and AMD's for the high bits (63:52).
3032 */
3033 /** @todo Testcase: Setting reserved bits in CR3, especially before
3034 * enabling paging. */
3035 case 3:
3036 {
3037 /* check / mask the value. */
3038 if (uNewCrX & UINT64_C(0xfff0000000000000))
3039 {
3040 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3041 return iemRaiseGeneralProtectionFault0(pIemCpu);
3042 }
3043
3044 uint64_t fValid;
3045 if ( (pCtx->cr4 & X86_CR4_PAE)
3046 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3047 fValid = UINT64_C(0x000ffffffffff014);
3048 else if (pCtx->cr4 & X86_CR4_PAE)
3049 fValid = UINT64_C(0xfffffff4);
3050 else
3051 fValid = UINT64_C(0xfffff014);
3052 if (uNewCrX & ~fValid)
3053 {
3054 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3055 uNewCrX, uNewCrX & ~fValid));
3056 uNewCrX &= fValid;
3057 }
3058
3059 /** @todo If we're in PAE mode we should check the PDPTRs for
3060 * invalid bits. */
3061
3062 /* Make the change. */
3063 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3064 {
3065 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3066 AssertRCSuccessReturn(rc, rc);
3067 }
3068 else
3069 pCtx->cr3 = uNewCrX;
3070
3071 /* Inform PGM. */
3072 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3073 {
3074 if (pCtx->cr0 & X86_CR0_PG)
3075 {
3076 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3077 AssertRCReturn(rc, rc);
3078 /* ignore informational status codes */
3079 /** @todo status code management */
3080 }
3081 }
3082 rcStrict = VINF_SUCCESS;
3083 break;
3084 }
3085
3086 /*
3087 * CR4 is a bit more tedious as there are bits which cannot be cleared
3088 * under some circumstances and such.
3089 */
3090 case 4:
3091 {
3092 uint64_t const uOldCrX = pCtx->cr4;
3093
3094 /* reserved bits */
3095 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3096 | X86_CR4_TSD | X86_CR4_DE
3097 | X86_CR4_PSE | X86_CR4_PAE
3098 | X86_CR4_MCE | X86_CR4_PGE
3099 | X86_CR4_PCE | X86_CR4_OSFSXR
3100 | X86_CR4_OSXMMEEXCPT;
3101 //if (xxx)
3102 // fValid |= X86_CR4_VMXE;
3103 //if (xxx)
3104 // fValid |= X86_CR4_OSXSAVE;
3105 if (uNewCrX & ~(uint64_t)fValid)
3106 {
3107 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3108 return iemRaiseGeneralProtectionFault0(pIemCpu);
3109 }
3110
3111 /* long mode checks. */
3112 if ( (uOldCrX & X86_CR4_PAE)
3113 && !(uNewCrX & X86_CR4_PAE)
3114 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3115 {
3116 Log(("Trying to clear CR4.PAE while long mode is active\n"));
3117 return iemRaiseGeneralProtectionFault0(pIemCpu);
3118 }
3119
3120
3121 /*
3122 * Change it.
3123 */
3124 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3125 {
3126 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3127 AssertRCSuccessReturn(rc, rc);
3128 }
3129 else
3130 pCtx->cr4 = uNewCrX;
3131 Assert(pCtx->cr4 == uNewCrX);
3132
3133 /*
3134 * Notify SELM and PGM.
3135 */
3136 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3137 {
3138 /* SELM - VME may change things wrt to the TSS shadowing. */
3139 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3140 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3141
3142 /* PGM - flushing and mode. */
3143 if ( (uNewCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3144 != (uOldCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE)) )
3145 {
3146 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3147 AssertRCReturn(rc, rc);
3148 /* ignore informational status codes */
3149 }
3150 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3151 /** @todo Status code management. */
3152 }
3153 else
3154 rcStrict = VINF_SUCCESS;
3155 break;
3156 }
3157
3158 /*
3159 * CR8 maps to the APIC TPR.
3160 */
3161 case 8:
3162 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3163 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
3164 else
3165 rcStrict = VINF_SUCCESS;
3166 break;
3167
3168 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3169 }
3170
3171 /*
3172 * Advance the RIP on success.
3173 */
3174 /** @todo Status code management. */
3175 if (rcStrict == VINF_SUCCESS)
3176 iemRegAddToRip(pIemCpu, cbInstr);
3177 return rcStrict;
3178
3179}
3180
3181
3182/**
3183 * Implements mov CRx,GReg.
3184 *
3185 * @param iCrReg The CRx register to write (valid).
3186 * @param iGReg The general register to load the CRx value from.
3187 */
3188IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3189{
3190 if (pIemCpu->uCpl != 0)
3191 return iemRaiseGeneralProtectionFault0(pIemCpu);
3192 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3193
3194 /*
3195 * Read the new value from the source register and call common worker.
3196 */
3197 uint64_t uNewCrX;
3198 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3199 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3200 else
3201 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3202 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3203}
3204
3205
3206/**
3207 * Implements 'LMSW r/m16'
3208 *
3209 * @param u16NewMsw The new value.
3210 */
3211IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3212{
3213 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3214
3215 if (pIemCpu->uCpl != 0)
3216 return iemRaiseGeneralProtectionFault0(pIemCpu);
3217 Assert(!pCtx->eflags.Bits.u1VM);
3218
3219 /*
3220 * Compose the new CR0 value and call common worker.
3221 */
3222 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3223 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3224 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3225}
3226
3227
3228/**
3229 * Implements 'CLTS'.
3230 */
3231IEM_CIMPL_DEF_0(iemCImpl_clts)
3232{
3233 if (pIemCpu->uCpl != 0)
3234 return iemRaiseGeneralProtectionFault0(pIemCpu);
3235
3236 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3237 uint64_t uNewCr0 = pCtx->cr0;
3238 uNewCr0 &= ~X86_CR0_TS;
3239 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3240}
3241
3242
3243/**
3244 * Implements mov GReg,DRx.
3245 *
3246 * @param iGReg The general register to store the DRx value in.
3247 * @param iDrReg The DRx register to read (0-7).
3248 */
3249IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3250{
3251 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3252
3253 /*
3254 * Check preconditions.
3255 */
3256
3257 /* Raise GPs. */
3258 if (pIemCpu->uCpl != 0)
3259 return iemRaiseGeneralProtectionFault0(pIemCpu);
3260 Assert(!pCtx->eflags.Bits.u1VM);
3261
3262 if ( (iDrReg == 4 || iDrReg == 5)
3263 && (pCtx->cr4 & X86_CR4_DE) )
3264 {
3265 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3266 return iemRaiseGeneralProtectionFault0(pIemCpu);
3267 }
3268
3269 /* Raise #DB if general access detect is enabled. */
3270 if (pCtx->dr[7] & X86_DR7_GD)
3271 {
3272 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3273 return iemRaiseDebugException(pIemCpu);
3274 }
3275
3276 /*
3277 * Read the debug register and store it in the specified general register.
3278 */
3279 uint64_t drX;
3280 switch (iDrReg)
3281 {
3282 case 0: drX = pCtx->dr[0]; break;
3283 case 1: drX = pCtx->dr[1]; break;
3284 case 2: drX = pCtx->dr[2]; break;
3285 case 3: drX = pCtx->dr[3]; break;
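/* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear (the CR4.DE=1 case was rejected above). */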
3286 case 6:
3287 case 4:
3288 drX = pCtx->dr[6];
3289 drX &= ~RT_BIT_32(12);
3290 drX |= UINT32_C(0xffff0ff0);
3291 break;
3292 case 7:
3293 case 5:
3294 drX = pCtx->dr[7];
3295 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3296 drX |= RT_BIT_32(10);
3297 break;
3298 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3299 }
3300
3301 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3302 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3303 else
3304 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3305
3306 iemRegAddToRip(pIemCpu, cbInstr);
3307 return VINF_SUCCESS;
3308}
3309
3310
3311/**
3312 * Implements mov DRx,GReg.
3313 *
3314 * @param iDrReg The DRx register to write (valid).
3315 * @param iGReg The general register to load the DRx value from.
3316 */
3317IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3318{
3319 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3320
3321 /*
3322 * Check preconditions.
3323 */
3324 if (pIemCpu->uCpl != 0)
3325 return iemRaiseGeneralProtectionFault0(pIemCpu);
3326 Assert(!pCtx->eflags.Bits.u1VM);
3327
3328 if ( (iDrReg == 4 || iDrReg == 5)
3329 && (pCtx->cr4 & X86_CR4_DE) )
3330 {
3331 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3332 return iemRaiseGeneralProtectionFault0(pIemCpu);
3333 }
3334
3335 /* Raise #DB if general access detect is enabled. */
3336 /** @todo is the \#DB for DR7.GD raised before or after the \#GP for
3337 * reserved high bits in DR7/DR6? */
3338 if (pCtx->dr[7] & X86_DR7_GD)
3339 {
3340 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3341 return iemRaiseDebugException(pIemCpu);
3342 }
3343
3344 /*
3345 * Read the new value from the source register.
3346 */
3347 uint64_t uNewDrX;
3348 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3349 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3350 else
3351 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3352
3353 /*
3354 * Adjust it.
3355 */
3356 switch (iDrReg)
3357 {
3358 case 0:
3359 case 1:
3360 case 2:
3361 case 3:
3362 /* nothing to adjust */
3363 break;
3364
3365 case 6:
3366 case 4:
3367 if (uNewDrX & UINT64_C(0xffffffff00000000))
3368 {
3369 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3370 return iemRaiseGeneralProtectionFault0(pIemCpu);
3371 }
3372 uNewDrX &= ~RT_BIT_32(12);
3373 uNewDrX |= UINT32_C(0xffff0ff0);
3374 break;
3375
3376 case 7:
3377 case 5:
3378 if (uNewDrX & UINT64_C(0xffffffff00000000))
3379 {
3380 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3381 return iemRaiseGeneralProtectionFault0(pIemCpu);
3382 }
3383 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3384 uNewDrX |= RT_BIT_32(10);
3385 break;
3386
3387 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3388 }
3389
3390 /*
3391 * Do the actual setting.
3392 */
3393 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3394 {
3395 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3396 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3397 }
3398 else
3399 pCtx->dr[iDrReg] = uNewDrX;
3400
3401 iemRegAddToRip(pIemCpu, cbInstr);
3402 return VINF_SUCCESS;
3403}
3404
3405
3406/**
3407 * Implements 'INVLPG m'.
3408 *
3409 * @param GCPtrPage The effective address of the page to invalidate.
3410 * @remarks Updates the RIP.
3411 */
3412 IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3413{
3414 /* ring-0 only. */
3415 if (pIemCpu->uCpl != 0)
3416 return iemRaiseGeneralProtectionFault0(pIemCpu);
3417 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3418
3419 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3420 iemRegAddToRip(pIemCpu, cbInstr);
3421
3422 if ( rc == VINF_SUCCESS
3423 || rc == VINF_PGM_SYNC_CR3)
3424 return VINF_SUCCESS;
3425 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
3426 return rc;
3427}
3428
3429
3430/**
3431 * Implements RDTSC.
3432 */
3433IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3434{
3435 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3436
3437 /*
3438 * Check preconditions.
3439 */
3440 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3441 return iemRaiseUndefinedOpcode(pIemCpu);
3442
3443 if ( (pCtx->cr4 & X86_CR4_TSD)
3444 && pIemCpu->uCpl != 0)
3445 {
3446 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3447 return iemRaiseGeneralProtectionFault0(pIemCpu);
3448 }
3449
3450 /*
3451 * Do the job.
3452 */
3453 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3454 pCtx->rax = (uint32_t)uTicks;
3455 pCtx->rdx = uTicks >> 32;
3456#ifdef IEM_VERIFICATION_MODE
3457 pIemCpu->fIgnoreRaxRdx = true;
3458#endif
3459
3460 iemRegAddToRip(pIemCpu, cbInstr);
3461 return VINF_SUCCESS;
3462}
3463
3464
3465/**
3466 * Implements RDMSR.
3467 */
3468IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3469{
3470 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3471
3472 /*
3473 * Check preconditions.
3474 */
3475 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3476 return iemRaiseUndefinedOpcode(pIemCpu);
3477 if (pIemCpu->uCpl != 0)
3478 return iemRaiseGeneralProtectionFault0(pIemCpu);
3479
3480 /*
3481 * Do the job.
3482 */
3483 RTUINT64U uValue;
3484 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3485 if (rc != VINF_SUCCESS)
3486 {
3487 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3488 return iemRaiseGeneralProtectionFault0(pIemCpu);
3489 }
3490
3491 pCtx->rax = uValue.au32[0];
3492 pCtx->rdx = uValue.au32[1];
3493
3494 iemRegAddToRip(pIemCpu, cbInstr);
3495 return VINF_SUCCESS;
3496}
3497
3498
3499/**
3500 * Implements 'IN eAX, port'.
3501 *
3502 * @param u16Port The source port.
3503 * @param cbReg The register size.
3504 */
3505IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3506{
3507 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3508
3509 /*
3510 * CPL check
3511 */
3512 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3513 if (rcStrict != VINF_SUCCESS)
3514 return rcStrict;
3515
3516 /*
3517 * Perform the I/O.
3518 */
3519 uint32_t u32Value;
3520 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3521 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3522 else
3523 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3524 if (IOM_SUCCESS(rcStrict))
3525 {
3526 switch (cbReg)
3527 {
3528 case 1: pCtx->al = (uint8_t)u32Value; break;
3529 case 2: pCtx->ax = (uint16_t)u32Value; break;
3530 case 4: pCtx->rax = u32Value; break;
3531 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3532 }
3533 iemRegAddToRip(pIemCpu, cbInstr);
3534 pIemCpu->cPotentialExits++;
3535 }
3536 /** @todo massage rcStrict. */
3537 return rcStrict;
3538}
3539
3540
3541/**
3542 * Implements 'IN eAX, DX'.
3543 *
3544 * @param cbReg The register size.
3545 */
3546IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3547{
3548 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3549}
3550
3551
3552/**
3553 * Implements 'OUT port, eAX'.
3554 *
3555 * @param u16Port The destination port.
3556 * @param cbReg The register size.
3557 */
3558IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3559{
3560 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3561
3562 /*
3563 * CPL check
3564 */
3565 if ( (pCtx->cr0 & X86_CR0_PE)
3566 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3567 || pCtx->eflags.Bits.u1VM) )
3568 {
3569 /** @todo I/O port permission bitmap check */
3570 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
3571 }
3572
3573 /*
3574 * Perform the I/O.
3575 */
3576 uint32_t u32Value;
3577 switch (cbReg)
3578 {
3579 case 1: u32Value = pCtx->al; break;
3580 case 2: u32Value = pCtx->ax; break;
3581 case 4: u32Value = pCtx->eax; break;
3582 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3583 }
3584 VBOXSTRICTRC rc;
3585 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3586 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3587 else
3588 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3589 if (IOM_SUCCESS(rc))
3590 {
3591 iemRegAddToRip(pIemCpu, cbInstr);
3592 pIemCpu->cPotentialExits++;
3593 /** @todo massage rc. */
3594 }
3595 return rc;
3596}
3597
3598
3599/**
3600 * Implements 'OUT DX, eAX'.
3601 *
3602 * @param cbReg The register size.
3603 */
3604IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3605{
3606 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3607}
3608
3609
3610/**
3611 * Implements 'CLI'.
3612 */
3613IEM_CIMPL_DEF_0(iemCImpl_cli)
3614{
3615 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3616
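/* Protected mode: CPL <= IOPL may clear IF; CPL 3 with CR4.PVI set clears VIF instead; anything else faults. V8086 mode: IOPL 3 clears IF, IOPL < 3 needs CR4.VME to clear VIF, otherwise #GP. */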
3617 if (pCtx->cr0 & X86_CR0_PE)
3618 {
3619 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3620 if (!pCtx->eflags.Bits.u1VM)
3621 {
3622 if (pIemCpu->uCpl <= uIopl)
3623 pCtx->eflags.Bits.u1IF = 0;
3624 else if ( pIemCpu->uCpl == 3
3625 && (pCtx->cr4 & X86_CR4_PVI) )
3626 pCtx->eflags.Bits.u1VIF = 0;
3627 else
3628 return iemRaiseGeneralProtectionFault0(pIemCpu);
3629 }
3630 /* V8086 */
3631 else if (uIopl == 3)
3632 pCtx->eflags.Bits.u1IF = 0;
3633 else if ( uIopl < 3
3634 && (pCtx->cr4 & X86_CR4_VME) )
3635 pCtx->eflags.Bits.u1VIF = 0;
3636 else
3637 return iemRaiseGeneralProtectionFault0(pIemCpu);
3638 }
3639 /* real mode */
3640 else
3641 pCtx->eflags.Bits.u1IF = 0;
3642 iemRegAddToRip(pIemCpu, cbInstr);
3643 return VINF_SUCCESS;
3644}
3645
3646
3647/**
3648 * Implements 'STI'.
3649 */
3650IEM_CIMPL_DEF_0(iemCImpl_sti)
3651{
3652 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3653
3654 if (pCtx->cr0 & X86_CR0_PE)
3655 {
3656 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3657 if (!pCtx->eflags.Bits.u1VM)
3658 {
3659 if (pIemCpu->uCpl <= uIopl)
3660 pCtx->eflags.Bits.u1IF = 1;
3661 else if ( pIemCpu->uCpl == 3
3662 && (pCtx->cr4 & X86_CR4_PVI)
3663 && !pCtx->eflags.Bits.u1VIP )
3664 pCtx->eflags.Bits.u1VIF = 1;
3665 else
3666 return iemRaiseGeneralProtectionFault0(pIemCpu);
3667 }
3668 /* V8086 */
3669 else if (uIopl == 3)
3670 pCtx->eflags.Bits.u1IF = 1;
3671 else if ( uIopl < 3
3672 && (pCtx->cr4 & X86_CR4_VME)
3673 && !pCtx->eflags.Bits.u1VIP )
3674 pCtx->eflags.Bits.u1VIF = 1;
3675 else
3676 return iemRaiseGeneralProtectionFault0(pIemCpu);
3677 }
3678 /* real mode */
3679 else
3680 pCtx->eflags.Bits.u1IF = 1;
3681
3682 iemRegAddToRip(pIemCpu, cbInstr);
3683 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3684 return VINF_SUCCESS;
3685}
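/* Editor's note: a condensed view of the CLI/STI gating implemented above,
 * derived from the two functions (CLI effect shown first, STI second):
 *
 *      Mode        Condition                               Effect
 *      protected   CPL <= IOPL                             IF  := 0 / 1
 *      protected   CPL == 3 && CR4.PVI (STI also: !VIP)    VIF := 0 / 1
 *      V8086       IOPL == 3                               IF  := 0 / 1
 *      V8086       IOPL < 3 && CR4.VME (STI also: !VIP)    VIF := 0 / 1
 *      real mode   always                                  IF  := 0 / 1
 *      otherwise   -                                       #GP(0)
 *
 * STI additionally calls EMSetInhibitInterruptsPC so that the instruction
 * following STI (e.g. the HLT in the common "sti; hlt" sequence) executes
 * before any pending external interrupt is delivered. */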
3686
3687
3688/**
3689 * Implements 'HLT'.
3690 */
3691IEM_CIMPL_DEF_0(iemCImpl_hlt)
3692{
3693 if (pIemCpu->uCpl != 0)
3694 return iemRaiseGeneralProtectionFault0(pIemCpu);
3695 iemRegAddToRip(pIemCpu, cbInstr);
3696 return VINF_EM_HALT;
3697}
3698
3699
3700/**
3701 * Implements 'CPUID'.
3702 */
3703IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3704{
3705 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3706
3707 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3708 pCtx->rax &= UINT32_C(0xffffffff);
3709 pCtx->rbx &= UINT32_C(0xffffffff);
3710 pCtx->rcx &= UINT32_C(0xffffffff);
3711 pCtx->rdx &= UINT32_C(0xffffffff);
3712
3713 iemRegAddToRip(pIemCpu, cbInstr);
3714 return VINF_SUCCESS;
3715}
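/* Editor's note: worked example of the masking above.  The four AND
 * statements model CPUID leaving only zero-extended 32-bit results in
 * RAX..RDX.  E.g. leaf 0 (EAX=0) returns the highest standard leaf in EAX
 * and the vendor string split over EBX/EDX/ECX ("GenuineIntel" => EBX='Genu',
 * EDX='ineI', ECX='ntel'), with the upper register halves cleared. */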
3716
3717
3718/**
3719 * Implements 'AAD'.
3720 *
3721 * @param bImm The immediate operand.
3722 */
3723IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3724{
3725 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3726
3727 uint16_t const ax = pCtx->ax;
3728 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3729 pCtx->ax = al;
3730 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3731 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3732 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3733
3734 iemRegAddToRip(pIemCpu, cbInstr);
3735 return VINF_SUCCESS;
3736}
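/* Editor's note: worked example of the AAD computation above, using the
 * default base of 10 (the D5 0A encoding).  For AX=0x0204:
 *     AL := 0x04 + 0x02 * 10 = 0x18,  AH := 0   =>   AX = 0x0018,
 * with SF/ZF/PF taken from the new AL and OF/AF/CF left undefined, which is
 * exactly how iemHlpUpdateArithEFlagsU8 is invoked above. */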
3737
3738
3739/**
3740 * Implements 'AAM'.
3741 *
3742 * @param bImm The immediate operand. Cannot be 0.
3743 */
3744IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3745{
3746 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3747 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3748
3749 uint16_t const ax = pCtx->ax;
3750 uint8_t const al = (uint8_t)ax % bImm;
3751 uint8_t const ah = (uint8_t)ax / bImm;
3752 pCtx->ax = (ah << 8) + al;
3753 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3754 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3755 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3756
3757 iemRegAddToRip(pIemCpu, cbInstr);
3758 return VINF_SUCCESS;
3759}
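/* Editor's note: a tiny, disabled stand-alone model of the AAM computation
 * above; the function name is made up for illustration and, like the real
 * code, it assumes bImm is non-zero. */
#if 0
static uint16_t iemSketchAamModel(uint16_t uAX, uint8_t bImm)
{
    uint8_t const al = (uint8_t)uAX % bImm;  /* remainder goes to AL */
    uint8_t const ah = (uint8_t)uAX / bImm;  /* quotient goes to AH */
    return (uint16_t)((ah << 8) | al);
}
/* Example: iemSketchAamModel(0x003f, 10) == 0x0603, i.e. 63 = 6*10 + 3. */
#endif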
3760
3761
3762
3763
3764/*
3765 * Instantiate the various string operation combinations.
3766 */
3767#define OP_SIZE 8
3768#define ADDR_SIZE 16
3769#include "IEMAllCImplStrInstr.cpp.h"
3770#define OP_SIZE 8
3771#define ADDR_SIZE 32
3772#include "IEMAllCImplStrInstr.cpp.h"
3773#define OP_SIZE 8
3774#define ADDR_SIZE 64
3775#include "IEMAllCImplStrInstr.cpp.h"
3776
3777#define OP_SIZE 16
3778#define ADDR_SIZE 16
3779#include "IEMAllCImplStrInstr.cpp.h"
3780#define OP_SIZE 16
3781#define ADDR_SIZE 32
3782#include "IEMAllCImplStrInstr.cpp.h"
3783#define OP_SIZE 16
3784#define ADDR_SIZE 64
3785#include "IEMAllCImplStrInstr.cpp.h"
3786
3787#define OP_SIZE 32
3788#define ADDR_SIZE 16
3789#include "IEMAllCImplStrInstr.cpp.h"
3790#define OP_SIZE 32
3791#define ADDR_SIZE 32
3792#include "IEMAllCImplStrInstr.cpp.h"
3793#define OP_SIZE 32
3794#define ADDR_SIZE 64
3795#include "IEMAllCImplStrInstr.cpp.h"
3796
3797#define OP_SIZE 64
3798#define ADDR_SIZE 32
3799#include "IEMAllCImplStrInstr.cpp.h"
3800#define OP_SIZE 64
3801#define ADDR_SIZE 64
3802#include "IEMAllCImplStrInstr.cpp.h"
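/* Editor's note: each #define/#include pair above expands the string
 * instruction template once per operand-size/address-size combination; the
 * template derives its worker names from OP_SIZE and ADDR_SIZE and has to
 * #undef both at the end so the next pair can be defined.  There is no
 * OP_SIZE 64 / ADDR_SIZE 16 pair because 16-bit addressing cannot be used
 * together with 64-bit operands in long mode.  Below is a minimal sketch of
 * the usual name-building trick such multiple-inclusion templates rely on;
 * the macro and worker names are illustrative, not the ones used by the real
 * IEMAllCImplStrInstr.cpp.h. */
#if 0
# define MY_CAT3_IND(a, b, c)   a##b##c
# define MY_CAT3(a, b, c)       MY_CAT3_IND(a, b, c) /* expands the args first */
# define MY_WORKER(a_BaseName)  MY_CAT3(a_BaseName, OP_SIZE, ADDR_SIZE)
/* With OP_SIZE 16 and ADDR_SIZE 32, MY_WORKER(iemCImpl_stos_op) yields the
   symbol iemCImpl_stos_op1632 (a hypothetical name). */
#endif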
3803
3804
3805/**
3806 * Implements 'FINIT' and 'FNINIT'.
3807 *
3808 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3809 * not.
3810 */
3811IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3812{
3813 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3814
3815 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3816 return iemRaiseDeviceNotAvailable(pIemCpu);
3817
3818 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
3819 if (fCheckXcpts && TODO )
3820 return iemRaiseMathFault(pIemCpu);
3821 */
3822
3823 if (iemFRegIsFxSaveFormat(pIemCpu))
3824 {
3825 pCtx->fpu.FCW = 0x37f;
3826 pCtx->fpu.FSW = 0;
3827 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
3828 pCtx->fpu.FPUDP = 0;
3829 pCtx->fpu.DS = 0; //??
3830 pCtx->fpu.FPUIP = 0;
3831 pCtx->fpu.CS = 0; //??
3832 pCtx->fpu.FOP = 0;
3833 }
3834 else
3835 {
3836 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3837 pFpu->FCW = 0x37f;
3838 pFpu->FSW = 0;
3839 pFpu->FTW = 0xffff; /* 11 - empty */
3840 pFpu->FPUOO = 0; //??
3841 pFpu->FPUOS = 0; //??
3842 pFpu->FPUIP = 0;
3843 pFpu->CS = 0; //??
3844 pFpu->FOP = 0;
3845 }
3846
3847 iemRegAddToRip(pIemCpu, cbInstr);
3848 return VINF_SUCCESS;
3849}
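/* Editor's note: decoding of the FCW reset value 0x37f written above: the
 * six exception mask bits IM/DM/ZM/OM/UM/PM (bits 0..5) are all set, bit 6
 * is a reserved bit that is set in the initial value, PC (bits 9:8) = 11b
 * selects 64-bit (double extended) precision and RC (bits 11:10) = 00b
 * selects round-to-nearest.  FSW=0 clears TOP and all status flags, and the
 * two tag encodings used above mean the same thing: abridged 0x00 and full
 * 0xffff both mark every x87 register as empty. */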
3850
3851
3852/**
3853 * Implements 'FXSAVE'.
3854 *
3855 * @param iEffSeg The effective segment.
3856 * @param GCPtrEff The address of the image.
3857 * @param enmEffOpSize The operand size (only REX.W really matters).
3858 */
3859IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3860{
3861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3862
3863 /*
3864 * Raise exceptions.
3865 */
3866 if (pCtx->cr0 & X86_CR0_EM)
3867 return iemRaiseUndefinedOpcode(pIemCpu);
3868 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3869 return iemRaiseDeviceNotAvailable(pIemCpu);
3870 if (GCPtrEff & 15)
3871 {
3872 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3873 * all/any misalignment sizes; Intel says it's an implementation detail. */
3874 if ( (pCtx->cr0 & X86_CR0_AM)
3875 && pCtx->eflags.Bits.u1AC
3876 && pIemCpu->uCpl == 3)
3877 return iemRaiseAlignmentCheckException(pIemCpu);
3878 return iemRaiseGeneralProtectionFault0(pIemCpu);
3879 }
3880 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3881
3882 /*
3883 * Access the memory.
3884 */
3885 void *pvMem512;
3886 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W);
3887 if (rcStrict != VINF_SUCCESS)
3888 return rcStrict;
3889 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
3890
3891 /*
3892 * Store the registers.
3893 */
3894 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
3895 * specific whether MXCSR and XMM0-XMM7 are saved. */
3896
3897 /* common for all formats */
3898 pDst->FCW = pCtx->fpu.FCW;
3899 pDst->FSW = pCtx->fpu.FSW;
3900 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
3901 pDst->FOP = pCtx->fpu.FOP;
3902 pDst->MXCSR = pCtx->fpu.MXCSR;
3903 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
3904 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
3905 {
3906 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
3907 * them for now... */
3908 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
3909 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
3910 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
3911 pDst->aRegs[i].au32[3] = 0;
3912 }
3913
3914 /* FPU IP, CS, DP and DS. */
3915 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
3916 * state information. :-/
3917 * Storing zeros now to prevent any potential leakage of host info. */
3918 pDst->FPUIP = 0;
3919 pDst->CS = 0;
3920 pDst->Rsrvd1 = 0;
3921 pDst->FPUDP = 0;
3922 pDst->DS = 0;
3923 pDst->Rsrvd2 = 0;
3924
3925 /* XMM registers. */
3926 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
3927 || pIemCpu->enmCpuMode != IEMMODE_64BIT
3928 || pIemCpu->uCpl != 0)
3929 {
3930 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
3931 for (uint32_t i = 0; i < cXmmRegs; i++)
3932 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
3933 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
3934 * right? */
3935 }
3936
3937 /*
3938 * Commit the memory.
3939 */
3940 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W);
3941 if (rcStrict != VINF_SUCCESS)
3942 return rcStrict;
3943
3944 iemRegAddToRip(pIemCpu, cbInstr);
3945 return VINF_SUCCESS;
3946}
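/* Editor's note: the 512-byte FXSAVE image stores an abridged, 8-bit tag
 * word (one bit per ST register: 1 = valid, 0 = empty) instead of the legacy
 * 16-bit tag word with two bits per register, which is why only the low byte
 * of FTW is stored above.  A disabled, stand-alone sketch of the usual
 * full-to-abridged conversion follows; the function name is made up for
 * illustration and it assumes the 11b encoding means "empty". */
#if 0
static uint8_t iemSketchFtwFullToAbridged(uint16_t u16FullFtw)
{
    uint8_t u8Ftw = 0;
    for (unsigned iReg = 0; iReg < 8; iReg++)
        if (((u16FullFtw >> (iReg * 2)) & 3) != 3) /* anything but 11b (empty) */
            u8Ftw |= (uint8_t)(1 << iReg);         /* mark the register valid */
    return u8Ftw;
}
#endif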
3947
3948
3949/**
3950 * Implements 'FXRSTOR'.
3951 *
3952 * @param iEffSeg The effective segment.
 * @param GCPtrEff The address of the image.
3953 * @param enmEffOpSize The operand size (only REX.W really matters).
3954 */
3955IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3956{
3957 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3958
3959 /*
3960 * Raise exceptions.
3961 */
3962 if (pCtx->cr0 & X86_CR0_EM)
3963 return iemRaiseUndefinedOpcode(pIemCpu);
3964 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3965 return iemRaiseDeviceNotAvailable(pIemCpu);
3966 if (GCPtrEff & 15)
3967 {
3968 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3969 * all/any misalignment sizes; Intel says it's an implementation detail. */
3970 if ( (pCtx->cr0 & X86_CR0_AM)
3971 && pCtx->eflags.Bits.u1AC
3972 && pIemCpu->uCpl == 3)
3973 return iemRaiseAlignmentCheckException(pIemCpu);
3974 return iemRaiseGeneralProtectionFault0(pIemCpu);
3975 }
3976 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3977
3978 /*
3979 * Access the memory.
3980 */
3981 void *pvMem512;
3982 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
3983 if (rcStrict != VINF_SUCCESS)
3984 return rcStrict;
3985 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
3986
3987 /*
3988 * Check the state for stuff which will GP(0).
3989 */
3990 uint32_t const fMXCSR = pSrc->MXCSR;
3991 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
3992 if (fMXCSR & ~fMXCSR_MASK)
3993 {
3994 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
3995 return iemRaiseGeneralProtectionFault0(pIemCpu);
3996 }
3997
3998 /*
3999 * Load the registers.
4000 */
4001 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4002 * specific whether MXCSR and XMM0-XMM7 are restored. */
4003
4004 /* common for all formats */
4005 pCtx->fpu.FCW = pSrc->FCW;
4006 pCtx->fpu.FSW = pSrc->FSW;
4007 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4008 pCtx->fpu.FOP = pSrc->FOP;
4009 pCtx->fpu.MXCSR = fMXCSR;
4010 /* (MXCSR_MASK is read-only) */
4011 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4012 {
4013 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4014 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4015 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4016 pCtx->fpu.aRegs[i].au32[3] = 0;
4017 }
4018
4019 /* FPU IP, CS, DP and DS. */
4020 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4021 {
4022 pCtx->fpu.FPUIP = pSrc->FPUIP;
4023 pCtx->fpu.CS = pSrc->CS;
4024 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4025 pCtx->fpu.FPUDP = pSrc->FPUDP;
4026 pCtx->fpu.DS = pSrc->DS;
4027 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4028 }
4029 else
4030 {
4031 pCtx->fpu.FPUIP = pSrc->FPUIP;
4032 pCtx->fpu.CS = pSrc->CS;
4033 pCtx->fpu.Rsrvd1 = 0;
4034 pCtx->fpu.FPUDP = pSrc->FPUDP;
4035 pCtx->fpu.DS = pSrc->DS;
4036 pCtx->fpu.Rsrvd2 = 0;
4037 }
4038
4039 /* XMM registers. */
4040 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4041 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4042 || pIemCpu->uCpl != 0)
4043 {
4044 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4045 for (uint32_t i = 0; i < cXmmRegs; i++)
4046 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4047 }
4048
4049 /*
4050 * Commit the memory.
4051 */
4052 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4053 if (rcStrict != VINF_SUCCESS)
4054 return rcStrict;
4055
4056 iemRegAddToRip(pIemCpu, cbInstr);
4057 return VINF_SUCCESS;
4058}
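/* Editor's note: worked example of the MXCSR consistency check above.  With
 * the default mask 0xffbf (used when the saved MXCSR_MASK field is zero),
 * MXCSR bit 6 (DAZ, 0x40) is not an allowed bit, so an image with
 * MXCSR = 0x1fc0 gives 0x1fc0 & ~0xffbf = 0x0040 != 0 and FXRSTOR raises
 * #GP(0); the same image restores fine when MXCSR_MASK advertises DAZ
 * support (e.g. 0xffff). */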
4059
4060/** @} */
4061