VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 42641

Last change on this file since 42641 was 42641, checked in by vboxsync, 13 years ago

IEM: Implemented WRMSR. Fixed mixed up src/dst in XADD.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 150.5 KB
1/* $Id: IEMAllCImpl.cpp.h 42641 2012-08-06 23:17:02Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
43 }
44 return VINF_SUCCESS;
45}
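#if 0 /* Editor's illustrative sketch (not part of the original file): a purely
       * conceptual version of the I/O permission bitmap test that the @todo
       * above refers to.  The 32-bit TSS stores the bitmap offset at byte 102;
       * each port has one bit, and a set bit denies access.  A real
       * implementation would have to read these bytes from guest memory via
       * the IEM/TSS accessors instead of taking a flat pointer like this
       * hypothetical helper does. */
static bool iemHlpSketchIoBitmapDeniesAccess(uint8_t const *pbTss, uint32_t cbTssLimit,
                                             uint16_t u16Port, uint8_t cbOperand)
{
    uint16_t offBitmap = *(uint16_t const *)&pbTss[102];    /* I/O map base field of the TSS. */
    /* The access spans cbOperand consecutive ports; every bit must be clear. */
    for (uint32_t iPort = u16Port; iPort < (uint32_t)u16Port + cbOperand; iPort++)
    {
        uint32_t offByte = offBitmap + (iPort >> 3);
        if (offByte > cbTssLimit)               /* Bytes beyond the TSS limit count as set => deny. */
            return true;
        if (pbTss[offByte] & (1 << (iPort & 7)))
            return true;                        /* Bit set => raise #GP(0) in the caller. */
    }
    return false;                               /* All bits clear => access allowed. */
}
#endif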
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
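#if 0 /* Editor's illustrative sketch (not part of the original file): the same
       * parity computation as iemHlpCalcParityFlag above, but folding the byte
       * with XOR instead of counting the bits one at a time.  PF is set when
       * the low byte of the result contains an even number of 1 bits. */
static bool iemHlpSketchCalcParityFlag(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;      /* fold the high nibble onto the low nibble */
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;
    return !(u8Result & 1);         /* bit 0 now holds the XOR (odd/even count) of all 8 bits */
}
#endif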
80
81
82/**
 83 * Updates the specified flags according to an 8-bit result.
84 *
 85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param pSReg Pointer to the segment register.
106 * @param uRpl The RPL.
107 */
108static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl)
109{
 110 /** @todo Testcase: write a testcase checking what happens when loading a NULL
111 * data selector in protected mode. */
112 pSReg->Sel = uRpl;
113 pSReg->ValidSel = uRpl;
114 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
115 pSReg->u64Base = 0;
116 pSReg->u32Limit = 0;
117 pSReg->Attr.u = 0;
118}
119
120
121/**
122 * Helper used by iret.
123 *
124 * @param uCpl The new CPL.
125 * @param pSReg Pointer to the segment register.
126 */
127static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
128{
129#ifdef VBOX_WITH_RAW_MODE_NOT_R0
130 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
131 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
132#else
133 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
134#endif
135
136 if ( uCpl > pSReg->Attr.n.u2Dpl
137 && pSReg->Attr.n.u1DescType /* code or data, not system */
138 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
139 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
140 iemHlpLoadNullDataSelectorProt(pSReg, 0);
141}
142
143/** @} */
144
145/** @name C Implementations
146 * @{
147 */
148
149/**
150 * Implements a 16-bit popa.
151 */
152IEM_CIMPL_DEF_0(iemCImpl_popa_16)
153{
154 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
155 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
156 RTGCPTR GCPtrLast = GCPtrStart + 15;
157 VBOXSTRICTRC rcStrict;
158
159 /*
160 * The docs are a bit hard to comprehend here, but it looks like we wrap
161 * around in real mode as long as none of the individual "popa" crosses the
162 * end of the stack segment. In protected mode we check the whole access
163 * in one go. For efficiency, only do the word-by-word thing if we're in
164 * danger of wrapping around.
165 */
166 /** @todo do popa boundary / wrap-around checks. */
167 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
168 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
169 {
170 /* word-by-word */
171 RTUINT64U TmpRsp;
172 TmpRsp.u = pCtx->rsp;
173 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
174 if (rcStrict == VINF_SUCCESS)
175 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
176 if (rcStrict == VINF_SUCCESS)
177 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
178 if (rcStrict == VINF_SUCCESS)
179 {
180 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
181 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
182 }
183 if (rcStrict == VINF_SUCCESS)
184 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
185 if (rcStrict == VINF_SUCCESS)
186 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
187 if (rcStrict == VINF_SUCCESS)
188 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
189 if (rcStrict == VINF_SUCCESS)
190 {
191 pCtx->rsp = TmpRsp.u;
192 iemRegAddToRip(pIemCpu, cbInstr);
193 }
194 }
195 else
196 {
197 uint16_t const *pa16Mem = NULL;
198 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
199 if (rcStrict == VINF_SUCCESS)
200 {
201 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
202 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
203 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
204 /* skip sp */
205 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
206 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
207 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
208 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
209 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
210 if (rcStrict == VINF_SUCCESS)
211 {
212 iemRegAddToRsp(pCtx, 16);
213 iemRegAddToRip(pIemCpu, cbInstr);
214 }
215 }
216 }
217 return rcStrict;
218}
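#if 0 /* Editor's illustrative sketch (not part of the original file): the
       * 16-bit POPA frame as it sits at SS:SP, lowest address first.  The
       * pa16Mem[7 - X86_GREG_xXX] indexing in the mapped path above walks this
       * layout; the saved SP slot is skipped rather than loaded. */
typedef struct POPA16FRAMESKETCH
{
    uint16_t di;            /* [SP+ 0] pushed last by PUSHA, popped first */
    uint16_t si;            /* [SP+ 2] */
    uint16_t bp;            /* [SP+ 4] */
    uint16_t spIgnored;     /* [SP+ 6] skipped - SP is simply advanced past it */
    uint16_t bx;            /* [SP+ 8] */
    uint16_t dx;            /* [SP+10] */
    uint16_t cx;            /* [SP+12] */
    uint16_t ax;            /* [SP+14] pushed first by PUSHA, popped last */
} POPA16FRAMESKETCH;
#endif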
219
220
221/**
222 * Implements a 32-bit popa.
223 */
224IEM_CIMPL_DEF_0(iemCImpl_popa_32)
225{
226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
227 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
228 RTGCPTR GCPtrLast = GCPtrStart + 31;
229 VBOXSTRICTRC rcStrict;
230
231 /*
232 * The docs are a bit hard to comprehend here, but it looks like we wrap
233 * around in real mode as long as none of the individual "popa" crosses the
234 * end of the stack segment. In protected mode we check the whole access
235 * in one go. For efficiency, only do the word-by-word thing if we're in
236 * danger of wrapping around.
237 */
238 /** @todo do popa boundary / wrap-around checks. */
239 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
240 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
241 {
242 /* word-by-word */
243 RTUINT64U TmpRsp;
244 TmpRsp.u = pCtx->rsp;
245 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
246 if (rcStrict == VINF_SUCCESS)
247 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
248 if (rcStrict == VINF_SUCCESS)
249 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
250 if (rcStrict == VINF_SUCCESS)
251 {
252 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
253 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
254 }
255 if (rcStrict == VINF_SUCCESS)
256 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
257 if (rcStrict == VINF_SUCCESS)
258 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
261 if (rcStrict == VINF_SUCCESS)
262 {
263#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
264 pCtx->rdi &= UINT32_MAX;
265 pCtx->rsi &= UINT32_MAX;
266 pCtx->rbp &= UINT32_MAX;
267 pCtx->rbx &= UINT32_MAX;
268 pCtx->rdx &= UINT32_MAX;
269 pCtx->rcx &= UINT32_MAX;
270 pCtx->rax &= UINT32_MAX;
271#endif
272 pCtx->rsp = TmpRsp.u;
273 iemRegAddToRip(pIemCpu, cbInstr);
274 }
275 }
276 else
277 {
278 uint32_t const *pa32Mem;
279 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
280 if (rcStrict == VINF_SUCCESS)
281 {
282 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
283 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
284 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
285 /* skip esp */
286 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
287 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
288 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
289 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
290 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 iemRegAddToRsp(pCtx, 32);
294 iemRegAddToRip(pIemCpu, cbInstr);
295 }
296 }
297 }
298 return rcStrict;
299}
300
301
302/**
303 * Implements a 16-bit pusha.
304 */
305IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
306{
307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
308 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
309 RTGCPTR GCPtrBottom = GCPtrTop - 15;
310 VBOXSTRICTRC rcStrict;
311
312 /*
313 * The docs are a bit hard to comprehend here, but it looks like we wrap
314 * around in real mode as long as none of the individual "pushd" crosses the
315 * end of the stack segment. In protected mode we check the whole access
316 * in one go. For efficiency, only do the word-by-word thing if we're in
317 * danger of wrapping around.
318 */
319 /** @todo do pusha boundary / wrap-around checks. */
320 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
321 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
322 {
323 /* word-by-word */
324 RTUINT64U TmpRsp;
325 TmpRsp.u = pCtx->rsp;
326 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
333 if (rcStrict == VINF_SUCCESS)
334 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
335 if (rcStrict == VINF_SUCCESS)
336 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
337 if (rcStrict == VINF_SUCCESS)
338 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
339 if (rcStrict == VINF_SUCCESS)
340 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
341 if (rcStrict == VINF_SUCCESS)
342 {
343 pCtx->rsp = TmpRsp.u;
344 iemRegAddToRip(pIemCpu, cbInstr);
345 }
346 }
347 else
348 {
349 GCPtrBottom--;
350 uint16_t *pa16Mem = NULL;
351 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
352 if (rcStrict == VINF_SUCCESS)
353 {
354 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
355 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
356 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
357 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
358 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
359 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
360 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
361 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
362 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
363 if (rcStrict == VINF_SUCCESS)
364 {
365 iemRegSubFromRsp(pCtx, 16);
366 iemRegAddToRip(pIemCpu, cbInstr);
367 }
368 }
369 }
370 return rcStrict;
371}
372
373
374/**
375 * Implements a 32-bit pusha.
376 */
377IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
378{
379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
380 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
381 RTGCPTR GCPtrBottom = GCPtrTop - 31;
382 VBOXSTRICTRC rcStrict;
383
384 /*
385 * The docs are a bit hard to comprehend here, but it looks like we wrap
386 * around in real mode as long as none of the individual "pusha" crosses the
387 * end of the stack segment. In protected mode we check the whole access
388 * in one go. For efficiency, only do the word-by-word thing if we're in
389 * danger of wrapping around.
390 */
391 /** @todo do pusha boundary / wrap-around checks. */
392 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
393 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
394 {
395 /* word-by-word */
396 RTUINT64U TmpRsp;
397 TmpRsp.u = pCtx->rsp;
398 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
399 if (rcStrict == VINF_SUCCESS)
400 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
401 if (rcStrict == VINF_SUCCESS)
402 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
403 if (rcStrict == VINF_SUCCESS)
404 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
405 if (rcStrict == VINF_SUCCESS)
406 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
407 if (rcStrict == VINF_SUCCESS)
408 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
409 if (rcStrict == VINF_SUCCESS)
410 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
411 if (rcStrict == VINF_SUCCESS)
412 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
413 if (rcStrict == VINF_SUCCESS)
414 {
415 pCtx->rsp = TmpRsp.u;
416 iemRegAddToRip(pIemCpu, cbInstr);
417 }
418 }
419 else
420 {
421 GCPtrBottom--;
422 uint32_t *pa32Mem;
423 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
424 if (rcStrict == VINF_SUCCESS)
425 {
426 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
427 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
428 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
429 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
430 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
431 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
432 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
433 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
434 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
435 if (rcStrict == VINF_SUCCESS)
436 {
437 iemRegSubFromRsp(pCtx, 32);
438 iemRegAddToRip(pIemCpu, cbInstr);
439 }
440 }
441 }
442 return rcStrict;
443}
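#if 0 /* Editor's illustrative sketch (not part of the original file): what the
       * mapped-buffer path of the 32-bit PUSHA above writes.  The stored ESP is
       * the value from before the first push, which is why pCtx->esp can be
       * copied into the frame directly.  This hypothetical helper takes a flat
       * pointer to the 32 bytes at ESP-32 purely for illustration. */
static void iemHlpSketchPusha32(uint32_t *pau32Frame,
                                uint32_t uEax, uint32_t uEcx, uint32_t uEdx, uint32_t uEbx,
                                uint32_t uEspBefore, uint32_t uEbp, uint32_t uEsi, uint32_t uEdi)
{
    pau32Frame[7] = uEax;           /* highest address - pushed first */
    pau32Frame[6] = uEcx;
    pau32Frame[5] = uEdx;
    pau32Frame[4] = uEbx;
    pau32Frame[3] = uEspBefore;     /* ESP as it was before PUSHA started pushing */
    pau32Frame[2] = uEbp;
    pau32Frame[1] = uEsi;
    pau32Frame[0] = uEdi;           /* lowest address - pushed last */
}
#endif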
444
445
446/**
447 * Implements pushf.
448 *
449 *
450 * @param enmEffOpSize The effective operand size.
451 */
452IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
453{
454 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
455
456 /*
 457 * If we're in V8086 mode some care is required (which is why we're
458 * doing this in a C implementation).
459 */
460 uint32_t fEfl = pCtx->eflags.u;
461 if ( (fEfl & X86_EFL_VM)
462 && X86_EFL_GET_IOPL(fEfl) != 3 )
463 {
464 Assert(pCtx->cr0 & X86_CR0_PE);
465 if ( enmEffOpSize != IEMMODE_16BIT
466 || !(pCtx->cr4 & X86_CR4_VME))
467 return iemRaiseGeneralProtectionFault0(pIemCpu);
468 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
469 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
470 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
471 }
472
473 /*
474 * Ok, clear RF and VM and push the flags.
475 */
476 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
477
478 VBOXSTRICTRC rcStrict;
479 switch (enmEffOpSize)
480 {
481 case IEMMODE_16BIT:
482 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
483 break;
484 case IEMMODE_32BIT:
485 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
486 break;
487 case IEMMODE_64BIT:
488 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
489 break;
490 IEM_NOT_REACHED_DEFAULT_CASE_RET();
491 }
492 if (rcStrict != VINF_SUCCESS)
493 return rcStrict;
494
495 iemRegAddToRip(pIemCpu, cbInstr);
496 return VINF_SUCCESS;
497}
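#if 0 /* Editor's illustrative sketch (not part of the original file): the bit
       * shuffling done by the CR4.VME path above in isolation.  With VME
       * enabled and IOPL < 3, the 16-bit PUSHF image reports the virtual
       * interrupt flag (VIF, bit 19) in the IF position (bit 9) while hiding
       * the real IF; RF and VM sit above bit 15 and are dropped anyway. */
static uint16_t iemHlpSketchPushfVmeImage(uint32_t fEfl)
{
    fEfl &= ~(uint32_t)(1 << 9);                /* clear the real IF in the image */
    fEfl |= (fEfl & (1u << 19)) >> (19 - 9);    /* copy VIF down into the IF position */
    return (uint16_t)fEfl;
}
#endif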
498
499
500/**
501 * Implements popf.
502 *
503 * @param enmEffOpSize The effective operand size.
504 */
505IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
506{
507 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
508 uint32_t const fEflOld = pCtx->eflags.u;
509 VBOXSTRICTRC rcStrict;
510 uint32_t fEflNew;
511
512 /*
513 * V8086 is special as usual.
514 */
515 if (fEflOld & X86_EFL_VM)
516 {
517 /*
518 * Almost anything goes if IOPL is 3.
519 */
520 if (X86_EFL_GET_IOPL(fEflOld) == 3)
521 {
522 switch (enmEffOpSize)
523 {
524 case IEMMODE_16BIT:
525 {
526 uint16_t u16Value;
527 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
528 if (rcStrict != VINF_SUCCESS)
529 return rcStrict;
530 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
531 break;
532 }
533 case IEMMODE_32BIT:
534 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
535 if (rcStrict != VINF_SUCCESS)
536 return rcStrict;
537 break;
538 IEM_NOT_REACHED_DEFAULT_CASE_RET();
539 }
540
541 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
542 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
543 }
544 /*
545 * Interrupt flag virtualization with CR4.VME=1.
546 */
547 else if ( enmEffOpSize == IEMMODE_16BIT
548 && (pCtx->cr4 & X86_CR4_VME) )
549 {
550 uint16_t u16Value;
551 RTUINT64U TmpRsp;
552 TmpRsp.u = pCtx->rsp;
553 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
554 if (rcStrict != VINF_SUCCESS)
555 return rcStrict;
556
557 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
558 * or before? */
559 if ( ( (u16Value & X86_EFL_IF)
560 && (fEflOld & X86_EFL_VIP))
561 || (u16Value & X86_EFL_TF) )
562 return iemRaiseGeneralProtectionFault0(pIemCpu);
563
564 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
565 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
566 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
567 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
568
569 pCtx->rsp = TmpRsp.u;
570 }
571 else
572 return iemRaiseGeneralProtectionFault0(pIemCpu);
573
574 }
575 /*
576 * Not in V8086 mode.
577 */
578 else
579 {
580 /* Pop the flags. */
581 switch (enmEffOpSize)
582 {
583 case IEMMODE_16BIT:
584 {
585 uint16_t u16Value;
586 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
587 if (rcStrict != VINF_SUCCESS)
588 return rcStrict;
589 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
590 break;
591 }
592 case IEMMODE_32BIT:
593 case IEMMODE_64BIT:
594 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
595 if (rcStrict != VINF_SUCCESS)
596 return rcStrict;
597 break;
598 IEM_NOT_REACHED_DEFAULT_CASE_RET();
599 }
600
601 /* Merge them with the current flags. */
602 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
603 || pIemCpu->uCpl == 0)
604 {
605 fEflNew &= X86_EFL_POPF_BITS;
606 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
607 }
608 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
609 {
610 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
611 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
612 }
613 else
614 {
615 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
616 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
617 }
618 }
619
620 /*
621 * Commit the flags.
622 */
623 Assert(fEflNew & RT_BIT_32(1));
624 pCtx->eflags.u = fEflNew;
625 iemRegAddToRip(pIemCpu, cbInstr);
626
627 return VINF_SUCCESS;
628}
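#if 0 /* Editor's illustrative sketch (not part of the original file): the merge
       * pattern used three times above.  Only the bits in fWritable are taken
       * from the popped value; everything else keeps its previous state, which
       * is how IOPL is protected when CPL > 0 and IF when CPL > IOPL. */
static uint32_t iemHlpSketchMergePoppedEFlags(uint32_t fEflOld, uint32_t fEflPopped, uint32_t fWritable)
{
    return (fEflPopped &  fWritable)
         | (fEflOld    & ~fWritable);
}
#endif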
629
630
631/**
 632 * Implements a 16-bit indirect call.
633 *
634 * @param uNewPC The new program counter (RIP) value (loaded from the
635 * operand).
637 */
638IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
639{
640 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
641 uint16_t uOldPC = pCtx->ip + cbInstr;
642 if (uNewPC > pCtx->cs.u32Limit)
643 return iemRaiseGeneralProtectionFault0(pIemCpu);
644
645 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
646 if (rcStrict != VINF_SUCCESS)
647 return rcStrict;
648
649 pCtx->rip = uNewPC;
650 return VINF_SUCCESS;
651
652}
653
654
655/**
656 * Implements a 16-bit relative call.
657 *
 658 * @param offDisp The displacement offset.
659 */
660IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
661{
662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
663 uint16_t uOldPC = pCtx->ip + cbInstr;
664 uint16_t uNewPC = uOldPC + offDisp;
665 if (uNewPC > pCtx->cs.u32Limit)
666 return iemRaiseGeneralProtectionFault0(pIemCpu);
667
668 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
669 if (rcStrict != VINF_SUCCESS)
670 return rcStrict;
671
672 pCtx->rip = uNewPC;
673 return VINF_SUCCESS;
674}
675
676
677/**
678 * Implements a 32-bit indirect call.
679 *
680 * @param uNewPC The new program counter (RIP) value (loaded from the
681 * operand).
683 */
684IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
685{
686 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
687 uint32_t uOldPC = pCtx->eip + cbInstr;
688 if (uNewPC > pCtx->cs.u32Limit)
689 return iemRaiseGeneralProtectionFault0(pIemCpu);
690
691 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
692 if (rcStrict != VINF_SUCCESS)
693 return rcStrict;
694
695 pCtx->rip = uNewPC;
696 return VINF_SUCCESS;
697
698}
699
700
701/**
702 * Implements a 32-bit relative call.
703 *
 704 * @param offDisp The displacement offset.
705 */
706IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
707{
708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
709 uint32_t uOldPC = pCtx->eip + cbInstr;
710 uint32_t uNewPC = uOldPC + offDisp;
711 if (uNewPC > pCtx->cs.u32Limit)
712 return iemRaiseGeneralProtectionFault0(pIemCpu);
713
714 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
715 if (rcStrict != VINF_SUCCESS)
716 return rcStrict;
717
718 pCtx->rip = uNewPC;
719 return VINF_SUCCESS;
720}
721
722
723/**
724 * Implements a 64-bit indirect call.
725 *
726 * @param uNewPC The new program counter (RIP) value (loaded from the
727 * operand).
729 */
730IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
731{
732 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
733 uint64_t uOldPC = pCtx->rip + cbInstr;
734 if (!IEM_IS_CANONICAL(uNewPC))
735 return iemRaiseGeneralProtectionFault0(pIemCpu);
736
737 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
738 if (rcStrict != VINF_SUCCESS)
739 return rcStrict;
740
741 pCtx->rip = uNewPC;
742 return VINF_SUCCESS;
743
744}
745
746
747/**
748 * Implements a 64-bit relative call.
749 *
 750 * @param offDisp The displacement offset.
751 */
752IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
753{
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint64_t uOldPC = pCtx->rip + cbInstr;
756 uint64_t uNewPC = uOldPC + offDisp;
757 if (!IEM_IS_CANONICAL(uNewPC))
758 return iemRaiseNotCanonical(pIemCpu);
759
760 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
761 if (rcStrict != VINF_SUCCESS)
762 return rcStrict;
763
764 pCtx->rip = uNewPC;
765 return VINF_SUCCESS;
766}
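#if 0 /* Editor's illustrative sketch (not part of the original file): the
       * canonical-address test applied to the 64-bit call targets above,
       * assuming 48 implemented virtual address bits - bits 63:47 must all
       * equal bit 47.  IEM_IS_CANONICAL wraps the real check; the exact width
       * is CPU dependent. */
static bool iemHlpSketchIsCanonical48(uint64_t uAddr)
{
    return ((int64_t)(uAddr << 16) >> 16) == (int64_t)uAddr;    /* sign-extend from bit 47 and compare */
}
#endif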
767
768
769/**
770 * Implements far jumps and calls thru task segments (TSS).
771 *
772 * @param uSel The selector.
773 * @param enmBranch The kind of branching we're performing.
774 * @param enmEffOpSize The effective operand size.
 775 * @param pDesc The descriptor corresponding to @a uSel. The type is
 776 * task segment.
777 */
778IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
779{
780 /* Call various functions to do the work. */
781 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
782}
783
784
785/**
786 * Implements far jumps and calls thru task gates.
787 *
788 * @param uSel The selector.
789 * @param enmBranch The kind of branching we're performing.
790 * @param enmEffOpSize The effective operand size.
 791 * @param pDesc The descriptor corresponding to @a uSel. The type is
 792 * task gate.
793 */
794IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
795{
796 /* Call various functions to do the work. */
797 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
798}
799
800
801/**
802 * Implements far jumps and calls thru call gates.
803 *
804 * @param uSel The selector.
805 * @param enmBranch The kind of branching we're performing.
806 * @param enmEffOpSize The effective operand size.
 807 * @param pDesc The descriptor corresponding to @a uSel. The type is
808 * call gate.
809 */
810IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
811{
812 /* Call various functions to do the work. */
813 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
814}
815
816
817/**
818 * Implements far jumps and calls thru system selectors.
819 *
820 * @param uSel The selector.
821 * @param enmBranch The kind of branching we're performing.
822 * @param enmEffOpSize The effective operand size.
 823 * @param pDesc The descriptor corresponding to @a uSel.
824 */
825IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
826{
827 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
828 Assert((uSel & X86_SEL_MASK_OFF_RPL));
829
830 if (IEM_IS_LONG_MODE(pIemCpu))
831 switch (pDesc->Legacy.Gen.u4Type)
832 {
833 case AMD64_SEL_TYPE_SYS_CALL_GATE:
834 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
835
836 default:
837 case AMD64_SEL_TYPE_SYS_LDT:
838 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
839 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
840 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
841 case AMD64_SEL_TYPE_SYS_INT_GATE:
842 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
843 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
844
845 }
846
847 switch (pDesc->Legacy.Gen.u4Type)
848 {
849 case X86_SEL_TYPE_SYS_286_CALL_GATE:
850 case X86_SEL_TYPE_SYS_386_CALL_GATE:
851 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
852
853 case X86_SEL_TYPE_SYS_TASK_GATE:
854 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
855
856 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
857 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
858 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
859
860 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
861 Log(("branch %04x -> busy 286 TSS\n", uSel));
862 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
863
864 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
865 Log(("branch %04x -> busy 386 TSS\n", uSel));
866 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
867
868 default:
869 case X86_SEL_TYPE_SYS_LDT:
870 case X86_SEL_TYPE_SYS_286_INT_GATE:
871 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
872 case X86_SEL_TYPE_SYS_386_INT_GATE:
873 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
874 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
875 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
876 }
877}
878
879
880/**
881 * Implements far jumps.
882 *
883 * @param uSel The selector.
884 * @param offSeg The segment offset.
885 * @param enmEffOpSize The effective operand size.
886 */
887IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
888{
889 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
890 NOREF(cbInstr);
891 Assert(offSeg <= UINT32_MAX);
892
893 /*
894 * Real mode and V8086 mode are easy. The only snag seems to be that
895 * CS.limit doesn't change and the limit check is done against the current
896 * limit.
897 */
898 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
899 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
900 {
901 if (offSeg > pCtx->cs.u32Limit)
902 return iemRaiseGeneralProtectionFault0(pIemCpu);
903
904 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
905 pCtx->rip = offSeg;
906 else
907 pCtx->rip = offSeg & UINT16_MAX;
908 pCtx->cs.Sel = uSel;
909 pCtx->cs.ValidSel = uSel;
910 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
911 pCtx->cs.u64Base = (uint32_t)uSel << 4;
912 return VINF_SUCCESS;
913 }
914
915 /*
916 * Protected mode. Need to parse the specified descriptor...
917 */
918 if (!(uSel & X86_SEL_MASK_OFF_RPL))
919 {
920 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
921 return iemRaiseGeneralProtectionFault0(pIemCpu);
922 }
923
924 /* Fetch the descriptor. */
925 IEMSELDESC Desc;
926 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
927 if (rcStrict != VINF_SUCCESS)
928 return rcStrict;
929
930 /* Is it there? */
931 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
932 {
933 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
934 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
935 }
936
937 /*
938 * Deal with it according to its type. We do the standard code selectors
939 * here and dispatch the system selectors to worker functions.
940 */
941 if (!Desc.Legacy.Gen.u1DescType)
942 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
943
944 /* Only code segments. */
945 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
946 {
947 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
948 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
949 }
950
951 /* L vs D. */
952 if ( Desc.Legacy.Gen.u1Long
953 && Desc.Legacy.Gen.u1DefBig
954 && IEM_IS_LONG_MODE(pIemCpu))
955 {
956 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
957 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
958 }
959
 960 /* DPL/RPL/CPL check, where conforming segments make a difference. */
961 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
962 {
963 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
964 {
965 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
966 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
967 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
968 }
969 }
970 else
971 {
972 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
973 {
974 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
975 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
976 }
977 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
978 {
979 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
980 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
981 }
982 }
983
984 /* Chop the high bits if 16-bit (Intel says so). */
985 if (enmEffOpSize == IEMMODE_16BIT)
986 offSeg &= UINT16_MAX;
987
988 /* Limit check. (Should alternatively check for non-canonical addresses
989 here, but that is ruled out by offSeg being 32-bit, right?) */
990 uint64_t u64Base;
991 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
992 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
993 u64Base = 0;
994 else
995 {
996 if (offSeg > cbLimit)
997 {
998 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
999 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1000 }
1001 u64Base = X86DESC_BASE(&Desc.Legacy);
1002 }
1003
1004 /*
1005 * Ok, everything checked out fine. Now set the accessed bit before
1006 * committing the result into CS, CSHID and RIP.
1007 */
1008 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1009 {
1010 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1011 if (rcStrict != VINF_SUCCESS)
1012 return rcStrict;
1013#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1014 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1015#endif
1016 }
1017
1018 /* commit */
1019 pCtx->rip = offSeg;
1020 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1021 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1022 pCtx->cs.ValidSel = pCtx->cs.Sel;
1023 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1024 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1025 pCtx->cs.u32Limit = cbLimit;
1026 pCtx->cs.u64Base = u64Base;
1027 /** @todo check if the hidden bits are loaded correctly for 64-bit
1028 * mode. */
1029 return VINF_SUCCESS;
1030}
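#if 0 /* Editor's illustrative sketch (not part of the original file): the
       * real/V86-mode branch of the far jump above in isolation.  CS is simply
       * loaded with the selector, the hidden base becomes selector * 16, and
       * the hidden limit and attributes are left untouched. */
static void iemHlpSketchRealModeFarJmp(uint16_t uSel, uint32_t offSeg,
                                       uint16_t *puCsSel, uint64_t *puCsBase, uint64_t *puRip)
{
    *puCsSel  = uSel;
    *puCsBase = (uint32_t)uSel << 4;    /* linear base = selector * 16 */
    *puRip    = offSeg;                 /* checked against the old CS.limit by the caller */
}
#endif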
1031
1032
1033/**
1034 * Implements far calls.
1035 *
1036 * This is very similar to iemCImpl_FarJmp.
1037 *
1038 * @param uSel The selector.
1039 * @param offSeg The segment offset.
1040 * @param enmEffOpSize The operand size (in case we need it).
1041 */
1042IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1043{
1044 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1045 VBOXSTRICTRC rcStrict;
1046 uint64_t uNewRsp;
1047 RTPTRUNION uPtrRet;
1048
1049 /*
1050 * Real mode and V8086 mode are easy. The only snag seems to be that
1051 * CS.limit doesn't change and the limit check is done against the current
1052 * limit.
1053 */
1054 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1055 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1056 {
1057 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1058
1059 /* Check stack first - may #SS(0). */
1060 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1061 &uPtrRet.pv, &uNewRsp);
1062 if (rcStrict != VINF_SUCCESS)
1063 return rcStrict;
1064
1065 /* Check the target address range. */
1066 if (offSeg > UINT32_MAX)
1067 return iemRaiseGeneralProtectionFault0(pIemCpu);
1068
1069 /* Everything is fine, push the return address. */
1070 if (enmEffOpSize == IEMMODE_16BIT)
1071 {
1072 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1073 uPtrRet.pu16[1] = pCtx->cs.Sel;
1074 }
1075 else
1076 {
1077 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1078 uPtrRet.pu16[3] = pCtx->cs.Sel;
1079 }
1080 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1081 if (rcStrict != VINF_SUCCESS)
1082 return rcStrict;
1083
1084 /* Branch. */
1085 pCtx->rip = offSeg;
1086 pCtx->cs.Sel = uSel;
1087 pCtx->cs.ValidSel = uSel;
1088 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1089 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1090 return VINF_SUCCESS;
1091 }
1092
1093 /*
1094 * Protected mode. Need to parse the specified descriptor...
1095 */
1096 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1097 {
1098 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1099 return iemRaiseGeneralProtectionFault0(pIemCpu);
1100 }
1101
1102 /* Fetch the descriptor. */
1103 IEMSELDESC Desc;
1104 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1105 if (rcStrict != VINF_SUCCESS)
1106 return rcStrict;
1107
1108 /*
1109 * Deal with it according to its type. We do the standard code selectors
1110 * here and dispatch the system selectors to worker functions.
1111 */
1112 if (!Desc.Legacy.Gen.u1DescType)
1113 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1114
1115 /* Only code segments. */
1116 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1117 {
1118 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1119 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1120 }
1121
1122 /* L vs D. */
1123 if ( Desc.Legacy.Gen.u1Long
1124 && Desc.Legacy.Gen.u1DefBig
1125 && IEM_IS_LONG_MODE(pIemCpu))
1126 {
1127 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1128 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1129 }
1130
1131 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1132 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1133 {
1134 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1135 {
1136 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1137 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1138 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1139 }
1140 }
1141 else
1142 {
1143 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1144 {
1145 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1146 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1147 }
1148 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1149 {
1150 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1151 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1152 }
1153 }
1154
1155 /* Is it there? */
1156 if (!Desc.Legacy.Gen.u1Present)
1157 {
1158 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1159 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1160 }
1161
1162 /* Check stack first - may #SS(0). */
1163 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1164 * 16-bit code cause a two or four byte CS to be pushed? */
1165 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1166 enmEffOpSize == IEMMODE_64BIT ? 8+8
1167 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1168 &uPtrRet.pv, &uNewRsp);
1169 if (rcStrict != VINF_SUCCESS)
1170 return rcStrict;
1171
1172 /* Chop the high bits if 16-bit (Intel says so). */
1173 if (enmEffOpSize == IEMMODE_16BIT)
1174 offSeg &= UINT16_MAX;
1175
1176 /* Limit / canonical check. */
1177 uint64_t u64Base;
1178 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1179 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1180 {
1181 if (!IEM_IS_CANONICAL(offSeg))
1182 {
1183 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1184 return iemRaiseNotCanonical(pIemCpu);
1185 }
1186 u64Base = 0;
1187 }
1188 else
1189 {
1190 if (offSeg > cbLimit)
1191 {
1192 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1193 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1194 }
1195 u64Base = X86DESC_BASE(&Desc.Legacy);
1196 }
1197
1198 /*
1199 * Now set the accessed bit before
1200 * writing the return address to the stack and committing the result into
1201 * CS, CSHID and RIP.
1202 */
1203 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1204 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1205 {
1206 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1207 if (rcStrict != VINF_SUCCESS)
1208 return rcStrict;
1209#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1210 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1211#endif
1212 }
1213
1214 /* stack */
1215 if (enmEffOpSize == IEMMODE_16BIT)
1216 {
1217 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1218 uPtrRet.pu16[1] = pCtx->cs.Sel;
1219 }
1220 else if (enmEffOpSize == IEMMODE_32BIT)
1221 {
1222 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1223 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1224 }
1225 else
1226 {
1227 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1228 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1229 }
1230 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1231 if (rcStrict != VINF_SUCCESS)
1232 return rcStrict;
1233
1234 /* commit */
1235 pCtx->rip = offSeg;
1236 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1237 pCtx->cs.Sel |= pIemCpu->uCpl;
1238 pCtx->cs.ValidSel = pCtx->cs.Sel;
1239 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1240 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1241 pCtx->cs.u32Limit = cbLimit;
1242 pCtx->cs.u64Base = u64Base;
1243 /** @todo check if the hidden bits are loaded correctly for 64-bit
1244 * mode. */
1245 return VINF_SUCCESS;
1246}
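#if 0 /* Editor's illustrative sketch (not part of the original file): the far
       * call return frame written above for the 32-bit operand size.  CS gets
       * a full 32-bit slot; what lands in its high word is exactly the open
       * question in the @todo testcase comments. */
typedef struct CALLF32FRAMESKETCH
{
    uint32_t uRetEip;       /* [ESP+0] EIP of the instruction following CALLF */
    uint32_t uRetCs;        /* [ESP+4] CS selector in the low word */
} CALLF32FRAMESKETCH;
#endif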
1247
1248
1249/**
1250 * Implements retf.
1251 *
1252 * @param enmEffOpSize The effective operand size.
1253 * @param cbPop The number of bytes of arguments to pop from the
1254 * stack.
1255 */
1256IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1257{
1258 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1259 VBOXSTRICTRC rcStrict;
1260 RTCPTRUNION uPtrFrame;
1261 uint64_t uNewRsp;
1262 uint64_t uNewRip;
1263 uint16_t uNewCs;
1264 NOREF(cbInstr);
1265
1266 /*
1267 * Read the stack values first.
1268 */
1269 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1270 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1271 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1272 if (rcStrict != VINF_SUCCESS)
1273 return rcStrict;
1274 if (enmEffOpSize == IEMMODE_16BIT)
1275 {
1276 uNewRip = uPtrFrame.pu16[0];
1277 uNewCs = uPtrFrame.pu16[1];
1278 }
1279 else if (enmEffOpSize == IEMMODE_32BIT)
1280 {
1281 uNewRip = uPtrFrame.pu32[0];
1282 uNewCs = uPtrFrame.pu16[2];
1283 }
1284 else
1285 {
1286 uNewRip = uPtrFrame.pu64[0];
1287 uNewCs = uPtrFrame.pu16[4];
1288 }
1289
1290 /*
1291 * Real mode and V8086 mode are easy.
1292 */
1293 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1294 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1295 {
1296 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1297 /** @todo check how this is supposed to work if sp=0xfffe. */
1298
1299 /* Check the limit of the new EIP. */
1300 /** @todo Intel pseudo code only does the limit check for 16-bit
1301 * operands, AMD does not make any distinction. What is right? */
1302 if (uNewRip > pCtx->cs.u32Limit)
1303 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1304
1305 /* commit the operation. */
1306 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1307 if (rcStrict != VINF_SUCCESS)
1308 return rcStrict;
1309 pCtx->rip = uNewRip;
1310 pCtx->cs.Sel = uNewCs;
1311 pCtx->cs.ValidSel = uNewCs;
1312 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1313 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1314 /** @todo do we load attribs and limit as well? */
1315 if (cbPop)
1316 iemRegAddToRsp(pCtx, cbPop);
1317 return VINF_SUCCESS;
1318 }
1319
1320 /*
1321 * Protected mode is complicated, of course.
1322 */
1323 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1324 {
1325 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1326 return iemRaiseGeneralProtectionFault0(pIemCpu);
1327 }
1328
1329 /* Fetch the descriptor. */
1330 IEMSELDESC DescCs;
1331 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1332 if (rcStrict != VINF_SUCCESS)
1333 return rcStrict;
1334
1335 /* Can only return to a code selector. */
1336 if ( !DescCs.Legacy.Gen.u1DescType
1337 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1338 {
1339 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1340 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1341 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1342 }
1343
1344 /* L vs D. */
1345 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1346 && DescCs.Legacy.Gen.u1DefBig
1347 && IEM_IS_LONG_MODE(pIemCpu))
1348 {
1349 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1350 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1351 }
1352
1353 /* DPL/RPL/CPL checks. */
1354 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1355 {
1356 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1357 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1358 }
1359
1360 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1361 {
1362 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1363 {
1364 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1365 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1366 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1367 }
1368 }
1369 else
1370 {
1371 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1372 {
1373 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1374 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1375 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1376 }
1377 }
1378
1379 /* Is it there? */
1380 if (!DescCs.Legacy.Gen.u1Present)
1381 {
1382 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1383 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1384 }
1385
1386 /*
1387 * Return to outer privilege? (We'll typically have entered via a call gate.)
1388 */
1389 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1390 {
1391 /* Read the return pointer, it comes before the parameters. */
1392 RTCPTRUNION uPtrStack;
1393 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1394 if (rcStrict != VINF_SUCCESS)
1395 return rcStrict;
1396 uint16_t uNewOuterSs;
1397 uint64_t uNewOuterRsp;
1398 if (enmEffOpSize == IEMMODE_16BIT)
1399 {
1400 uNewOuterRsp = uPtrFrame.pu16[0];
1401 uNewOuterSs = uPtrFrame.pu16[1];
1402 }
1403 else if (enmEffOpSize == IEMMODE_32BIT)
1404 {
1405 uNewOuterRsp = uPtrFrame.pu32[0];
1406 uNewOuterSs = uPtrFrame.pu16[2];
1407 }
1408 else
1409 {
1410 uNewOuterRsp = uPtrFrame.pu64[0];
1411 uNewOuterSs = uPtrFrame.pu16[4];
1412 }
1413
1414 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1415 and read the selector. */
1416 IEMSELDESC DescSs;
1417 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1418 {
1419 if ( !DescCs.Legacy.Gen.u1Long
1420 || (uNewOuterSs & X86_SEL_RPL) == 3)
1421 {
1422 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1423 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1424 return iemRaiseGeneralProtectionFault0(pIemCpu);
1425 }
1426 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1427 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1428 }
1429 else
1430 {
1431 /* Fetch the descriptor for the new stack segment. */
1432 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1433 if (rcStrict != VINF_SUCCESS)
1434 return rcStrict;
1435 }
1436
1437 /* Check that RPL of stack and code selectors match. */
1438 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1439 {
1440 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1441 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1442 }
1443
1444 /* Must be a writable data segment. */
1445 if ( !DescSs.Legacy.Gen.u1DescType
1446 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1447 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1448 {
1449 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1450 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1451 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1452 }
1453
1454 /* L vs D. (Not mentioned by Intel.) */
1455 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1456 && DescSs.Legacy.Gen.u1DefBig
1457 && IEM_IS_LONG_MODE(pIemCpu))
1458 {
1459 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1460 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1461 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1462 }
1463
1464 /* DPL/RPL/CPL checks. */
1465 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1466 {
1467 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1468 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1469 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1470 }
1471
1472 /* Is it there? */
1473 if (!DescSs.Legacy.Gen.u1Present)
1474 {
1475 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1476 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1477 }
1478
1479 /* Calc SS limit.*/
1480 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1481
1482 /* Is RIP canonical or within CS.limit? */
1483 uint64_t u64Base;
1484 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1485
1486 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1487 {
1488 if (!IEM_IS_CANONICAL(uNewRip))
1489 {
1490 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1491 return iemRaiseNotCanonical(pIemCpu);
1492 }
1493 u64Base = 0;
1494 }
1495 else
1496 {
1497 if (uNewRip > cbLimitCs)
1498 {
1499 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1500 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1501 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1502 }
1503 u64Base = X86DESC_BASE(&DescCs.Legacy);
1504 }
1505
1506 /*
1507 * Now set the accessed bit before
1508 * writing the return address to the stack and committing the result into
1509 * CS, CSHID and RIP.
1510 */
1511 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1512 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1513 {
1514 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1515 if (rcStrict != VINF_SUCCESS)
1516 return rcStrict;
1517#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1518 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1519#endif
1520 }
1521 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1522 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1523 {
1524 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1525 if (rcStrict != VINF_SUCCESS)
1526 return rcStrict;
1527#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1528 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1529#endif
1530 }
1531
1532 /* commit */
1533 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1534 if (rcStrict != VINF_SUCCESS)
1535 return rcStrict;
1536 if (enmEffOpSize == IEMMODE_16BIT)
1537 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1538 else
1539 pCtx->rip = uNewRip;
1540 pCtx->cs.Sel = uNewCs;
1541 pCtx->cs.ValidSel = uNewCs;
1542 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1543 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1544 pCtx->cs.u32Limit = cbLimitCs;
1545 pCtx->cs.u64Base = u64Base;
1546 pCtx->rsp = uNewRsp;
1547 pCtx->ss.Sel = uNewOuterSs;
1548 pCtx->ss.ValidSel = uNewOuterSs;
1549 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1550 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1551 pCtx->ss.u32Limit = cbLimitSs;
1552 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1553 pCtx->ss.u64Base = 0;
1554 else
1555 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1556
1557 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1558 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1559 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1560 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1561 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1562
1563 /** @todo check if the hidden bits are loaded correctly for 64-bit
1564 * mode. */
1565
1566 if (cbPop)
1567 iemRegAddToRsp(pCtx, cbPop);
1568
1569 /* Done! */
1570 }
1571 /*
1572 * Return to the same privilege level
1573 */
1574 else
1575 {
1576 /* Limit / canonical check. */
1577 uint64_t u64Base;
1578 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1579
1580 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1581 {
1582 if (!IEM_IS_CANONICAL(uNewRip))
1583 {
1584 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1585 return iemRaiseNotCanonical(pIemCpu);
1586 }
1587 u64Base = 0;
1588 }
1589 else
1590 {
1591 if (uNewRip > cbLimitCs)
1592 {
1593 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1594 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1595 }
1596 u64Base = X86DESC_BASE(&DescCs.Legacy);
1597 }
1598
1599 /*
1600 * Now set the accessed bit before
1601 * writing the return address to the stack and committing the result into
1602 * CS, CSHID and RIP.
1603 */
1604 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1605 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1606 {
1607 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1608 if (rcStrict != VINF_SUCCESS)
1609 return rcStrict;
1610#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1611 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1612#endif
1613 }
1614
1615 /* commit */
1616 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1617 if (rcStrict != VINF_SUCCESS)
1618 return rcStrict;
1619 if (enmEffOpSize == IEMMODE_16BIT)
1620 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1621 else
1622 pCtx->rip = uNewRip;
1623 pCtx->cs.Sel = uNewCs;
1624 pCtx->cs.ValidSel = uNewCs;
1625 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1626 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1627 pCtx->cs.u32Limit = cbLimitCs;
1628 pCtx->cs.u64Base = u64Base;
1629 /** @todo check if the hidden bits are loaded correctly for 64-bit
1630 * mode. */
1631 if (cbPop)
1632 iemRegAddToRsp(pCtx, cbPop);
1633 }
1634 return VINF_SUCCESS;
1635}
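#if 0 /* Editor's illustrative sketch (not part of the original file): byte
       * offsets of the stack image consumed by a 32-bit far return to an outer
       * privilege level - first EIP and CS, then, after the cbPop parameter
       * bytes, the outer ESP and SS that the second pop above goes after. */
static void iemHlpSketchRetf32OuterFrameOffsets(uint16_t cbPop,
                                                uint32_t *poffRetEip, uint32_t *poffRetCs,
                                                uint32_t *poffOuterEsp, uint32_t *poffOuterSs)
{
    *poffRetEip   = 0;              /* [ESP+0]          new EIP */
    *poffRetCs    = 4;              /* [ESP+4]          new CS (low word used) */
    *poffOuterEsp = 8 + cbPop;      /* [ESP+8+cbPop]    outer stack pointer */
    *poffOuterSs  = 12 + cbPop;     /* [ESP+12+cbPop]   outer SS (low word used) */
}
#endif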
1636
1637
1638/**
1639 * Implements retn.
1640 *
1641 * We're doing this in C because of the \#GP that might be raised if the popped
1642 * program counter is out of bounds.
1643 *
1644 * @param enmEffOpSize The effective operand size.
1645 * @param cbPop The number of bytes of arguments to pop from the
1646 * stack.
1647 */
1648IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1649{
1650 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1651 NOREF(cbInstr);
1652
1653 /* Fetch the RSP from the stack. */
1654 VBOXSTRICTRC rcStrict;
1655 RTUINT64U NewRip;
1656 RTUINT64U NewRsp;
1657 NewRsp.u = pCtx->rsp;
1658 switch (enmEffOpSize)
1659 {
1660 case IEMMODE_16BIT:
1661 NewRip.u = 0;
1662 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1663 break;
1664 case IEMMODE_32BIT:
1665 NewRip.u = 0;
1666 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1667 break;
1668 case IEMMODE_64BIT:
1669 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1670 break;
1671 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1672 }
1673 if (rcStrict != VINF_SUCCESS)
1674 return rcStrict;
1675
1676 /* Check the new RIP before loading it. */
1677 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1678 * of it. The canonical test is performed here and for call. */
1679 if (enmEffOpSize != IEMMODE_64BIT)
1680 {
1681 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1682 {
1683 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1684 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1685 }
1686 }
1687 else
1688 {
1689 if (!IEM_IS_CANONICAL(NewRip.u))
1690 {
1691 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1692 return iemRaiseNotCanonical(pIemCpu);
1693 }
1694 }
1695
1696 /* Commit it. */
1697 pCtx->rip = NewRip.u;
1698 pCtx->rsp = NewRsp.u;
1699 if (cbPop)
1700 iemRegAddToRsp(pCtx, cbPop);
1701
1702 return VINF_SUCCESS;
1703}
1704
1705
1706/**
1707 * Implements enter.
1708 *
1709 * We're doing this in C because the instruction is insane; even for the
1710 * u8NestingLevel=0 case, dealing with the stack is tedious.
1711 *
1712 * @param enmEffOpSize The effective operand size.
1713 */
1714IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
1715{
1716 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1717
1718 /* Push RBP, saving the old value in TmpRbp. */
1719 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
1720 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
1721 RTUINT64U NewRbp;
1722 VBOXSTRICTRC rcStrict;
1723 if (enmEffOpSize == IEMMODE_64BIT)
1724 {
1725 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
1726 NewRbp = NewRsp;
1727 }
1728 else if (pCtx->ss.Attr.n.u1DefBig)
1729 {
1730 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
1731 NewRbp = NewRsp;
1732 }
1733 else
1734 {
1735 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
1736 NewRbp = TmpRbp;
1737 NewRbp.Words.w0 = NewRsp.Words.w0;
1738 }
1739 if (rcStrict != VINF_SUCCESS)
1740 return rcStrict;
1741
1742 /* Copy the parameters (aka nesting levels by Intel). */
1743 cParameters &= 0x1f;
1744 if (cParameters > 0)
1745 {
1746 switch (enmEffOpSize)
1747 {
1748 case IEMMODE_16BIT:
1749 if (pCtx->ss.Attr.n.u1DefBig)
1750 TmpRbp.DWords.dw0 -= 2;
1751 else
1752 TmpRbp.Words.w0 -= 2;
1753 do
1754 {
1755 uint16_t u16Tmp;
1756 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
1757 if (rcStrict != VINF_SUCCESS)
1758 break;
1759 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
1760 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1761 break;
1762
1763 case IEMMODE_32BIT:
1764 if (pCtx->ss.Attr.n.u1DefBig)
1765 TmpRbp.DWords.dw0 -= 4;
1766 else
1767 TmpRbp.Words.w0 -= 4;
1768 do
1769 {
1770 uint32_t u32Tmp;
1771 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
1772 if (rcStrict != VINF_SUCCESS)
1773 break;
1774 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
1775 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1776 break;
1777
1778 case IEMMODE_64BIT:
1779 TmpRbp.u -= 8;
1780 do
1781 {
1782 uint64_t u64Tmp;
1783 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
1784 if (rcStrict != VINF_SUCCESS)
1785 break;
1786 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
1787 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1788 break;
1789
1790 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1791 }
1792 if (rcStrict != VINF_SUCCESS)
1793 return rcStrict;
1794
1795 /* Push the new RBP */
1796 if (enmEffOpSize == IEMMODE_64BIT)
1797 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
1798 else if (pCtx->ss.Attr.n.u1DefBig)
1799 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
1800 else
1801 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
1802 if (rcStrict != VINF_SUCCESS)
1803 return rcStrict;
1804
1805 }
1806
1807 /* Recalc RSP. */
1808 iemRegSubFromRspEx(&NewRsp, cbFrame, pCtx);
1809
1810 /** @todo Should probe write access at the new RSP according to AMD. */
1811
1812 /* Commit it. */
1813 pCtx->rbp = NewRbp.u;
1814 pCtx->rsp = NewRsp.u;
1815 iemRegAddToRip(pIemCpu, cbInstr);
1816
1817 return VINF_SUCCESS;
1818}
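
/*
 * Note! A minimal, non-compiled sketch of what the cParameters=0 case above
 * boils down to for a 64-bit stack (push rbp; mov rbp, rsp; sub rsp, cbFrame),
 * ignoring faults and the 16/32-bit operand size variants. The helper name is
 * made up for illustration.
 */
#if 0 /* illustrative sketch, not part of the build */
static void sketchEnterLevel0(uint64_t *puRsp, uint64_t *puRbp, uint16_t cbFrame)
{
    uint64_t const uNewRsp = *puRsp - 8; /* push rbp: the old frame pointer would be stored at uNewRsp */
    *puRbp = uNewRsp;                    /* mov rbp, rsp: the new frame pointer marks the saved RBP slot */
    *puRsp = uNewRsp - cbFrame;          /* sub rsp, cbFrame: reserve the local frame below it */
}
#endif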
1819
1820
1821
1822/**
1823 * Implements leave.
1824 *
1825 * We're doing this in C because messing with the stack registers is annoying
1826 * since they depend on the SS attributes.
1827 *
1828 * @param enmEffOpSize The effective operand size.
1829 */
1830IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1831{
1832 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1833
1834 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1835 RTUINT64U NewRsp;
1836 if (pCtx->ss.Attr.n.u1Long)
1837 NewRsp.u = pCtx->rbp;
1838 else if (pCtx->ss.Attr.n.u1DefBig)
1839 NewRsp.u = pCtx->ebp;
1840 else
1841 {
1842 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1843 NewRsp.u = pCtx->rsp;
1844 NewRsp.Words.w0 = pCtx->bp;
1845 }
1846
1847 /* Pop RBP according to the operand size. */
1848 VBOXSTRICTRC rcStrict;
1849 RTUINT64U NewRbp;
1850 switch (enmEffOpSize)
1851 {
1852 case IEMMODE_16BIT:
1853 NewRbp.u = pCtx->rbp;
1854 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1855 break;
1856 case IEMMODE_32BIT:
1857 NewRbp.u = 0;
1858 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1859 break;
1860 case IEMMODE_64BIT:
1861 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1862 break;
1863 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1864 }
1865 if (rcStrict != VINF_SUCCESS)
1866 return rcStrict;
1867
1868
1869 /* Commit it. */
1870 pCtx->rbp = NewRbp.u;
1871 pCtx->rsp = NewRsp.u;
1872 iemRegAddToRip(pIemCpu, cbInstr);
1873
1874 return VINF_SUCCESS;
1875}
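
/*
 * Note! A minimal, non-compiled sketch of the 64-bit LEAVE path above
 * (mov rsp, rbp; pop rbp), ignoring faults and the SS.B dependent 16/32-bit
 * handling. The helper name is made up for illustration; uSavedRbp stands in
 * for the 8 bytes the real code pops off the stack.
 */
#if 0 /* illustrative sketch, not part of the build */
static void sketchLeave64(uint64_t *puRsp, uint64_t *puRbp, uint64_t uSavedRbp)
{
    *puRsp = *puRbp;      /* mov rsp, rbp: discard the local frame */
    *puRbp = uSavedRbp;   /* pop rbp: restore the caller's frame pointer... */
    *puRsp += 8;          /* ...and step RSP past the popped quadword */
}
#endif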
1876
1877
1878/**
1879 * Implements int3 and int XX.
1880 *
1881 * @param u8Int The interrupt vector number.
1882 * @param fIsBpInstr Whether it is the breakpoint instruction (int3).
1883 */
1884IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1885{
1886 Assert(pIemCpu->cXcptRecursions == 0);
1887 return iemRaiseXcptOrInt(pIemCpu,
1888 cbInstr,
1889 u8Int,
1890 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1891 0,
1892 0);
1893}
1894
1895
1896/**
1897 * Implements iret for real mode and V8086 mode.
1898 *
1899 * @param enmEffOpSize The effective operand size.
1900 */
1901IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1902{
1903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1904 NOREF(cbInstr);
1905
1906 /*
1907 * iret throws an exception if VME isn't enabled.
1908 */
1909 if ( pCtx->eflags.Bits.u1VM
1910 && !(pCtx->cr4 & X86_CR4_VME))
1911 return iemRaiseGeneralProtectionFault0(pIemCpu);
1912
1913 /*
1914 * Do the stack bits, but don't commit RSP before everything checks
1915 * out right.
1916 */
1917 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1918 VBOXSTRICTRC rcStrict;
1919 RTCPTRUNION uFrame;
1920 uint16_t uNewCs;
1921 uint32_t uNewEip;
1922 uint32_t uNewFlags;
1923 uint64_t uNewRsp;
1924 if (enmEffOpSize == IEMMODE_32BIT)
1925 {
1926 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1927 if (rcStrict != VINF_SUCCESS)
1928 return rcStrict;
1929 uNewEip = uFrame.pu32[0];
1930 uNewCs = (uint16_t)uFrame.pu32[1];
1931 uNewFlags = uFrame.pu32[2];
1932 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1933 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1934 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1935 | X86_EFL_ID;
1936 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1937 }
1938 else
1939 {
1940 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1941 if (rcStrict != VINF_SUCCESS)
1942 return rcStrict;
1943 uNewEip = uFrame.pu16[0];
1944 uNewCs = uFrame.pu16[1];
1945 uNewFlags = uFrame.pu16[2];
1946 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1947 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1948 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1949 /** @todo The intel pseudo code does not indicate what happens to
1950 * reserved flags. We just ignore them. */
1951 }
1952 /** @todo Check how this is supposed to work if sp=0xfffe. */
1953
1954 /*
1955 * Check the limit of the new EIP.
1956 */
1957 /** @todo Only the AMD pseudo code checks the limit here; what's
1958 * right? */
1959 if (uNewEip > pCtx->cs.u32Limit)
1960 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1961
1962 /*
1963 * V8086 checks and flag adjustments
1964 */
1965 if (pCtx->eflags.Bits.u1VM)
1966 {
1967 if (pCtx->eflags.Bits.u2IOPL == 3)
1968 {
1969 /* Preserve IOPL and clear RF. */
1970 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1971 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1972 }
1973 else if ( enmEffOpSize == IEMMODE_16BIT
1974 && ( !(uNewFlags & X86_EFL_IF)
1975 || !pCtx->eflags.Bits.u1VIP )
1976 && !(uNewFlags & X86_EFL_TF) )
1977 {
1978 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1979 uNewFlags &= ~X86_EFL_VIF;
1980 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1981 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1982 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1983 }
1984 else
1985 return iemRaiseGeneralProtectionFault0(pIemCpu);
1986 }
1987
1988 /*
1989 * Commit the operation.
1990 */
1991 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1992 if (rcStrict != VINF_SUCCESS)
1993 return rcStrict;
1994 pCtx->rip = uNewEip;
1995 pCtx->cs.Sel = uNewCs;
1996 pCtx->cs.ValidSel = uNewCs;
1997 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1998 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1999 /** @todo do we load attribs and limit as well? */
2000 Assert(uNewFlags & X86_EFL_1);
2001 pCtx->eflags.u = uNewFlags;
2002
2003 return VINF_SUCCESS;
2004}
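
/*
 * Note! The flag handling above (and in the protected mode iret below) keeps
 * reusing one merge-under-mask pattern: bits inside the mask come from the
 * popped flags image, everything else keeps its current value. A minimal,
 * non-compiled sketch with a made-up helper name:
 */
#if 0 /* illustrative sketch, not part of the build */
static uint32_t sketchMergeEFlags(uint32_t fCurrent, uint32_t fPopped, uint32_t fMask)
{
    return (fCurrent & ~fMask) | (fPopped & fMask);
}
#endif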
2005
2006
2007/**
2008 * Implements iret for protected mode returning to V8086 mode.
2009 *
2010 * @param enmEffOpSize The effective operand size.
2011 * @param uNewEip The new EIP.
2012 * @param uNewCs The new CS.
2013 * @param uNewFlags The new EFLAGS.
2014 * @param uNewRsp The RSP after the initial IRET frame.
2015 */
2016IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, IEMMODE, enmEffOpSize, uint32_t, uNewEip, uint16_t, uNewCs,
2017 uint32_t, uNewFlags, uint64_t, uNewRsp)
2018{
2019 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2020}
2021
2022
2023/**
2024 * Implements iret for protected mode returning via a nested task.
2025 *
2026 * @param enmEffOpSize The effective operand size.
2027 */
2028IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2029{
2030 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2031}
2032
2033
2034/**
2035 * Implements iret for protected mode.
2036 *
2037 * @param enmEffOpSize The effective operand size.
2038 */
2039IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2040{
2041 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2042 NOREF(cbInstr);
2043
2044 /*
2045 * Nested task return.
2046 */
2047 if (pCtx->eflags.Bits.u1NT)
2048 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2049
2050 /*
2051 * Normal return.
2052 *
2053 * Do the stack bits, but don't commit RSP before everything checks
2054 * out right.
2055 */
2056 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2057 VBOXSTRICTRC rcStrict;
2058 RTCPTRUNION uFrame;
2059 uint16_t uNewCs;
2060 uint32_t uNewEip;
2061 uint32_t uNewFlags;
2062 uint64_t uNewRsp;
2063 if (enmEffOpSize == IEMMODE_32BIT)
2064 {
2065 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2066 if (rcStrict != VINF_SUCCESS)
2067 return rcStrict;
2068 uNewEip = uFrame.pu32[0];
2069 uNewCs = (uint16_t)uFrame.pu32[1];
2070 uNewFlags = uFrame.pu32[2];
2071 }
2072 else
2073 {
2074 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2075 if (rcStrict != VINF_SUCCESS)
2076 return rcStrict;
2077 uNewEip = uFrame.pu16[0];
2078 uNewCs = uFrame.pu16[1];
2079 uNewFlags = uFrame.pu16[2];
2080 }
2081 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084
2085 /*
2086 * We're hopefully not returning to V8086 mode...
2087 */
2088 if ( (uNewFlags & X86_EFL_VM)
2089 && pIemCpu->uCpl == 0)
2090 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, enmEffOpSize, uNewEip, uNewCs, uNewFlags, uNewRsp);
2091
2092 /*
2093 * Protected mode.
2094 */
2095 /* Read the CS descriptor. */
2096 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2097 {
2098 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
2099 return iemRaiseGeneralProtectionFault0(pIemCpu);
2100 }
2101
2102 IEMSELDESC DescCS;
2103 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2104 if (rcStrict != VINF_SUCCESS)
2105 {
2106 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2107 return rcStrict;
2108 }
2109
2110 /* Must be a code descriptor. */
2111 if (!DescCS.Legacy.Gen.u1DescType)
2112 {
2113 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2114 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2115 }
2116 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2117 {
2118 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2119 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2120 }
2121
2122 /* Privilege checks. */
2123 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2124 {
2125 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2126 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2127 }
2128 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2129 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2130 {
2131 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2132 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2133 }
2134
2135 /* Present? */
2136 if (!DescCS.Legacy.Gen.u1Present)
2137 {
2138 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2139 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2140 }
2141
2142 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2143
2144 /*
2145 * Return to outer level?
2146 */
2147 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2148 {
2149 uint16_t uNewSS;
2150 uint32_t uNewESP;
2151 if (enmEffOpSize == IEMMODE_32BIT)
2152 {
2153 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2154 if (rcStrict != VINF_SUCCESS)
2155 return rcStrict;
2156 uNewESP = uFrame.pu32[0];
2157 uNewSS = (uint16_t)uFrame.pu32[1];
2158 }
2159 else
2160 {
2161 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2162 if (rcStrict != VINF_SUCCESS)
2163 return rcStrict;
2164 uNewESP = uFrame.pu16[0];
2165 uNewSS = uFrame.pu16[1];
2166 }
2167 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2168 if (rcStrict != VINF_SUCCESS)
2169 return rcStrict;
2170
2171 /* Read the SS descriptor. */
2172 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2173 {
2174 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2175 return iemRaiseGeneralProtectionFault0(pIemCpu);
2176 }
2177
2178 IEMSELDESC DescSS;
2179 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2180 if (rcStrict != VINF_SUCCESS)
2181 {
2182 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2183 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2184 return rcStrict;
2185 }
2186
2187 /* Privilege checks. */
2188 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2189 {
2190 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2191 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2192 }
2193 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2194 {
2195 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2196 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2197 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2198 }
2199
2200 /* Must be a writeable data segment descriptor. */
2201 if (!DescSS.Legacy.Gen.u1DescType)
2202 {
2203 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2204 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2205 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2206 }
2207 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2208 {
2209 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2210 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2211 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2212 }
2213
2214 /* Present? */
2215 if (!DescSS.Legacy.Gen.u1Present)
2216 {
2217 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2218 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2219 }
2220
2221 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2222
2223 /* Check EIP. */
2224 if (uNewEip > cbLimitCS)
2225 {
2226 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2227 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2228 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2229 }
2230
2231 /*
2232 * Commit the changes, marking CS and SS accessed first since
2233 * that may fail.
2234 */
2235 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2236 {
2237 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2238 if (rcStrict != VINF_SUCCESS)
2239 return rcStrict;
2240 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2241 }
2242 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2243 {
2244 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2245 if (rcStrict != VINF_SUCCESS)
2246 return rcStrict;
2247 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2248 }
2249
2250 pCtx->rip = uNewEip;
2251 pCtx->cs.Sel = uNewCs;
2252 pCtx->cs.ValidSel = uNewCs;
2253 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2254 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2255 pCtx->cs.u32Limit = cbLimitCS;
2256 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2257 pCtx->rsp = uNewESP;
2258 pCtx->ss.Sel = uNewSS;
2259 pCtx->ss.ValidSel = uNewSS;
2260 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2261 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2262 pCtx->ss.u32Limit = cbLimitSs;
2263 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2264
2265 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2266 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2267 if (enmEffOpSize != IEMMODE_16BIT)
2268 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2269 if (pIemCpu->uCpl == 0)
2270 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2271 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2272 fEFlagsMask |= X86_EFL_IF;
2273 pCtx->eflags.u &= ~fEFlagsMask;
2274 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2275
2276 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2277 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2278 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2279 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2280 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2281
2282 /* Done! */
2283
2284 }
2285 /*
2286 * Return to the same level.
2287 */
2288 else
2289 {
2290 /* Check EIP. */
2291 if (uNewEip > cbLimitCS)
2292 {
2293 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2294 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2295 }
2296
2297 /*
2298 * Commit the changes, marking CS first since it may fail.
2299 */
2300 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2301 {
2302 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2303 if (rcStrict != VINF_SUCCESS)
2304 return rcStrict;
2305 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2306 }
2307
2308 pCtx->rip = uNewEip;
2309 pCtx->cs.Sel = uNewCs;
2310 pCtx->cs.ValidSel = uNewCs;
2311 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2312 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2313 pCtx->cs.u32Limit = cbLimitCS;
2314 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2315 pCtx->rsp = uNewRsp;
2316
2317 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2318 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2319 if (enmEffOpSize != IEMMODE_16BIT)
2320 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2321 if (pIemCpu->uCpl == 0)
2322 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2323 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2324 fEFlagsMask |= X86_EFL_IF;
2325 pCtx->eflags.u &= ~fEFlagsMask;
2326 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2327 /* Done! */
2328 }
2329 return VINF_SUCCESS;
2330}
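
/*
 * Note! A minimal, non-compiled sketch of how the EFLAGS restore mask is
 * built in both commit paths of iemCImpl_iret_prot above; the helper name and
 * parameters are made up for illustration.
 */
#if 0 /* illustrative sketch, not part of the build */
static uint32_t sketchIretProtEFlagsMask(IEMMODE enmEffOpSize, uint8_t uCpl, uint8_t uIopl)
{
    uint32_t fMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                   | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
    if (enmEffOpSize != IEMMODE_16BIT)
        fMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;   /* only present in a 32-bit frame */
    if (uCpl == 0)
        fMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* ring-0 may change everything but VM */
    else if (uCpl <= uIopl)
        fMask |= X86_EFL_IF;                             /* IF is the only privileged bit below ring-0 */
    return fMask;
}
#endif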
2331
2332
2333/**
2334 * Implements iret for long mode.
2335 *
2336 * @param enmEffOpSize The effective operand size.
2337 */
2338IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2339{
2340 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2341 //VBOXSTRICTRC rcStrict;
2342 //uint64_t uNewRsp;
2343
2344 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2345 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2346}
2347
2348
2349/**
2350 * Implements iret.
2351 *
2352 * @param enmEffOpSize The effective operand size.
2353 */
2354IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2355{
2356 /*
2357 * Call a mode specific worker.
2358 */
2359 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2360 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2361 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2362 if (IEM_IS_LONG_MODE(pIemCpu))
2363 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2364
2365 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2366}
2367
2368
2369/**
2370 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2371 *
2372 * @param iSegReg The segment register number (valid).
2373 * @param uSel The new selector value.
2374 */
2375IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2376{
2377 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2378 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2379 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2380
2381 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2382
2383 /*
2384 * Real mode and V8086 mode are easy.
2385 */
2386 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2387 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2388 {
2389 *pSel = uSel;
2390 pHid->u64Base = (uint32_t)uSel << 4;
2391 pHid->ValidSel = uSel;
2392 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2393#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2394 /** @todo Does the CPU actually load limits and attributes in the
2395 * real/V8086 mode segment load case? It doesn't for CS in far
2396 * jumps... Affects unreal mode. */
2397 pHid->u32Limit = 0xffff;
2398 pHid->Attr.u = 0;
2399 pHid->Attr.n.u1Present = 1;
2400 pHid->Attr.n.u1DescType = 1;
2401 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2402 ? X86_SEL_TYPE_RW
2403 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2404#endif
2405 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2406 iemRegAddToRip(pIemCpu, cbInstr);
2407 return VINF_SUCCESS;
2408 }
2409
2410 /*
2411 * Protected mode.
2412 *
2413 * Check if it's a null segment selector value first, that's OK for DS, ES,
2414 * FS and GS. If not null, then we have to load and parse the descriptor.
2415 */
2416 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2417 {
2418 if (iSegReg == X86_SREG_SS)
2419 {
2420 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2421 || pIemCpu->uCpl != 0
2422 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2423 {
2424 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2425 return iemRaiseGeneralProtectionFault0(pIemCpu);
2426 }
2427
2428 /* In 64-bit kernel mode, the stack can be 0 because of the way
2429 interrupts are dispatched when in kernel ctx. Just load the
2430 selector value into the register and leave the hidden bits
2431 as is. */
2432 *pSel = uSel;
2433 pHid->ValidSel = uSel;
2434 iemRegAddToRip(pIemCpu, cbInstr);
2435 return VINF_SUCCESS;
2436 }
2437
2438 *pSel = uSel; /* Not RPL, remember :-) */
2439 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2440 && iSegReg != X86_SREG_FS
2441 && iSegReg != X86_SREG_GS)
2442 {
2443 /** @todo figure out what this actually does, it works. Needs
2444 * testcase! */
2445 pHid->Attr.u = 0;
2446 pHid->Attr.n.u1Present = 1;
2447 pHid->Attr.n.u1Long = 1;
2448 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2449 pHid->Attr.n.u2Dpl = 3;
2450 pHid->u32Limit = 0;
2451 pHid->u64Base = 0;
2452 pHid->ValidSel = uSel;
2453 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2454 }
2455 else
2456 iemHlpLoadNullDataSelectorProt(pHid, uSel);
2457 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2458 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2459
2460 iemRegAddToRip(pIemCpu, cbInstr);
2461 return VINF_SUCCESS;
2462 }
2463
2464 /* Fetch the descriptor. */
2465 IEMSELDESC Desc;
2466 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2467 if (rcStrict != VINF_SUCCESS)
2468 return rcStrict;
2469
2470 /* Check GPs first. */
2471 if (!Desc.Legacy.Gen.u1DescType)
2472 {
2473 Log(("load sreg%u, %#x - system selector (type %x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2474 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2475 }
2476 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2477 {
2478 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2479 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2480 {
2481 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2482 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2483 }
2484 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2485 {
2486 Log(("load sreg SS, %#x - RPL and CPL (%d) differ -> #GP\n", uSel, pIemCpu->uCpl));
2487 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2488 }
2489 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2490 {
2491 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differ -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2492 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2493 }
2494 }
2495 else
2496 {
2497 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2498 {
2499 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2500 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2501 }
2502 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2503 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2504 {
2505#if 0 /* this is what intel says. */
2506 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2507 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2508 {
2509 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2510 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2511 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2512 }
2513#else /* this is what makes more sense. */
2514 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2515 {
2516 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2517 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2518 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2519 }
2520 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2521 {
2522 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2523 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2524 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2525 }
2526#endif
2527 }
2528 }
2529
2530 /* Is it there? */
2531 if (!Desc.Legacy.Gen.u1Present)
2532 {
2533 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2534 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2535 }
2536
2537 /* The base and limit. */
2538 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2539 uint64_t u64Base;
2540 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2541 && iSegReg < X86_SREG_FS)
2542 u64Base = 0;
2543 else
2544 u64Base = X86DESC_BASE(&Desc.Legacy);
2545
2546 /*
2547 * Ok, everything checked out fine. Now set the accessed bit before
2548 * committing the result into the registers.
2549 */
2550 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2551 {
2552 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2553 if (rcStrict != VINF_SUCCESS)
2554 return rcStrict;
2555 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2556 }
2557
2558 /* commit */
2559 *pSel = uSel;
2560 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2561 pHid->u32Limit = cbLimit;
2562 pHid->u64Base = u64Base;
2563 pHid->ValidSel = uSel;
2564 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2565
2566 /** @todo check if the hidden bits are loaded correctly for 64-bit
2567 * mode. */
2568 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2569
2570 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2571 iemRegAddToRip(pIemCpu, cbInstr);
2572 return VINF_SUCCESS;
2573}
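
/*
 * Note! A minimal, non-compiled sketch of the selector fields the checks in
 * iemCImpl_LoadSReg operate on (X86_SEL_RPL, X86_SEL_LDT and
 * X86_SEL_MASK_OFF_RPL); the helper name is made up for illustration.
 */
#if 0 /* illustrative sketch, not part of the build */
static void sketchDecodeSelector(uint16_t uSel)
{
    unsigned const uRpl    = uSel & 3;          /* requested privilege level (X86_SEL_RPL) */
    bool     const fInLdt  = (uSel & 4) != 0;   /* table indicator: 1 = LDT (X86_SEL_LDT) */
    unsigned const iDesc   = uSel >> 3;         /* descriptor index; the descriptor lives at table base + iDesc * 8 */
    bool     const fIsNull = (uSel & ~(uint16_t)3) == 0; /* GDT index 0, i.e. (uSel & X86_SEL_MASK_OFF_RPL) == 0 */
    (void)uRpl; (void)fInLdt; (void)iDesc; (void)fIsNull;
}
#endif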
2574
2575
2576/**
2577 * Implements 'mov SReg, r/m'.
2578 *
2579 * @param iSegReg The segment register number (valid).
2580 * @param uSel The new selector value.
2581 */
2582IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2583{
2584 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2585 if (rcStrict == VINF_SUCCESS)
2586 {
2587 if (iSegReg == X86_SREG_SS)
2588 {
2589 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2590 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2591 }
2592 }
2593 return rcStrict;
2594}
2595
2596
2597/**
2598 * Implements 'pop SReg'.
2599 *
2600 * @param iSegReg The segment register number (valid).
2601 * @param enmEffOpSize The effective operand size (valid).
2602 */
2603IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2604{
2605 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2606 VBOXSTRICTRC rcStrict;
2607
2608 /*
2609 * Read the selector off the stack and join paths with mov ss, reg.
2610 */
2611 RTUINT64U TmpRsp;
2612 TmpRsp.u = pCtx->rsp;
2613 switch (enmEffOpSize)
2614 {
2615 case IEMMODE_16BIT:
2616 {
2617 uint16_t uSel;
2618 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2619 if (rcStrict == VINF_SUCCESS)
2620 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2621 break;
2622 }
2623
2624 case IEMMODE_32BIT:
2625 {
2626 uint32_t u32Value;
2627 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2628 if (rcStrict == VINF_SUCCESS)
2629 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2630 break;
2631 }
2632
2633 case IEMMODE_64BIT:
2634 {
2635 uint64_t u64Value;
2636 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2637 if (rcStrict == VINF_SUCCESS)
2638 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2639 break;
2640 }
2641 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2642 }
2643
2644 /*
2645 * Commit the stack on success.
2646 */
2647 if (rcStrict == VINF_SUCCESS)
2648 {
2649 pCtx->rsp = TmpRsp.u;
2650 if (iSegReg == X86_SREG_SS)
2651 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2652 }
2653 return rcStrict;
2654}
2655
2656
2657/**
2658 * Implements lgs, lfs, les, lds & lss.
2659 */
2660IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2661 uint16_t, uSel,
2662 uint64_t, offSeg,
2663 uint8_t, iSegReg,
2664 uint8_t, iGReg,
2665 IEMMODE, enmEffOpSize)
2666{
2667 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2668 VBOXSTRICTRC rcStrict;
2669
2670 /*
2671 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2672 */
2673 /** @todo verify and test that mov, pop and lXs works the segment
2674 * register loading in the exact same way. */
2675 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2676 if (rcStrict == VINF_SUCCESS)
2677 {
2678 switch (enmEffOpSize)
2679 {
2680 case IEMMODE_16BIT:
2681 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2682 break;
2683 case IEMMODE_32BIT:
2684 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2685 break;
2686 case IEMMODE_64BIT:
2687 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2688 break;
2689 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2690 }
2691 }
2692
2693 return rcStrict;
2694}
2695
2696
2697/**
2698 * Implements lgdt.
2699 *
2700 * @param iEffSeg The segment of the new gdtr contents.
2701 * @param GCPtrEffSrc The address of the new gdtr contents.
2702 * @param enmEffOpSize The effective operand size.
2703 */
2704IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2705{
2706 if (pIemCpu->uCpl != 0)
2707 return iemRaiseGeneralProtectionFault0(pIemCpu);
2708 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2709
2710 /*
2711 * Fetch the limit and base address.
2712 */
2713 uint16_t cbLimit;
2714 RTGCPTR GCPtrBase;
2715 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2716 if (rcStrict == VINF_SUCCESS)
2717 {
2718 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2719 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2720 else
2721 {
2722 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2723 pCtx->gdtr.cbGdt = cbLimit;
2724 pCtx->gdtr.pGdt = GCPtrBase;
2725 }
2726 if (rcStrict == VINF_SUCCESS)
2727 iemRegAddToRip(pIemCpu, cbInstr);
2728 }
2729 return rcStrict;
2730}
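
/*
 * Note! A minimal, non-compiled sketch of the pseudo-descriptor memory
 * operand that LGDT/LIDT read via iemMemFetchDataXdtr above: a 16-bit limit
 * followed by the base address (of which only the low 24 bits are used with a
 * 16-bit operand size). The packed structs and their names are made up for
 * illustration; the real code fetches the fields individually.
 */
#if 0 /* illustrative sketch, not part of the build */
#pragma pack(1)
typedef struct SKETCHXDTR32 { uint16_t cbLimit; uint32_t uBase; } SKETCHXDTR32; /* 6 bytes: 16/32-bit operand */
typedef struct SKETCHXDTR64 { uint16_t cbLimit; uint64_t uBase; } SKETCHXDTR64; /* 10 bytes: 64-bit mode */
#pragma pack()
#endif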
2731
2732
2733/**
2734 * Implements sgdt.
2735 *
2736 * @param iEffSeg The segment where to store the gdtr content.
2737 * @param GCPtrEffDst The address where to store the gdtr content.
2738 * @param enmEffOpSize The effective operand size.
2739 */
2740IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2741{
2742 /*
2743 * Join paths with sidt.
2744 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2745 * you really must know.
2746 */
2747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2748 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2749 if (rcStrict == VINF_SUCCESS)
2750 iemRegAddToRip(pIemCpu, cbInstr);
2751 return rcStrict;
2752}
2753
2754
2755/**
2756 * Implements lidt.
2757 *
2758 * @param iEffSeg The segment of the new idtr contents.
2759 * @param GCPtrEffSrc The address of the new idtr contents.
2760 * @param enmEffOpSize The effective operand size.
2761 */
2762IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2763{
2764 if (pIemCpu->uCpl != 0)
2765 return iemRaiseGeneralProtectionFault0(pIemCpu);
2766 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2767
2768 /*
2769 * Fetch the limit and base address.
2770 */
2771 uint16_t cbLimit;
2772 RTGCPTR GCPtrBase;
2773 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2774 if (rcStrict == VINF_SUCCESS)
2775 {
2776 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2777 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2778 else
2779 {
2780 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2781 pCtx->idtr.cbIdt = cbLimit;
2782 pCtx->idtr.pIdt = GCPtrBase;
2783 }
2784 iemRegAddToRip(pIemCpu, cbInstr);
2785 }
2786 return rcStrict;
2787}
2788
2789
2790/**
2791 * Implements sidt.
2792 *
2793 * @param iEffSeg The segment where to store the idtr content.
2794 * @param GCPtrEffDst The address where to store the idtr content.
2795 * @param enmEffOpSize The effective operand size.
2796 */
2797IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2798{
2799 /*
2800 * Join paths with sgdt.
2801 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2802 * you really must know.
2803 */
2804 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2805 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2806 if (rcStrict == VINF_SUCCESS)
2807 iemRegAddToRip(pIemCpu, cbInstr);
2808 return rcStrict;
2809}
2810
2811
2812/**
2813 * Implements lldt.
2814 *
2815 * @param uNewLdt The new LDT selector value.
2816 */
2817IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2818{
2819 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2820
2821 /*
2822 * Check preconditions.
2823 */
2824 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2825 {
2826 Log(("lldt %04x - real or v8086 mode -> #UD\n", uNewLdt));
2827 return iemRaiseUndefinedOpcode(pIemCpu);
2828 }
2829 if (pIemCpu->uCpl != 0)
2830 {
2831 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2832 return iemRaiseGeneralProtectionFault0(pIemCpu);
2833 }
2834 if (uNewLdt & X86_SEL_LDT)
2835 {
2836 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2837 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2838 }
2839
2840 /*
2841 * Now, loading a NULL selector is easy.
2842 */
2843 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2844 {
2845 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2846 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2847 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
2848 else
2849 pCtx->ldtr.Sel = uNewLdt;
2850 pCtx->ldtr.ValidSel = uNewLdt;
2851 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2852 if (IEM_IS_GUEST_CPU_AMD(pIemCpu) && !IEM_VERIFICATION_ENABLED(pIemCpu))
2853 pCtx->ldtr.Attr.u = 0;
2854 else
2855 {
2856 pCtx->ldtr.u64Base = 0;
2857 pCtx->ldtr.u32Limit = 0;
2858 }
2859
2860 iemRegAddToRip(pIemCpu, cbInstr);
2861 return VINF_SUCCESS;
2862 }
2863
2864 /*
2865 * Read the descriptor.
2866 */
2867 IEMSELDESC Desc;
2868 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2869 if (rcStrict != VINF_SUCCESS)
2870 return rcStrict;
2871
2872 /* Check GPs first. */
2873 if (Desc.Legacy.Gen.u1DescType)
2874 {
2875 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2876 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2877 }
2878 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2879 {
2880 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2881 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2882 }
2883 uint64_t u64Base;
2884 if (!IEM_IS_LONG_MODE(pIemCpu))
2885 u64Base = X86DESC_BASE(&Desc.Legacy);
2886 else
2887 {
2888 if (Desc.Long.Gen.u5Zeros)
2889 {
2890 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2891 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 u64Base = X86DESC64_BASE(&Desc.Long);
2895 if (!IEM_IS_CANONICAL(u64Base))
2896 {
2897 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2898 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2899 }
2900 }
2901
2902 /* NP */
2903 if (!Desc.Legacy.Gen.u1Present)
2904 {
2905 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2906 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2907 }
2908
2909 /*
2910 * It checks out alright, update the registers.
2911 */
2912/** @todo check if the actual value is loaded or if the RPL is dropped */
2913 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2914 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
2915 else
2916 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
2917 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
2918 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2919 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2920 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
2921 pCtx->ldtr.u64Base = u64Base;
2922
2923 iemRegAddToRip(pIemCpu, cbInstr);
2924 return VINF_SUCCESS;
2925}
2926
2927
2928/**
2929 * Implements ltr.
2930 *
2931 * @param uNewTr The new task register (TSS) selector value.
2932 */
2933IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2934{
2935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2936
2937 /*
2938 * Check preconditions.
2939 */
2940 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2941 {
2942 Log(("ltr %04x - real or v8086 mode -> #UD\n", uNewTr));
2943 return iemRaiseUndefinedOpcode(pIemCpu);
2944 }
2945 if (pIemCpu->uCpl != 0)
2946 {
2947 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2948 return iemRaiseGeneralProtectionFault0(pIemCpu);
2949 }
2950 if (uNewTr & X86_SEL_LDT)
2951 {
2952 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2953 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2954 }
2955 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
2956 {
2957 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2958 return iemRaiseGeneralProtectionFault0(pIemCpu);
2959 }
2960
2961 /*
2962 * Read the descriptor.
2963 */
2964 IEMSELDESC Desc;
2965 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2966 if (rcStrict != VINF_SUCCESS)
2967 return rcStrict;
2968
2969 /* Check GPs first. */
2970 if (Desc.Legacy.Gen.u1DescType)
2971 {
2972 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2973 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2974 }
2975 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2976 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2977 || IEM_IS_LONG_MODE(pIemCpu)) )
2978 {
2979 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2980 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2981 }
2982 uint64_t u64Base;
2983 if (!IEM_IS_LONG_MODE(pIemCpu))
2984 u64Base = X86DESC_BASE(&Desc.Legacy);
2985 else
2986 {
2987 if (Desc.Long.Gen.u5Zeros)
2988 {
2989 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2990 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2991 }
2992
2993 u64Base = X86DESC64_BASE(&Desc.Long);
2994 if (!IEM_IS_CANONICAL(u64Base))
2995 {
2996 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2997 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2998 }
2999 }
3000
3001 /* NP */
3002 if (!Desc.Legacy.Gen.u1Present)
3003 {
3004 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
3005 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
3006 }
3007
3008 /*
3009 * Set it busy.
3010 * Note! Intel says this should lock down the whole descriptor, but we'll
3011 * restrict ourselves to 32-bit for now due to lack of inline
3012 * assembly and such.
3013 */
3014 void *pvDesc;
3015 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
3016 if (rcStrict != VINF_SUCCESS)
3017 return rcStrict;
3018 switch ((uintptr_t)pvDesc & 3)
3019 {
3020 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
3021 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
3022 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 16); break;
3023 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 8); break;
3024 }
3025 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
3026 if (rcStrict != VINF_SUCCESS)
3027 return rcStrict;
3028 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3029
3030 /*
3031 * It checks out alright, update the registers.
3032 */
3033/** @todo check if the actual value is loaded or if the RPL is dropped */
3034 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3035 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
3036 else
3037 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
3038 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
3039 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3040 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3041 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3042 pCtx->tr.u64Base = u64Base;
3043
3044 iemRegAddToRip(pIemCpu, cbInstr);
3045 return VINF_SUCCESS;
3046}
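
/*
 * Note! The ASMAtomicBitSet calls above target bit 41 of the 8-byte TSS
 * descriptor: the type field starts at bit 40 and its bit 1 is the busy bit.
 * A minimal, non-compiled and non-atomic sketch of the same operation on a
 * plain byte buffer; the helper name is made up for illustration.
 */
#if 0 /* illustrative sketch, not part of the build */
static void sketchSetTssBusyBit(uint8_t *pabDesc /* 8 byte descriptor, little endian */)
{
    pabDesc[5] |= 0x02; /* bit 41 = byte 5 (41 / 8), bit 1 (41 % 8) */
}
#endif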
3047
3048
3049/**
3050 * Implements mov GReg,CRx.
3051 *
3052 * @param iGReg The general register to store the CRx value in.
3053 * @param iCrReg The CRx register to read (valid).
3054 */
3055IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
3056{
3057 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3058 if (pIemCpu->uCpl != 0)
3059 return iemRaiseGeneralProtectionFault0(pIemCpu);
3060 Assert(!pCtx->eflags.Bits.u1VM);
3061
3062 /* read it */
3063 uint64_t crX;
3064 switch (iCrReg)
3065 {
3066 case 0: crX = pCtx->cr0; break;
3067 case 2: crX = pCtx->cr2; break;
3068 case 3: crX = pCtx->cr3; break;
3069 case 4: crX = pCtx->cr4; break;
3070 case 8:
3071 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3072 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
3073 else
3074 crX = 0xff;
3075 break;
3076 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3077 }
3078
3079 /* store it */
3080 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3081 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
3082 else
3083 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
3084
3085 iemRegAddToRip(pIemCpu, cbInstr);
3086 return VINF_SUCCESS;
3087}
3088
3089
3090/**
3091 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
3092 *
3093 * @param iCrReg The CRx register to write (valid).
3094 * @param uNewCrX The new value.
3095 */
3096IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
3097{
3098 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3099 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3100 VBOXSTRICTRC rcStrict;
3101 int rc;
3102
3103 /*
3104 * Try store it.
3105 * Unfortunately, CPUM only does a tiny bit of the work.
3106 */
3107 switch (iCrReg)
3108 {
3109 case 0:
3110 {
3111 /*
3112 * Perform checks.
3113 */
3114 uint64_t const uOldCrX = pCtx->cr0;
3115 uNewCrX |= X86_CR0_ET; /* hardcoded */
3116
3117 /* Check for reserved bits. */
3118 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
3119 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
3120 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
3121 if (uNewCrX & ~(uint64_t)fValid)
3122 {
3123 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3124 return iemRaiseGeneralProtectionFault0(pIemCpu);
3125 }
3126
3127 /* Check for invalid combinations. */
3128 if ( (uNewCrX & X86_CR0_PG)
3129 && !(uNewCrX & X86_CR0_PE) )
3130 {
3131 Log(("Trying to set CR0.PG without CR0.PE\n"));
3132 return iemRaiseGeneralProtectionFault0(pIemCpu);
3133 }
3134
3135 if ( !(uNewCrX & X86_CR0_CD)
3136 && (uNewCrX & X86_CR0_NW) )
3137 {
3138 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
3139 return iemRaiseGeneralProtectionFault0(pIemCpu);
3140 }
3141
3142 /* Long mode consistency checks. */
3143 if ( (uNewCrX & X86_CR0_PG)
3144 && !(uOldCrX & X86_CR0_PG)
3145 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3146 {
3147 if (!(pCtx->cr4 & X86_CR4_PAE))
3148 {
3149 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
3150 return iemRaiseGeneralProtectionFault0(pIemCpu);
3151 }
3152 if (pCtx->cs.Attr.n.u1Long)
3153 {
3154 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
3155 return iemRaiseGeneralProtectionFault0(pIemCpu);
3156 }
3157 }
3158
3159 /** @todo check reserved PDPTR bits as AMD states. */
3160
3161 /*
3162 * Change CR0.
3163 */
3164 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3165 CPUMSetGuestCR0(pVCpu, uNewCrX);
3166 else
3167 pCtx->cr0 = uNewCrX;
3168 Assert(pCtx->cr0 == uNewCrX);
3169
3170 /*
3171 * Change EFER.LMA if entering or leaving long mode.
3172 */
3173 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3174 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3175 {
3176 uint64_t NewEFER = pCtx->msrEFER;
3177 if (uNewCrX & X86_CR0_PG)
3178 NewEFER |= MSR_K6_EFER_LME;
3179 else
3180 NewEFER &= ~MSR_K6_EFER_LME;
3181
3182 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3183 CPUMSetGuestEFER(pVCpu, NewEFER);
3184 else
3185 pCtx->msrEFER = NewEFER;
3186 Assert(pCtx->msrEFER == NewEFER);
3187 }
3188
3189 /*
3190 * Inform PGM.
3191 */
3192 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3193 {
3194 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3195 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3196 {
3197 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3198 AssertRCReturn(rc, rc);
3199 /* ignore informational status codes */
3200 }
3201 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3202 }
3203 else
3204 rcStrict = VINF_SUCCESS;
3205 break;
3206 }
3207
3208 /*
3209 * CR2 can be changed without any restrictions.
3210 */
3211 case 2:
3212 pCtx->cr2 = uNewCrX;
3213 rcStrict = VINF_SUCCESS;
3214 break;
3215
3216 /*
3217 * CR3 is relatively simple, although AMD and Intel have different
3218 * accounts of how setting reserved bits is handled. We take Intel's
3219 * word for the lower bits and AMD's for the high bits (63:52).
3220 */
3221 /** @todo Testcase: Setting reserved bits in CR3, especially before
3222 * enabling paging. */
3223 case 3:
3224 {
3225 /* check / mask the value. */
3226 if (uNewCrX & UINT64_C(0xfff0000000000000))
3227 {
3228 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3229 return iemRaiseGeneralProtectionFault0(pIemCpu);
3230 }
3231
3232 uint64_t fValid;
3233 if ( (pCtx->cr4 & X86_CR4_PAE)
3234 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3235 fValid = UINT64_C(0x000ffffffffff014);
3236 else if (pCtx->cr4 & X86_CR4_PAE)
3237 fValid = UINT64_C(0xfffffff4);
3238 else
3239 fValid = UINT64_C(0xfffff014);
3240 if (uNewCrX & ~fValid)
3241 {
3242 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3243 uNewCrX, uNewCrX & ~fValid));
3244 uNewCrX &= fValid;
3245 }
3246
3247 /** @todo If we're in PAE mode we should check the PDPTRs for
3248 * invalid bits. */
3249
3250 /* Make the change. */
3251 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3252 {
3253 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3254 AssertRCSuccessReturn(rc, rc);
3255 }
3256 else
3257 pCtx->cr3 = uNewCrX;
3258
3259 /* Inform PGM. */
3260 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3261 {
3262 if (pCtx->cr0 & X86_CR0_PG)
3263 {
3264 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3265 AssertRCReturn(rc, rc);
3266 /* ignore informational status codes */
3267 }
3268 }
3269 rcStrict = VINF_SUCCESS;
3270 break;
3271 }
3272
3273 /*
3274 * CR4 is a bit more tedious as there are bits which cannot be cleared
3275 * under some circumstances and such.
3276 */
3277 case 4:
3278 {
3279 uint64_t const uOldCrX = pCtx->cr4;
3280
3281 /* reserved bits */
3282 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3283 | X86_CR4_TSD | X86_CR4_DE
3284 | X86_CR4_PSE | X86_CR4_PAE
3285 | X86_CR4_MCE | X86_CR4_PGE
3286 | X86_CR4_PCE | X86_CR4_OSFSXR
3287 | X86_CR4_OSXMMEEXCPT;
3288 //if (xxx)
3289 // fValid |= X86_CR4_VMXE;
3290 //if (xxx)
3291 // fValid |= X86_CR4_OSXSAVE;
3292 if (uNewCrX & ~(uint64_t)fValid)
3293 {
3294 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3295 return iemRaiseGeneralProtectionFault0(pIemCpu);
3296 }
3297
3298 /* long mode checks. */
3299 if ( (uOldCrX & X86_CR4_PAE)
3300 && !(uNewCrX & X86_CR4_PAE)
3301 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3302 {
3303 Log(("Trying to clear CR4.PAE while long mode is active\n"));
3304 return iemRaiseGeneralProtectionFault0(pIemCpu);
3305 }
3306
3307
3308 /*
3309 * Change it.
3310 */
3311 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3312 {
3313 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3314 AssertRCSuccessReturn(rc, rc);
3315 }
3316 else
3317 pCtx->cr4 = uNewCrX;
3318 Assert(pCtx->cr4 == uNewCrX);
3319
3320 /*
3321 * Notify SELM and PGM.
3322 */
3323 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3324 {
3325 /* SELM - VME may change things wrt to the TSS shadowing. */
3326 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3327 {
3328 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3329 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3330 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3331 }
3332
3333 /* PGM - flushing and mode. */
3334 if ( (uNewCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3335 != (uOldCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE)) )
3336 {
3337 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3338 AssertRCReturn(rc, rc);
3339 /* ignore informational status codes */
3340 }
3341 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3342 }
3343 else
3344 rcStrict = VINF_SUCCESS;
3345 break;
3346 }
3347
3348 /*
3349 * CR8 maps to the APIC TPR.
3350 */
3351 case 8:
3352 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3353 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR write\n")); /** @todo implement CR8 reading and writing. */
3354 else
3355 rcStrict = VINF_SUCCESS;
3356 break;
3357
3358 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3359 }
3360
3361 /*
3362 * Advance the RIP on success.
3363 */
3364 if (RT_SUCCESS(rcStrict))
3365 {
3366 if (rcStrict != VINF_SUCCESS)
3367 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3368 iemRegAddToRip(pIemCpu, cbInstr);
3369 }
3370
3371 return rcStrict;
3372}
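
/*
 * Note! A minimal, non-compiled sketch of the CR0 sanity checks performed in
 * the iCrReg==0 case above, reduced to a simple predicate. The helper name is
 * made up for illustration and the long mode / EFER.LME interaction is left
 * out on purpose.
 */
#if 0 /* illustrative sketch, not part of the build */
static bool sketchIsCr0LoadValid(uint64_t uNewCr0)
{
    uint64_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
    if (uNewCr0 & ~fValid)
        return false;                                         /* reserved bits must be zero */
    if ((uNewCr0 & X86_CR0_PG) && !(uNewCr0 & X86_CR0_PE))
        return false;                                         /* paging requires protected mode */
    if ((uNewCr0 & X86_CR0_NW) && !(uNewCr0 & X86_CR0_CD))
        return false;                                         /* NW without CD is invalid */
    return true;
}
#endif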
3373
3374
3375/**
3376 * Implements mov CRx,GReg.
3377 *
3378 * @param iCrReg The CRx register to write (valid).
3379 * @param iGReg The general register to load the CRx value from.
3380 */
3381IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3382{
3383 if (pIemCpu->uCpl != 0)
3384 return iemRaiseGeneralProtectionFault0(pIemCpu);
3385 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3386
3387 /*
3388 * Read the new value from the source register and call common worker.
3389 */
3390 uint64_t uNewCrX;
3391 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3392 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3393 else
3394 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3395 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3396}
3397
3398
3399/**
3400 * Implements 'LMSW r/m16'
3401 *
3402 * @param u16NewMsw The new value.
3403 */
3404IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3405{
3406 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3407
3408 if (pIemCpu->uCpl != 0)
3409 return iemRaiseGeneralProtectionFault0(pIemCpu);
3410 Assert(!pCtx->eflags.Bits.u1VM);
3411
3412 /*
3413 * Compose the new CR0 value and call common worker.
3414 */
3415 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3416 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3417 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3418}
3419
3420
3421/**
3422 * Implements 'CLTS'.
3423 */
3424IEM_CIMPL_DEF_0(iemCImpl_clts)
3425{
3426 if (pIemCpu->uCpl != 0)
3427 return iemRaiseGeneralProtectionFault0(pIemCpu);
3428
3429 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3430 uint64_t uNewCr0 = pCtx->cr0;
3431 uNewCr0 &= ~X86_CR0_TS;
3432 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3433}
3434
3435
3436/**
3437 * Implements mov GReg,DRx.
3438 *
3439 * @param iGReg The general register to store the DRx value in.
3440 * @param iDrReg The DRx register to read (0-7).
3441 */
3442IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3443{
3444 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3445
3446 /*
3447 * Check preconditions.
3448 */
3449
3450 /* Raise GPs. */
3451 if (pIemCpu->uCpl != 0)
3452 return iemRaiseGeneralProtectionFault0(pIemCpu);
3453 Assert(!pCtx->eflags.Bits.u1VM);
3454
3455 if ( (iDrReg == 4 || iDrReg == 5)
3456 && (pCtx->cr4 & X86_CR4_DE) )
3457 {
3458 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3459 return iemRaiseGeneralProtectionFault0(pIemCpu);
3460 }
3461
3462 /* Raise #DB if general access detect is enabled. */
3463 if (pCtx->dr[7] & X86_DR7_GD)
3464 {
3465 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3466 return iemRaiseDebugException(pIemCpu);
3467 }
3468
3469 /*
3470 * Read the debug register and store it in the specified general register.
3471 */
3472 uint64_t drX;
3473 switch (iDrReg)
3474 {
3475 case 0: drX = pCtx->dr[0]; break;
3476 case 1: drX = pCtx->dr[1]; break;
3477 case 2: drX = pCtx->dr[2]; break;
3478 case 3: drX = pCtx->dr[3]; break;
3479 case 6:
3480 case 4:
3481 drX = pCtx->dr[6];
3482 drX &= ~RT_BIT_32(12);
3483 drX |= UINT32_C(0xffff0ff0);
3484 break;
3485 case 7:
3486 case 5:
3487 drX = pCtx->dr[7];
3488 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3489 drX |= RT_BIT_32(10);
3490 break;
3491 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3492 }
3493
3494 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3495 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3496 else
3497 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3498
3499 iemRegAddToRip(pIemCpu, cbInstr);
3500 return VINF_SUCCESS;
3501}
3502
3503
3504/**
3505 * Implements mov DRx,GReg.
3506 *
3507 * @param iDrReg The DRx register to write (valid).
3508 * @param iGReg The general register to load the DRx value from.
3509 */
3510IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3511{
3512 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3513
3514 /*
3515 * Check preconditions.
3516 */
3517 if (pIemCpu->uCpl != 0)
3518 return iemRaiseGeneralProtectionFault0(pIemCpu);
3519 Assert(!pCtx->eflags.Bits.u1VM);
3520
3521 if ( (iDrReg == 4 || iDrReg == 5)
3522 && (pCtx->cr4 & X86_CR4_DE) )
3523 {
3524 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3525 return iemRaiseGeneralProtectionFault0(pIemCpu);
3526 }
3527
3528 /* Raise #DB if general access detect is enabled. */
3529 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
3530 * \#GP? */
3531 if (pCtx->dr[7] & X86_DR7_GD)
3532 {
3533 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3534 return iemRaiseDebugException(pIemCpu);
3535 }
3536
3537 /*
3538 * Read the new value from the source register.
3539 */
3540 uint64_t uNewDrX;
3541 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3542 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3543 else
3544 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3545
3546 /*
3547 * Adjust it.
3548 */
3549 switch (iDrReg)
3550 {
3551 case 0:
3552 case 1:
3553 case 2:
3554 case 3:
3555 /* nothing to adjust */
3556 break;
3557
3558 case 6:
3559 case 4:
3560 if (uNewDrX & UINT64_C(0xffffffff00000000))
3561 {
3562 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3563 return iemRaiseGeneralProtectionFault0(pIemCpu);
3564 }
3565 uNewDrX &= ~RT_BIT_32(12);
3566 uNewDrX |= UINT32_C(0xffff0ff0);
3567 break;
3568
3569 case 7:
3570 case 5:
3571 if (uNewDrX & UINT64_C(0xffffffff00000000))
3572 {
3573 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3574 return iemRaiseGeneralProtectionFault0(pIemCpu);
3575 }
3576 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3577 uNewDrX |= RT_BIT_32(10);
3578 break;
3579
3580 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3581 }
3582
3583 /*
3584 * Do the actual setting.
3585 */
3586 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3587 {
3588 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3589 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3590 }
3591 else
3592 pCtx->dr[iDrReg] = uNewDrX;
3593
3594 iemRegAddToRip(pIemCpu, cbInstr);
3595 return VINF_SUCCESS;
3596}
3597
3598
3599/**
3600 * Implements 'INVLPG m'.
3601 *
3602 * @param GCPtrPage The effective address of the page to invalidate.
3603 * @remarks Updates the RIP.
3604 */
3605IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3606{
3607 /* ring-0 only. */
3608 if (pIemCpu->uCpl != 0)
3609 return iemRaiseGeneralProtectionFault0(pIemCpu);
3610 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3611
3612 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3613 iemRegAddToRip(pIemCpu, cbInstr);
3614
3615 if (rc == VINF_SUCCESS)
3616 return VINF_SUCCESS;
3617 if (rc == VINF_PGM_SYNC_CR3)
3618 return iemSetPassUpStatus(pIemCpu, rc);
3619
3620 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3621 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
3622 return rc;
3623}
3624
3625
3626/**
3627 * Implements RDTSC.
3628 */
3629IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3630{
3631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3632
3633 /*
3634 * Check preconditions.
3635 */
3636 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3637 return iemRaiseUndefinedOpcode(pIemCpu);
3638
3639 if ( (pCtx->cr4 & X86_CR4_TSD)
3640 && pIemCpu->uCpl != 0)
3641 {
3642 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3643 return iemRaiseGeneralProtectionFault0(pIemCpu);
3644 }
3645
3646 /*
3647 * Do the job.
3648 */
3649 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3650 pCtx->rax = (uint32_t)uTicks;
3651 pCtx->rdx = uTicks >> 32;
3652#ifdef IEM_VERIFICATION_MODE
3653 pIemCpu->fIgnoreRaxRdx = true;
3654#endif
3655
3656 iemRegAddToRip(pIemCpu, cbInstr);
3657 return VINF_SUCCESS;
3658}
3659
3660
3661/**
3662 * Implements RDMSR.
3663 */
3664IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3665{
3666 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3667
3668 /*
3669 * Check preconditions.
3670 */
3671 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3672 return iemRaiseUndefinedOpcode(pIemCpu);
3673 if (pIemCpu->uCpl != 0)
3674 return iemRaiseGeneralProtectionFault0(pIemCpu);
3675
3676 /*
3677 * Do the job.
3678 */
3679 RTUINT64U uValue;
3680 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3681 if (rc != VINF_SUCCESS)
3682 {
3683 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3684 return iemRaiseGeneralProtectionFault0(pIemCpu);
3685 }
3686
3687 pCtx->rax = uValue.au32[0];
3688 pCtx->rdx = uValue.au32[1];
3689
3690 iemRegAddToRip(pIemCpu, cbInstr);
3691 return VINF_SUCCESS;
3692}
3693
3694
3695/**
3696 * Implements WRMSR.
3697 */
3698IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
3699{
3700 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3701
3702 /*
3703 * Check preconditions.
3704 */
3705 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3706 return iemRaiseUndefinedOpcode(pIemCpu);
3707 if (pIemCpu->uCpl != 0)
3708 return iemRaiseGeneralProtectionFault0(pIemCpu);
3709
3710 /*
3711 * Do the job.
3712 */
3713 RTUINT64U uValue;
3714 uValue.au32[0] = pCtx->eax;
3715 uValue.au32[1] = pCtx->edx;
3716
3717 int rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
3718 if (rc != VINF_SUCCESS)
3719 {
3720 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3721 return iemRaiseGeneralProtectionFault0(pIemCpu);
3722 }
3723
3724 iemRegAddToRip(pIemCpu, cbInstr);
3725 return VINF_SUCCESS;
3726}
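/*
 * Standalone sketch (not part of the IEM build) of the EDX:EAX convention used
 * by RDTSC, RDMSR and WRMSR above: a 64-bit value is returned with its low
 * half in EAX and its high half in EDX, and WRMSR recombines the two halves
 * the same way. Plain <stdint.h> types only; the function names are
 * illustrative.
 */
#if 0
#include <stdint.h>

static void msrSketchSplitU64(uint64_t uValue, uint32_t *puEax, uint32_t *puEdx)
{
    *puEax = (uint32_t)uValue;          /* low 32 bits  -> EAX */
    *puEdx = (uint32_t)(uValue >> 32);  /* high 32 bits -> EDX */
}

static uint64_t msrSketchCombineU64(uint32_t uEax, uint32_t uEdx)
{
    return ((uint64_t)uEdx << 32) | uEax;
}
#endif /* illustration only */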
3727
3728
3729/**
3730 * Implements 'IN eAX, port'.
3731 *
3732 * @param u16Port The source port.
3733 * @param cbReg The register size.
3734 */
3735IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3736{
3737 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3738
3739 /*
3740 * CPL check
3741 */
3742 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3743 if (rcStrict != VINF_SUCCESS)
3744 return rcStrict;
3745
3746 /*
3747 * Perform the I/O.
3748 */
3749 uint32_t u32Value;
3750 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3751 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3752 else
3753 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3754 if (IOM_SUCCESS(rcStrict))
3755 {
3756 switch (cbReg)
3757 {
3758 case 1: pCtx->al = (uint8_t)u32Value; break;
3759 case 2: pCtx->ax = (uint16_t)u32Value; break;
3760 case 4: pCtx->rax = u32Value; break;
3761 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3762 }
3763 iemRegAddToRip(pIemCpu, cbInstr);
3764 pIemCpu->cPotentialExits++;
3765 if (rcStrict != VINF_SUCCESS)
3766 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3767 }
3768
3769 return rcStrict;
3770}
3771
3772
3773/**
3774 * Implements 'IN eAX, DX'.
3775 *
3776 * @param cbReg The register size.
3777 */
3778IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3779{
3780 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3781}
3782
3783
3784/**
3785 * Implements 'OUT port, eAX'.
3786 *
3787 * @param u16Port The destination port.
3788 * @param cbReg The register size.
3789 */
3790IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3791{
3792 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3793
3794 /*
3795 * CPL check
3796 */
3797 if ( (pCtx->cr0 & X86_CR0_PE)
3798 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3799 || pCtx->eflags.Bits.u1VM) )
3800 {
3801 /** @todo I/O port permission bitmap check */
3802 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap checks.\n"));
3803 }
3804
3805 /*
3806 * Perform the I/O.
3807 */
3808 uint32_t u32Value;
3809 switch (cbReg)
3810 {
3811 case 1: u32Value = pCtx->al; break;
3812 case 2: u32Value = pCtx->ax; break;
3813 case 4: u32Value = pCtx->eax; break;
3814 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3815 }
3816 VBOXSTRICTRC rcStrict;
3817 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3818 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3819 else
3820 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3821 if (IOM_SUCCESS(rcStrict))
3822 {
3823 iemRegAddToRip(pIemCpu, cbInstr);
3824 pIemCpu->cPotentialExits++;
3825 if (rcStrict != VINF_SUCCESS)
3826 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3827 }
3828 return rcStrict;
3829}
3830
3831
3832/**
3833 * Implements 'OUT DX, eAX'.
3834 *
3835 * @param cbReg The register size.
3836 */
3837IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3838{
3839 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3840}
3841
3842
3843/**
3844 * Implements 'CLI'.
3845 */
3846IEM_CIMPL_DEF_0(iemCImpl_cli)
3847{
3848 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3849
3850 if (pCtx->cr0 & X86_CR0_PE)
3851 {
3852 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3853 if (!pCtx->eflags.Bits.u1VM)
3854 {
3855 if (pIemCpu->uCpl <= uIopl)
3856 pCtx->eflags.Bits.u1IF = 0;
3857 else if ( pIemCpu->uCpl == 3
3858 && (pCtx->cr4 & X86_CR4_PVI) )
3859 pCtx->eflags.Bits.u1VIF = 0;
3860 else
3861 return iemRaiseGeneralProtectionFault0(pIemCpu);
3862 }
3863 /* V8086 */
3864 else if (uIopl == 3)
3865 pCtx->eflags.Bits.u1IF = 0;
3866 else if ( uIopl < 3
3867 && (pCtx->cr4 & X86_CR4_VME) )
3868 pCtx->eflags.Bits.u1VIF = 0;
3869 else
3870 return iemRaiseGeneralProtectionFault0(pIemCpu);
3871 }
3872 /* real mode */
3873 else
3874 pCtx->eflags.Bits.u1IF = 0;
3875 iemRegAddToRip(pIemCpu, cbInstr);
3876 return VINF_SUCCESS;
3877}
3878
3879
3880/**
3881 * Implements 'STI'.
3882 */
3883IEM_CIMPL_DEF_0(iemCImpl_sti)
3884{
3885 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3886
3887 if (pCtx->cr0 & X86_CR0_PE)
3888 {
3889 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3890 if (!pCtx->eflags.Bits.u1VM)
3891 {
3892 if (pIemCpu->uCpl <= uIopl)
3893 pCtx->eflags.Bits.u1IF = 1;
3894 else if ( pIemCpu->uCpl == 3
3895 && (pCtx->cr4 & X86_CR4_PVI)
3896 && !pCtx->eflags.Bits.u1VIP )
3897 pCtx->eflags.Bits.u1VIF = 1;
3898 else
3899 return iemRaiseGeneralProtectionFault0(pIemCpu);
3900 }
3901 /* V8086 */
3902 else if (uIopl == 3)
3903 pCtx->eflags.Bits.u1IF = 1;
3904 else if ( uIopl < 3
3905 && (pCtx->cr4 & X86_CR4_VME)
3906 && !pCtx->eflags.Bits.u1VIP )
3907 pCtx->eflags.Bits.u1VIF = 1;
3908 else
3909 return iemRaiseGeneralProtectionFault0(pIemCpu);
3910 }
3911 /* real mode */
3912 else
3913 pCtx->eflags.Bits.u1IF = 1;
3914
3915 iemRegAddToRip(pIemCpu, cbInstr);
3916 /** @todo don't do this unconditionally... */
3917 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3918 return VINF_SUCCESS;
3919}
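/*
 * Standalone sketch (not part of the IEM build) of the CLI/STI decision tree
 * above: in protected mode IF may be toggled when CPL <= IOPL, CPL 3 with
 * CR4.PVI falls back to VIF, and in V8086 mode IOPL 3 allows IF while CR4.VME
 * redirects to VIF; everything else raises #GP(0). STI additionally requires
 * EFLAGS.VIP to be clear in the VIF cases. The names and the boolean-flag
 * interface below are illustrative only.
 */
#if 0
#include <stdbool.h>

typedef enum { SKETCH_SET_IF, SKETCH_SET_VIF, SKETCH_RAISE_GP } SKETCHIFACTION;

static SKETCHIFACTION sketchCliStiAction(bool fProtMode, bool fV86, unsigned uCpl,
                                         unsigned uIopl, bool fCr4Vme, bool fCr4Pvi)
{
    if (!fProtMode)
        return SKETCH_SET_IF;           /* real mode: always allowed */
    if (!fV86)
    {
        if (uCpl <= uIopl)
            return SKETCH_SET_IF;
        if (uCpl == 3 && fCr4Pvi)
            return SKETCH_SET_VIF;      /* protected-mode virtual interrupts */
        return SKETCH_RAISE_GP;
    }
    if (uIopl == 3)
        return SKETCH_SET_IF;
    if (fCr4Vme)
        return SKETCH_SET_VIF;          /* virtual-8086 mode extensions */
    return SKETCH_RAISE_GP;
}
#endif /* illustration only */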
3920
3921
3922/**
3923 * Implements 'HLT'.
3924 */
3925IEM_CIMPL_DEF_0(iemCImpl_hlt)
3926{
3927 if (pIemCpu->uCpl != 0)
3928 return iemRaiseGeneralProtectionFault0(pIemCpu);
3929 iemRegAddToRip(pIemCpu, cbInstr);
3930 return VINF_EM_HALT;
3931}
3932
3933
3934/**
3935 * Implements 'CPUID'.
3936 */
3937IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3938{
3939 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3940
3941 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3942 pCtx->rax &= UINT32_C(0xffffffff);
3943 pCtx->rbx &= UINT32_C(0xffffffff);
3944 pCtx->rcx &= UINT32_C(0xffffffff);
3945 pCtx->rdx &= UINT32_C(0xffffffff);
3946
3947 iemRegAddToRip(pIemCpu, cbInstr);
3948 return VINF_SUCCESS;
3949}
3950
3951
3952/**
3953 * Implements 'AAD'.
3954 *
3955 * @param bImm The immediate operand (the base, usually 10).
3956 */
3957IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3958{
3959 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3960
3961 uint16_t const ax = pCtx->ax;
3962 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3963 pCtx->ax = al;
3964 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3965 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3966 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3967
3968 iemRegAddToRip(pIemCpu, cbInstr);
3969 return VINF_SUCCESS;
3970}
3971
3972
3973/**
3974 * Implements 'AAM'.
3975 *
3976 * @param bImm The immediate operand. Cannot be 0.
3977 */
3978IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3979{
3980 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3981 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3982
3983 uint16_t const ax = pCtx->ax;
3984 uint8_t const al = (uint8_t)ax % bImm;
3985 uint8_t const ah = (uint8_t)ax / bImm;
3986 pCtx->ax = (ah << 8) + al;
3987 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3988 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3989 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3990
3991 iemRegAddToRip(pIemCpu, cbInstr);
3992 return VINF_SUCCESS;
3993}
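/*
 * Standalone sketch (not part of the IEM build) of the AAD/AAM arithmetic
 * performed above: AAD folds AH into AL as AL = AL + AH * imm with AH cleared,
 * while AAM splits AL as AH = AL / imm, AL = AL % imm. With the default
 * immediate of 10 these convert between unpacked BCD and binary, e.g. AAM on
 * AX=0x003b (59) gives AH=5, AL=9. The helper names are illustrative.
 */
#if 0
#include <stdint.h>

static uint16_t bcdSketchAad(uint16_t uAx, uint8_t bImm)
{
    uint8_t const al = (uint8_t)((uint8_t)uAx + (uint8_t)(uAx >> 8) * bImm);
    return al;                              /* AH becomes zero */
}

static uint16_t bcdSketchAam(uint16_t uAx, uint8_t bImm)
{
    uint8_t const al = (uint8_t)uAx % bImm; /* bImm == 0 raises #DE instead */
    uint8_t const ah = (uint8_t)uAx / bImm;
    return (uint16_t)((ah << 8) | al);
}
#endif /* illustration only */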
3994
3995
3996
3997
3998/*
3999 * Instantiate the various string operation combinations.
4000 */
4001#define OP_SIZE 8
4002#define ADDR_SIZE 16
4003#include "IEMAllCImplStrInstr.cpp.h"
4004#define OP_SIZE 8
4005#define ADDR_SIZE 32
4006#include "IEMAllCImplStrInstr.cpp.h"
4007#define OP_SIZE 8
4008#define ADDR_SIZE 64
4009#include "IEMAllCImplStrInstr.cpp.h"
4010
4011#define OP_SIZE 16
4012#define ADDR_SIZE 16
4013#include "IEMAllCImplStrInstr.cpp.h"
4014#define OP_SIZE 16
4015#define ADDR_SIZE 32
4016#include "IEMAllCImplStrInstr.cpp.h"
4017#define OP_SIZE 16
4018#define ADDR_SIZE 64
4019#include "IEMAllCImplStrInstr.cpp.h"
4020
4021#define OP_SIZE 32
4022#define ADDR_SIZE 16
4023#include "IEMAllCImplStrInstr.cpp.h"
4024#define OP_SIZE 32
4025#define ADDR_SIZE 32
4026#include "IEMAllCImplStrInstr.cpp.h"
4027#define OP_SIZE 32
4028#define ADDR_SIZE 64
4029#include "IEMAllCImplStrInstr.cpp.h"
4030
4031#define OP_SIZE 64
4032#define ADDR_SIZE 32
4033#include "IEMAllCImplStrInstr.cpp.h"
4034#define OP_SIZE 64
4035#define ADDR_SIZE 64
4036#include "IEMAllCImplStrInstr.cpp.h"
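/*
 * The repeated includes above are the classic "template by preprocessor"
 * trick: IEMAllCImplStrInstr.cpp.h pastes the current OP_SIZE/ADDR_SIZE pair
 * into the generated function names, and since each pair is simply redefined
 * before the next inclusion, the template evidently cleans the defines up
 * again itself. A minimal standalone sketch of the name-pasting idea follows
 * (hypothetical names, single file for brevity; the real code does this via
 * repeated #include rather than a macro):
 */
#if 0
#include <stdio.h>

#define SKETCH_CAT_(a, b)   a##b
#define SKETCH_CAT(a, b)    SKETCH_CAT_(a, b)   /* expand before pasting */

/* One "template body" parameterised by the operand size. */
#define SKETCH_INSTANTIATE(a_OpSize) \
    static unsigned SKETCH_CAT(sketchWidth_op, a_OpSize)(void) { return a_OpSize; }

SKETCH_INSTANTIATE(16)  /* defines sketchWidth_op16() */
SKETCH_INSTANTIATE(32)  /* defines sketchWidth_op32() */

int main(void)
{
    printf("%u %u\n", sketchWidth_op16(), sketchWidth_op32());
    return 0;
}
#endif /* illustration only */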
4037
4038
4039/**
4040 * Implements 'FINIT' and 'FNINIT'.
4041 *
4042 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
4043 * not.
4044 */
4045IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
4046{
4047 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4048
4049 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4050 return iemRaiseDeviceNotAvailable(pIemCpu);
4051
4052 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
4053 if (fCheckXcpts && TODO )
4054 return iemRaiseMathFault(pIemCpu);
4055 */
4056
4057 if (iemFRegIsFxSaveFormat(pIemCpu))
4058 {
4059 pCtx->fpu.FCW = 0x37f;
4060 pCtx->fpu.FSW = 0;
4061 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
4062 pCtx->fpu.FPUDP = 0;
4063 pCtx->fpu.DS = 0; //??
4064 pCtx->fpu.FPUIP = 0;
4065 pCtx->fpu.CS = 0; //??
4066 pCtx->fpu.FOP = 0;
4067 }
4068 else
4069 {
4070 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
4071 pFpu->FCW = 0x37f;
4072 pFpu->FSW = 0;
4073 pFpu->FTW = 0xffff; /* 11 - empty */
4074 pFpu->FPUOO = 0; //??
4075 pFpu->FPUOS = 0; //??
4076 pFpu->FPUIP = 0;
4077 pFpu->CS = 0; //??
4078 pFpu->FOP = 0;
4079 }
4080
4081 iemRegAddToRip(pIemCpu, cbInstr);
4082 return VINF_SUCCESS;
4083}
4084
4085
4086/**
4087 * Implements 'FXSAVE'.
4088 *
4089 * @param iEffSeg The effective segment.
4090 * @param GCPtrEff The address of the image.
4091 * @param enmEffOpSize The operand size (only REX.W really matters).
4092 */
4093IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4094{
4095 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4096
4097 /*
4098 * Raise exceptions.
4099 */
4100 if (pCtx->cr0 & X86_CR0_EM)
4101 return iemRaiseUndefinedOpcode(pIemCpu);
4102 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4103 return iemRaiseDeviceNotAvailable(pIemCpu);
4104 if (GCPtrEff & 15)
4105 {
4106 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4107 * all/any misalignment sizes, Intel says it's an implementation detail. */
4108 if ( (pCtx->cr0 & X86_CR0_AM)
4109 && pCtx->eflags.Bits.u1AC
4110 && pIemCpu->uCpl == 3)
4111 return iemRaiseAlignmentCheckException(pIemCpu);
4112 return iemRaiseGeneralProtectionFault0(pIemCpu);
4113 }
4114 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4115
4116 /*
4117 * Access the memory.
4118 */
4119 void *pvMem512;
4120 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4121 if (rcStrict != VINF_SUCCESS)
4122 return rcStrict;
4123 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
4124
4125 /*
4126 * Store the registers.
4127 */
4128 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's
4129 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
4130
4131 /* common for all formats */
4132 pDst->FCW = pCtx->fpu.FCW;
4133 pDst->FSW = pCtx->fpu.FSW;
4134 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
4135 pDst->FOP = pCtx->fpu.FOP;
4136 pDst->MXCSR = pCtx->fpu.MXCSR;
4137 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
4138 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
4139 {
4140 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
4141 * them for now... */
4142 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4143 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4144 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
4145 pDst->aRegs[i].au32[3] = 0;
4146 }
4147
4148 /* FPU IP, CS, DP and DS. */
4149 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
4150 * state information. :-/
4151 * Storing zeros now to prevent any potential leakage of host info. */
4152 pDst->FPUIP = 0;
4153 pDst->CS = 0;
4154 pDst->Rsrvd1 = 0;
4155 pDst->FPUDP = 0;
4156 pDst->DS = 0;
4157 pDst->Rsrvd2 = 0;
4158
4159 /* XMM registers. */
4160 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4161 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4162 || pIemCpu->uCpl != 0)
4163 {
4164 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4165 for (uint32_t i = 0; i < cXmmRegs; i++)
4166 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
4167 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
4168 * right? */
4169 }
4170
4171 /*
4172 * Commit the memory.
4173 */
4174 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4175 if (rcStrict != VINF_SUCCESS)
4176 return rcStrict;
4177
4178 iemRegAddToRip(pIemCpu, cbInstr);
4179 return VINF_SUCCESS;
4180}
4181
4182
4183/**
4184 * Implements 'FXRSTOR'.
4185 *
4186 * @param iEffSeg The effective segment.
 * @param GCPtrEff The address of the image.
4187 * @param enmEffOpSize The operand size (only REX.W really matters).
4188 */
4189IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4190{
4191 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4192
4193 /*
4194 * Raise exceptions.
4195 */
4196 if (pCtx->cr0 & X86_CR0_EM)
4197 return iemRaiseUndefinedOpcode(pIemCpu);
4198 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4199 return iemRaiseDeviceNotAvailable(pIemCpu);
4200 if (GCPtrEff & 15)
4201 {
4202 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4203 * all/any misalignment sizes, Intel says it's an implementation detail. */
4204 if ( (pCtx->cr0 & X86_CR0_AM)
4205 && pCtx->eflags.Bits.u1AC
4206 && pIemCpu->uCpl == 3)
4207 return iemRaiseAlignmentCheckException(pIemCpu);
4208 return iemRaiseGeneralProtectionFault0(pIemCpu);
4209 }
4210 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4211
4212 /*
4213 * Access the memory.
4214 */
4215 void *pvMem512;
4216 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4217 if (rcStrict != VINF_SUCCESS)
4218 return rcStrict;
4219 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4220
4221 /*
4222 * Check the state for stuff which will GP(0).
4223 */
4224 uint32_t const fMXCSR = pSrc->MXCSR;
4225 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4226 if (fMXCSR & ~fMXCSR_MASK)
4227 {
4228 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4229 return iemRaiseGeneralProtectionFault0(pIemCpu);
4230 }
4231
4232 /*
4233 * Load the registers.
4234 */
4235 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's
4236 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
4237
4238 /* common for all formats */
4239 pCtx->fpu.FCW = pSrc->FCW;
4240 pCtx->fpu.FSW = pSrc->FSW;
4241 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4242 pCtx->fpu.FOP = pSrc->FOP;
4243 pCtx->fpu.MXCSR = fMXCSR;
4244 /* (MXCSR_MASK is read-only) */
4245 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4246 {
4247 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4248 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4249 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4250 pCtx->fpu.aRegs[i].au32[3] = 0;
4251 }
4252
4253 /* FPU IP, CS, DP and DS. */
4254 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4255 {
4256 pCtx->fpu.FPUIP = pSrc->FPUIP;
4257 pCtx->fpu.CS = pSrc->CS;
4258 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4259 pCtx->fpu.FPUDP = pSrc->FPUDP;
4260 pCtx->fpu.DS = pSrc->DS;
4261 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4262 }
4263 else
4264 {
4265 pCtx->fpu.FPUIP = pSrc->FPUIP;
4266 pCtx->fpu.CS = pSrc->CS;
4267 pCtx->fpu.Rsrvd1 = 0;
4268 pCtx->fpu.FPUDP = pSrc->FPUDP;
4269 pCtx->fpu.DS = pSrc->DS;
4270 pCtx->fpu.Rsrvd2 = 0;
4271 }
4272
4273 /* XMM registers. */
4274 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4275 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4276 || pIemCpu->uCpl != 0)
4277 {
4278 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4279 for (uint32_t i = 0; i < cXmmRegs; i++)
4280 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4281 }
4282
4283 /*
4284 * Commit the memory.
4285 */
4286 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4287 if (rcStrict != VINF_SUCCESS)
4288 return rcStrict;
4289
4290 iemRegAddToRip(pIemCpu, cbInstr);
4291 return VINF_SUCCESS;
4292}
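/*
 * Standalone sketch (not part of the IEM build) of the MXCSR check performed
 * above: a loaded MXCSR with any bit set outside MXCSR_MASK must raise #GP(0),
 * and a saved mask of zero means the default mask 0xffbf is in effect. The
 * helper name is illustrative.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool mxcsrSketchIsValid(uint32_t uMxCsr, uint32_t uSavedMask)
{
    uint32_t const uMask = uSavedMask ? uSavedMask : UINT32_C(0xffbf);
    return !(uMxCsr & ~uMask);          /* false -> caller raises #GP(0) */
}
#endif /* illustration only */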
4293
4294
4295/**
4296 * Common routine for fnstenv and fnsave.
4297 *
4298 * @param uPtr Where to store the state.
4299 * @param pCtx The CPU context.
4300 */
4301static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4302{
4303 if (enmEffOpSize == IEMMODE_16BIT)
4304 {
4305 uPtr.pu16[0] = pCtx->fpu.FCW;
4306 uPtr.pu16[1] = pCtx->fpu.FSW;
4307 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4308 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4309 {
4310 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4311 * protected mode or long mode and we save it in real mode? And vice
4312 * versa? And with 32-bit operand size? I think the CPU is storing the
4313 * effective address ((CS << 4) + IP) in the offset register and not
4314 * doing any address calculations here. */
4315 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4316 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4317 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4318 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4319 }
4320 else
4321 {
4322 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4323 uPtr.pu16[4] = pCtx->fpu.CS;
4324 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4325 uPtr.pu16[6] = pCtx->fpu.DS;
4326 }
4327 }
4328 else
4329 {
4330 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4331 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4332 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4333 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4334 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4335 {
4336 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4337 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4338 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4339 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4340 }
4341 else
4342 {
4343 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4344 uPtr.pu16[4*2] = pCtx->fpu.CS;
4345 uPtr.pu16[4*2+1] = pCtx->fpu.FOP;
4346 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4347 uPtr.pu16[6*2] = pCtx->fpu.DS;
4348 }
4349 }
4350}
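/*
 * Standalone sketch (not part of the IEM build) of the 16-bit real-mode
 * environment layout used above: the 20-bit linear instruction/operand
 * address is stored as its low 16 bits in one word, with bits 19:16 placed
 * in the top nibble of the word that also carries the 11-bit opcode. The
 * helper names are illustrative.
 */
#if 0
#include <stdint.h>

static void fpuEnvSketchPackRealMode(uint32_t uLinAddr, uint16_t uFop,
                                     uint16_t *puOffWord, uint16_t *puHighWord)
{
    *puOffWord  = (uint16_t)uLinAddr;                         /* bits 15:0 */
    *puHighWord = (uint16_t)(((uLinAddr >> 4) & 0xf000) | (uFop & 0x07ff));
}

static uint32_t fpuEnvSketchUnpackRealMode(uint16_t uOffWord, uint16_t uHighWord)
{
    return uOffWord | (((uint32_t)uHighWord & 0xf000) << 4);  /* bits 19:16 */
}
#endif /* illustration only */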
4351
4352
4353/**
4354 * Common routine for fldenv and frstor.
4355 *
4356 * @param uPtr Where to load the state from.
4357 * @param pCtx The CPU context.
4358 */
4359static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4360{
4361 if (enmEffOpSize == IEMMODE_16BIT)
4362 {
4363 pCtx->fpu.FCW = uPtr.pu16[0];
4364 pCtx->fpu.FSW = uPtr.pu16[1];
4365 pCtx->fpu.FTW = uPtr.pu16[2];
4366 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4367 {
4368 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4369 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4370 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4371 pCtx->fpu.CS = 0;
4372 pCtx->fpu.DS = 0;
4373 }
4374 else
4375 {
4376 pCtx->fpu.FPUIP = uPtr.pu16[3];
4377 pCtx->fpu.CS = uPtr.pu16[4];
4378 pCtx->fpu.FPUDP = uPtr.pu16[5];
4379 pCtx->fpu.DS = uPtr.pu16[6];
4380 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4381 }
4382 }
4383 else
4384 {
4385 pCtx->fpu.FCW = uPtr.pu16[0*2];
4386 pCtx->fpu.FSW = uPtr.pu16[1*2];
4387 pCtx->fpu.FTW = uPtr.pu16[2*2];
4388 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4389 {
4390 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4391 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4392 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4393 pCtx->fpu.CS = 0;
4394 pCtx->fpu.DS = 0;
4395 }
4396 else
4397 {
4398 pCtx->fpu.FPUIP = uPtr.pu32[3];
4399 pCtx->fpu.CS = uPtr.pu16[4*2];
4400 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4401 pCtx->fpu.FPUDP = uPtr.pu32[5];
4402 pCtx->fpu.DS = uPtr.pu16[6*2];
4403 }
4404 }
4405
4406 /* Make adjustments. */
4407 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4408 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4409 iemFpuRecalcExceptionStatus(pCtx);
4410 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4411 * exceptions are pending after loading the saved state? */
4412}
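/*
 * Standalone sketch (not part of the IEM build) of the tag word compression
 * the iemFpuCompressFtw call above is responsible for: the legacy environment
 * carries a full tag word with two bits per register (11 = empty, anything
 * else = occupied), while the FXSAVE-style state kept internally only records
 * one valid bit per register. The opposite direction (iemFpuCalcFullFtw, used
 * by the store routine) additionally has to classify each occupied register
 * as valid/zero/special. The helper name is illustrative.
 */
#if 0
#include <stdint.h>

static uint16_t ftwSketchCompress(uint16_t uFullFtw)
{
    uint16_t uAbridged = 0;
    for (unsigned iReg = 0; iReg < 8; iReg++)
        if (((uFullFtw >> (iReg * 2)) & 3) != 3)    /* anything but 11 (empty) */
            uAbridged |= (uint16_t)1 << iReg;
    return uAbridged;                               /* one bit per register */
}
#endif /* illustration only */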
4413
4414
4415/**
4416 * Implements 'FNSTENV'.
4417 *
4418 * @param enmEffOpSize The operand size (only REX.W really matters).
4419 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4420 * @param GCPtrEffDst The address of the image.
4421 */
4422IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4423{
4424 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4425 RTPTRUNION uPtr;
4426 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4427 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4428 if (rcStrict != VINF_SUCCESS)
4429 return rcStrict;
4430
4431 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4432
4433 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4434 if (rcStrict != VINF_SUCCESS)
4435 return rcStrict;
4436
4437 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4438 iemRegAddToRip(pIemCpu, cbInstr);
4439 return VINF_SUCCESS;
4440}
4441
4442
4443/**
4444 * Implements 'FLDENV'.
4445 *
4446 * @param enmEffOpSize The operand size (only REX.W really matters).
4447 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4448 * @param GCPtrEffSrc The address of the image.
4449 */
4450IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4451{
4452 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4453 RTCPTRUNION uPtr;
4454 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4455 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4456 if (rcStrict != VINF_SUCCESS)
4457 return rcStrict;
4458
4459 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4460
4461 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4462 if (rcStrict != VINF_SUCCESS)
4463 return rcStrict;
4464
4465 iemRegAddToRip(pIemCpu, cbInstr);
4466 return VINF_SUCCESS;
4467}
4468
4469
4470/**
4471 * Implements 'FLDCW'.
4472 *
4473 * @param u16Fcw The new FCW.
4474 */
4475IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4476{
4477 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4478
4479 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4480 /** @todo Testcase: Try to see what happens when trying to set undefined bits
4481 * (other than 6 and 7). Currently ignoring them. */
4482 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4483 * according to FSW. (This is what is currently implemented.) */
4484 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4485 iemFpuRecalcExceptionStatus(pCtx);
4486
4487 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4488 iemRegAddToRip(pIemCpu, cbInstr);
4489 return VINF_SUCCESS;
4490}
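/*
 * Standalone sketch (not part of the IEM build) of the summary-bit update the
 * iemFpuRecalcExceptionStatus call above is responsible for: FSW.ES (and with
 * it FSW.B) is set when any of the six exception flags in FSW bits 5:0 is
 * pending while the corresponding mask bit in FCW bits 5:0 is clear, and
 * cleared otherwise (ES is bit 7, B is bit 15). The helper name is
 * illustrative.
 */
#if 0
#include <stdint.h>

static uint16_t fpuSketchRecalcEs(uint16_t uFsw, uint16_t uFcw)
{
    uint16_t const fUnmasked = (uint16_t)(uFsw & ~uFcw & 0x003f);
    if (fUnmasked)
        uFsw |= 0x8080;                 /* set B (bit 15) and ES (bit 7) */
    else
        uFsw &= (uint16_t)~0x8080;      /* clear B and ES */
    return uFsw;
}
#endif /* illustration only */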
4491
4492
4493
4494/**
4495 * Implements the underflow case of fxch.
4496 *
4497 * @param iStReg The other stack register.
4498 */
4499IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4500{
4501 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4502
4503 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4504 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4505 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4506
4507 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4508 * registers are read as QNaN and then exchanged. This could be
4509 * wrong... */
4510 if (pCtx->fpu.FCW & X86_FCW_IM)
4511 {
4512 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4513 {
4514 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4515 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4516 else
4517 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4518 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4519 }
4520 else
4521 {
4522 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4523 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4524 }
4525 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4526 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4527 }
4528 else
4529 {
4530 /* raise underflow exception, don't change anything. */
4531 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4532 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4533 }
4534 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4535
4536 iemRegAddToRip(pIemCpu, cbInstr);
4537 return VINF_SUCCESS;
4538}
4539
4540
4541/**
4542 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4543 *
4544 * @param iStReg The other stack register to compare ST(0) with.
 * @param pfnAImpl The comparison worker (returns EFLAGS).
 * @param fPop Whether to pop the register stack afterwards.
4545 */
4546IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4547{
4548 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4549 Assert(iStReg < 8);
4550
4551 /*
4552 * Raise exceptions.
4553 */
4554 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4555 return iemRaiseDeviceNotAvailable(pIemCpu);
4556 uint16_t u16Fsw = pCtx->fpu.FSW;
4557 if (u16Fsw & X86_FSW_ES)
4558 return iemRaiseMathFault(pIemCpu);
4559
4560 /*
4561 * Check if any of the register accesses cause #SF + #IA.
4562 */
4563 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4564 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4565 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4566 {
4567 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4568 pCtx->fpu.FSW &= ~X86_FSW_C1;
4569 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4570 if ( !(u16Fsw & X86_FSW_IE)
4571 || (pCtx->fpu.FCW & X86_FCW_IM) )
4572 {
4573 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4574 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4575 }
4576 }
4577 else if (pCtx->fpu.FCW & X86_FCW_IM)
4578 {
4579 /* Masked underflow. */
4580 pCtx->fpu.FSW &= ~X86_FSW_C1;
4581 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4582 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4583 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4584 }
4585 else
4586 {
4587 /* Raise underflow - don't touch EFLAGS or TOP. */
4588 pCtx->fpu.FSW &= ~X86_FSW_C1;
4589 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4590 fPop = false;
4591 }
4592
4593 /*
4594 * Pop if necessary.
4595 */
4596 if (fPop)
4597 {
4598 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4599 pCtx->fpu.FSW &= ~X86_FSW_TOP_MASK;
4600 pCtx->fpu.FSW |= ((iReg1 + 1) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4601 }
4602
4603 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4604 iemRegAddToRip(pIemCpu, cbInstr);
4605 return VINF_SUCCESS;
4606}
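/*
 * Standalone sketch (not part of the IEM build) of the EFLAGS convention
 * FCOMI/FUCOMI implement (the assembly worker above returns these flags):
 * greater -> ZF=PF=CF=0, less -> CF=1, equal -> ZF=1, unordered -> ZF=PF=CF=1,
 * with OF, SF and AF cleared. The names and the comparison-result enum are
 * illustrative.
 */
#if 0
#include <stdint.h>

typedef enum { SKETCH_FP_LT, SKETCH_FP_EQ, SKETCH_FP_GT, SKETCH_FP_UNORDERED } SKETCHFPCMP;

static uint32_t fcomiSketchEflags(SKETCHFPCMP enmResult)
{
    /* CF is bit 0 (0x01), PF is bit 2 (0x04), ZF is bit 6 (0x40). */
    switch (enmResult)
    {
        case SKETCH_FP_GT:  return 0;
        case SKETCH_FP_LT:  return 0x01;
        case SKETCH_FP_EQ:  return 0x40;
        default:            return 0x01 | 0x04 | 0x40; /* unordered */
    }
}
#endif /* illustration only */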
4607
4608/** @} */
4609