VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 42453

Last change on this file since 42453 was 42453, checked in by vboxsync, 13 years ago

IEM: Status code handling. PGM interface for R0 and RC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 144.2 KB
1/* $Id: IEMAllCImpl.cpp.h 42453 2012-07-30 15:23:18Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
43 }
44 return VINF_SUCCESS;
45}
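
/*
 * Illustrative sketch only, kept disabled: one way the I/O permission bitmap
 * check noted in the @todo above could conceptually look.  The helper
 * iemHlpFetchTssWord and the direct TR field accesses are placeholders for
 * the proper IEM memory fetch routines, not existing API.
 */
#if 0
static VBOXSTRICTRC iemHlpCheckIoBitmapSketch(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
{
    /* The 32-bit TSS keeps the I/O bitmap offset at byte 0x66. */
    uint16_t offIoBitmap;
    VBOXSTRICTRC rcStrict = iemHlpFetchTssWord(pIemCpu, &offIoBitmap, 0x66 /* offset within the TSS */);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* The two bytes covering the port must lie within the TSS limit. */
    uint32_t const offFirstByte = (uint32_t)offIoBitmap + (u16Port >> 3);
    if (offFirstByte + 1 > pCtx->tr.u32Limit)
        return iemRaiseGeneralProtectionFault0(pIemCpu);

    /* Every bit covering the bytes accessed by the instruction must be clear. */
    uint16_t bmBits;
    rcStrict = iemHlpFetchTssWord(pIemCpu, &bmBits, offFirstByte);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const fPortMask = (uint16_t)(((1U << cbOperand) - 1U) << (u16Port & 7));
    if (bmBits & fPortMask)
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    return VINF_SUCCESS;
}
#endif /* illustrative sketch */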
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
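
/*
 * Equivalent, more compact formulation of the helper above, likewise kept
 * disabled and purely illustrative: XOR folding reduces the byte to its
 * parity bit (GCC/clang's __builtin_parity would do the same job).
 */
#if 0
static bool iemHlpCalcParityFlagFolded(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;
    return !(u8Result & 1); /* PF is set when the number of set bits is even. */
}
#endif /* not used */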
80
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
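
/*
 * Example of the merge above (illustration only): with fToUpdate containing
 * X86_EFL_ZF | X86_EFL_SF and fUndefined containing X86_EFL_AF, exactly those
 * three bits are replaced by the freshly computed values from
 * iemAImpl_test_u8, while CF, OF, PF, IF and everything else in
 * pCtx->eflags.u is preserved unchanged.
 */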
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param pSReg Pointer to the segment register.
106 */
107static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg)
108{
109 /** @todo Testcase: write a testcase checking what happens when loading a NULL
110 * data selector in protected mode. */
111 pSReg->Sel = 0;
112 pSReg->ValidSel = 0;
113 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
114 pSReg->u64Base = 0;
115 pSReg->u32Limit = 0;
116 pSReg->Attr.u = 0;
117}
118
119
120/**
121 * Helper used by iret.
122 * @param pIemCpu The IEM per CPU data.
123 * @param uCpl The new CPL.
124 * @param pSReg Pointer to the segment register.
125 */
126static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
127{
128#ifdef VBOX_WITH_RAW_MODE_NOT_R0
129 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
130 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
131#else
132 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
133#endif
134
135 if ( uCpl > pSReg->Attr.n.u2Dpl
136 && pSReg->Attr.n.u1DescType /* code or data, not system */
137 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
138 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
139 iemHlpLoadNullDataSelectorProt(pSReg);
140}
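
/*
 * Example of the effect (illustration only): a return from ring 0 to ring 3
 * with DS still holding a DPL=0 data selector leaves DS as a NULL selector,
 * whereas a DPL=3 data selector or a conforming code selector in DS survives
 * the privilege change untouched.
 */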
141
142/** @} */
143
144/** @name C Implementations
145 * @{
146 */
147
148/**
149 * Implements a 16-bit popa.
150 */
151IEM_CIMPL_DEF_0(iemCImpl_popa_16)
152{
153 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
154 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
155 RTGCPTR GCPtrLast = GCPtrStart + 15;
156 VBOXSTRICTRC rcStrict;
157
158 /*
159 * The docs are a bit hard to comprehend here, but it looks like we wrap
160 * around in real mode as long as none of the individual "popa" crosses the
161 * end of the stack segment. In protected mode we check the whole access
162 * in one go. For efficiency, only do the word-by-word thing if we're in
163 * danger of wrapping around.
164 */
165 /** @todo do popa boundary / wrap-around checks. */
166 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
167 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
168 {
169 /* word-by-word */
170 RTUINT64U TmpRsp;
171 TmpRsp.u = pCtx->rsp;
172 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
173 if (rcStrict == VINF_SUCCESS)
174 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
175 if (rcStrict == VINF_SUCCESS)
176 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
177 if (rcStrict == VINF_SUCCESS)
178 {
179 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
180 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
181 }
182 if (rcStrict == VINF_SUCCESS)
183 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
184 if (rcStrict == VINF_SUCCESS)
185 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
186 if (rcStrict == VINF_SUCCESS)
187 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
188 if (rcStrict == VINF_SUCCESS)
189 {
190 pCtx->rsp = TmpRsp.u;
191 iemRegAddToRip(pIemCpu, cbInstr);
192 }
193 }
194 else
195 {
196 uint16_t const *pa16Mem = NULL;
197 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
198 if (rcStrict == VINF_SUCCESS)
199 {
200 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
201 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
202 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
203 /* skip sp */
204 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
205 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
206 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
207 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
208 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
209 if (rcStrict == VINF_SUCCESS)
210 {
211 iemRegAddToRsp(pCtx, 16);
212 iemRegAddToRip(pIemCpu, cbInstr);
213 }
214 }
215 }
216 return rcStrict;
217}
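
/*
 * Note on the pa16Mem indexing above (illustration only): PUSHA stores AX at
 * the highest address and DI at the lowest, while the X86_GREG_xXX constants
 * run AX=0, CX=1, DX=2, BX=3, SP=4, BP=5, SI=6, DI=7.  The 7 - X86_GREG_xXX
 * expression therefore indexes the mapping as:
 *
 *      pa16Mem[0]=DI  [1]=SI  [2]=BP  [3]=SP (ignored)
 *      pa16Mem[4]=BX  [5]=DX  [6]=CX  [7]=AX
 *
 * The same layout, with dwords, applies to the 32-bit variants below.
 */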
218
219
220/**
221 * Implements a 32-bit popa.
222 */
223IEM_CIMPL_DEF_0(iemCImpl_popa_32)
224{
225 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
226 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
227 RTGCPTR GCPtrLast = GCPtrStart + 31;
228 VBOXSTRICTRC rcStrict;
229
230 /*
231 * The docs are a bit hard to comprehend here, but it looks like we wrap
232 * around in real mode as long as none of the individual "popa" crosses the
233 * end of the stack segment. In protected mode we check the whole access
234 * in one go. For efficiency, only do the word-by-word thing if we're in
235 * danger of wrapping around.
236 */
237 /** @todo do popa boundary / wrap-around checks. */
238 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
239 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
240 {
241 /* dword by dword */
242 RTUINT64U TmpRsp;
243 TmpRsp.u = pCtx->rsp;
244 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
245 if (rcStrict == VINF_SUCCESS)
246 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
247 if (rcStrict == VINF_SUCCESS)
248 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
249 if (rcStrict == VINF_SUCCESS)
250 {
251 iemRegAddToRspEx(&TmpRsp, 4, pCtx); /* skip esp (4 bytes) */
252 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
253 }
254 if (rcStrict == VINF_SUCCESS)
255 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
256 if (rcStrict == VINF_SUCCESS)
257 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
258 if (rcStrict == VINF_SUCCESS)
259 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
260 if (rcStrict == VINF_SUCCESS)
261 {
262#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
263 pCtx->rdi &= UINT32_MAX;
264 pCtx->rsi &= UINT32_MAX;
265 pCtx->rbp &= UINT32_MAX;
266 pCtx->rbx &= UINT32_MAX;
267 pCtx->rdx &= UINT32_MAX;
268 pCtx->rcx &= UINT32_MAX;
269 pCtx->rax &= UINT32_MAX;
270#endif
271 pCtx->rsp = TmpRsp.u;
272 iemRegAddToRip(pIemCpu, cbInstr);
273 }
274 }
275 else
276 {
277 uint32_t const *pa32Mem;
278 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
279 if (rcStrict == VINF_SUCCESS)
280 {
281 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
282 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
283 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
284 /* skip esp */
285 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
286 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
287 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
288 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
289 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
290 if (rcStrict == VINF_SUCCESS)
291 {
292 iemRegAddToRsp(pCtx, 32);
293 iemRegAddToRip(pIemCpu, cbInstr);
294 }
295 }
296 }
297 return rcStrict;
298}
299
300
301/**
302 * Implements a 16-bit pusha.
303 */
304IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
305{
306 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
307 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
308 RTGCPTR GCPtrBottom = GCPtrTop - 15;
309 VBOXSTRICTRC rcStrict;
310
311 /*
312 * The docs are a bit hard to comprehend here, but it looks like we wrap
313 * around in real mode as long as none of the individual "pusha" crosses the
314 * end of the stack segment. In protected mode we check the whole access
315 * in one go. For efficiency, only do the word-by-word thing if we're in
316 * danger of wrapping around.
317 */
318 /** @todo do pusha boundary / wrap-around checks. */
319 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
320 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
321 {
322 /* word-by-word */
323 RTUINT64U TmpRsp;
324 TmpRsp.u = pCtx->rsp;
325 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
326 if (rcStrict == VINF_SUCCESS)
327 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
328 if (rcStrict == VINF_SUCCESS)
329 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
330 if (rcStrict == VINF_SUCCESS)
331 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
332 if (rcStrict == VINF_SUCCESS)
333 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
334 if (rcStrict == VINF_SUCCESS)
335 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
336 if (rcStrict == VINF_SUCCESS)
337 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
338 if (rcStrict == VINF_SUCCESS)
339 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 {
342 pCtx->rsp = TmpRsp.u;
343 iemRegAddToRip(pIemCpu, cbInstr);
344 }
345 }
346 else
347 {
348 GCPtrBottom--;
349 uint16_t *pa16Mem = NULL;
350 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
351 if (rcStrict == VINF_SUCCESS)
352 {
353 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
354 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
355 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
356 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
357 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
358 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
359 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
360 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
361 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
362 if (rcStrict == VINF_SUCCESS)
363 {
364 iemRegSubFromRsp(pCtx, 16);
365 iemRegAddToRip(pIemCpu, cbInstr);
366 }
367 }
368 }
369 return rcStrict;
370}
371
372
373/**
374 * Implements a 32-bit pusha.
375 */
376IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
377{
378 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
379 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
380 RTGCPTR GCPtrBottom = GCPtrTop - 31;
381 VBOXSTRICTRC rcStrict;
382
383 /*
384 * The docs are a bit hard to comprehend here, but it looks like we wrap
385 * around in real mode as long as none of the individual "pusha" crosses the
386 * end of the stack segment. In protected mode we check the whole access
387 * in one go. For efficiency, only do the word-by-word thing if we're in
388 * danger of wrapping around.
389 */
390 /** @todo do pusha boundary / wrap-around checks. */
391 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
392 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
393 {
394 /* dword by dword */
395 RTUINT64U TmpRsp;
396 TmpRsp.u = pCtx->rsp;
397 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
398 if (rcStrict == VINF_SUCCESS)
399 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
400 if (rcStrict == VINF_SUCCESS)
401 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
402 if (rcStrict == VINF_SUCCESS)
403 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
404 if (rcStrict == VINF_SUCCESS)
405 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
406 if (rcStrict == VINF_SUCCESS)
407 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
408 if (rcStrict == VINF_SUCCESS)
409 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 {
414 pCtx->rsp = TmpRsp.u;
415 iemRegAddToRip(pIemCpu, cbInstr);
416 }
417 }
418 else
419 {
420 GCPtrBottom--;
421 uint32_t *pa32Mem;
422 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
423 if (rcStrict == VINF_SUCCESS)
424 {
425 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
426 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
427 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
428 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
429 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
430 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
431 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
432 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
433 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
434 if (rcStrict == VINF_SUCCESS)
435 {
436 iemRegSubFromRsp(pCtx, 32);
437 iemRegAddToRip(pIemCpu, cbInstr);
438 }
439 }
440 }
441 return rcStrict;
442}
443
444
445/**
446 * Implements pushf.
447 *
449 * @param enmEffOpSize The effective operand size.
450 */
451IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
452{
453 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
454
455 /*
456 * If we're in V8086 mode some care is required (which is why we're
457 * doing this in a C implementation).
458 */
459 uint32_t fEfl = pCtx->eflags.u;
460 if ( (fEfl & X86_EFL_VM)
461 && X86_EFL_GET_IOPL(fEfl) != 3 )
462 {
463 Assert(pCtx->cr0 & X86_CR0_PE);
464 if ( enmEffOpSize != IEMMODE_16BIT
465 || !(pCtx->cr4 & X86_CR4_VME))
466 return iemRaiseGeneralProtectionFault0(pIemCpu);
467 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
468 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
469 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
470 }
471
472 /*
473 * Ok, clear RF and VM and push the flags.
474 */
475 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
476
477 VBOXSTRICTRC rcStrict;
478 switch (enmEffOpSize)
479 {
480 case IEMMODE_16BIT:
481 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
482 break;
483 case IEMMODE_32BIT:
484 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
485 break;
486 case IEMMODE_64BIT:
487 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
488 break;
489 IEM_NOT_REACHED_DEFAULT_CASE_RET();
490 }
491 if (rcStrict != VINF_SUCCESS)
492 return rcStrict;
493
494 iemRegAddToRip(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
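
/*
 * Illustration of the VIF handling in the V8086/VME path above: VIF is
 * EFLAGS bit 19 and IF is bit 9, so (fEfl & X86_EFL_VIF) >> (19 - 9) moves
 * the virtual interrupt flag into the IF position of the 16-bit image that
 * gets pushed.  E.g. fEfl = 0x00080002 (VIF set, IF clear) is pushed as a
 * word with bit 9 (0x0200) set.
 */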
497
498
499/**
500 * Implements popf.
501 *
502 * @param enmEffOpSize The effective operand size.
503 */
504IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
505{
506 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
507 uint32_t const fEflOld = pCtx->eflags.u;
508 VBOXSTRICTRC rcStrict;
509 uint32_t fEflNew;
510
511 /*
512 * V8086 is special as usual.
513 */
514 if (fEflOld & X86_EFL_VM)
515 {
516 /*
517 * Almost anything goes if IOPL is 3.
518 */
519 if (X86_EFL_GET_IOPL(fEflOld) == 3)
520 {
521 switch (enmEffOpSize)
522 {
523 case IEMMODE_16BIT:
524 {
525 uint16_t u16Value;
526 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
527 if (rcStrict != VINF_SUCCESS)
528 return rcStrict;
529 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
530 break;
531 }
532 case IEMMODE_32BIT:
533 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
534 if (rcStrict != VINF_SUCCESS)
535 return rcStrict;
536 break;
537 IEM_NOT_REACHED_DEFAULT_CASE_RET();
538 }
539
540 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
541 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
542 }
543 /*
544 * Interrupt flag virtualization with CR4.VME=1.
545 */
546 else if ( enmEffOpSize == IEMMODE_16BIT
547 && (pCtx->cr4 & X86_CR4_VME) )
548 {
549 uint16_t u16Value;
550 RTUINT64U TmpRsp;
551 TmpRsp.u = pCtx->rsp;
552 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
553 if (rcStrict != VINF_SUCCESS)
554 return rcStrict;
555
556 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
557 * or before? */
558 if ( ( (u16Value & X86_EFL_IF)
559 && (fEflOld & X86_EFL_VIP))
560 || (u16Value & X86_EFL_TF) )
561 return iemRaiseGeneralProtectionFault0(pIemCpu);
562
563 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
564 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
565 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
566 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
567
568 pCtx->rsp = TmpRsp.u;
569 }
570 else
571 return iemRaiseGeneralProtectionFault0(pIemCpu);
572
573 }
574 /*
575 * Not in V8086 mode.
576 */
577 else
578 {
579 /* Pop the flags. */
580 switch (enmEffOpSize)
581 {
582 case IEMMODE_16BIT:
583 {
584 uint16_t u16Value;
585 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
586 if (rcStrict != VINF_SUCCESS)
587 return rcStrict;
588 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
589 break;
590 }
591 case IEMMODE_32BIT:
592 case IEMMODE_64BIT:
593 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
594 if (rcStrict != VINF_SUCCESS)
595 return rcStrict;
596 break;
597 IEM_NOT_REACHED_DEFAULT_CASE_RET();
598 }
599
600 /* Merge them with the current flags. */
601 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
602 || pIemCpu->uCpl == 0)
603 {
604 fEflNew &= X86_EFL_POPF_BITS;
605 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
606 }
607 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
608 {
609 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
610 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
611 }
612 else
613 {
614 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
615 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
616 }
617 }
618
619 /*
620 * Commit the flags.
621 */
622 Assert(fEflNew & RT_BIT_32(1));
623 pCtx->eflags.u = fEflNew;
624 iemRegAddToRip(pIemCpu, cbInstr);
625
626 return VINF_SUCCESS;
627}
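
/*
 * Summary of the privilege dependent merge in the protected mode path of
 * popf above (illustration only):
 *
 *      CPL == 0 (or IOPL/IF unchanged)  -> IOPL and IF both taken from the
 *                                          popped value.
 *      0 < CPL <= IOPL                  -> IF taken from the popped value,
 *                                          IOPL kept.
 *      CPL > IOPL                       -> both IF and IOPL keep their old
 *                                          values.
 *
 * In every case the remaining bits are filtered through X86_EFL_POPF_BITS
 * before being merged with the old flags value.
 */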
628
629
630/**
631 * Implements a 16-bit indirect call.
632 *
633 * @param uNewPC The new program counter (RIP) value (loaded from the
634 * operand).
636 */
637IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
638{
639 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
640 uint16_t uOldPC = pCtx->ip + cbInstr;
641 if (uNewPC > pCtx->cs.u32Limit)
642 return iemRaiseGeneralProtectionFault0(pIemCpu);
643
644 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
645 if (rcStrict != VINF_SUCCESS)
646 return rcStrict;
647
648 pCtx->rip = uNewPC;
649 return VINF_SUCCESS;
650
651}
652
653
654/**
655 * Implements a 16-bit relative call.
656 *
657 * @param offDisp The displacement offset.
658 */
659IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
660{
661 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
662 uint16_t uOldPC = pCtx->ip + cbInstr;
663 uint16_t uNewPC = uOldPC + offDisp;
664 if (uNewPC > pCtx->cs.u32Limit)
665 return iemRaiseGeneralProtectionFault0(pIemCpu);
666
667 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
668 if (rcStrict != VINF_SUCCESS)
669 return rcStrict;
670
671 pCtx->rip = uNewPC;
672 return VINF_SUCCESS;
673}
674
675
676/**
677 * Implements a 32-bit indirect call.
678 *
679 * @param uNewPC The new program counter (RIP) value (loaded from the
680 * operand).
682 */
683IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
684{
685 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
686 uint32_t uOldPC = pCtx->eip + cbInstr;
687 if (uNewPC > pCtx->cs.u32Limit)
688 return iemRaiseGeneralProtectionFault0(pIemCpu);
689
690 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
691 if (rcStrict != VINF_SUCCESS)
692 return rcStrict;
693
694 pCtx->rip = uNewPC;
695 return VINF_SUCCESS;
696
697}
698
699
700/**
701 * Implements a 32-bit relative call.
702 *
703 * @param offDisp The displacement offset.
704 */
705IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
706{
707 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
708 uint32_t uOldPC = pCtx->eip + cbInstr;
709 uint32_t uNewPC = uOldPC + offDisp;
710 if (uNewPC > pCtx->cs.u32Limit)
711 return iemRaiseGeneralProtectionFault0(pIemCpu);
712
713 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
714 if (rcStrict != VINF_SUCCESS)
715 return rcStrict;
716
717 pCtx->rip = uNewPC;
718 return VINF_SUCCESS;
719}
720
721
722/**
723 * Implements a 64-bit indirect call.
724 *
725 * @param uNewPC The new program counter (RIP) value (loaded from the
726 * operand).
728 */
729IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
730{
731 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
732 uint64_t uOldPC = pCtx->rip + cbInstr;
733 if (!IEM_IS_CANONICAL(uNewPC))
734 return iemRaiseGeneralProtectionFault0(pIemCpu);
735
736 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
737 if (rcStrict != VINF_SUCCESS)
738 return rcStrict;
739
740 pCtx->rip = uNewPC;
741 return VINF_SUCCESS;
742
743}
744
745
746/**
747 * Implements a 64-bit relative call.
748 *
749 * @param offDisp The displacement offset.
750 */
751IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
752{
753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
754 uint64_t uOldPC = pCtx->rip + cbInstr;
755 uint64_t uNewPC = uOldPC + offDisp;
756 if (!IEM_IS_CANONICAL(uNewPC))
757 return iemRaiseNotCanonical(pIemCpu);
758
759 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 pCtx->rip = uNewPC;
764 return VINF_SUCCESS;
765}
766
767
768/**
769 * Implements far jumps and calls thru task segments (TSS).
770 *
771 * @param uSel The selector.
772 * @param enmBranch The kind of branching we're performing.
773 * @param enmEffOpSize The effective operand size.
774 * @param pDesc The descriptor corresponding to @a uSel. The type is
775 * task segment (TSS).
776 */
777IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
778{
779 /* Call various functions to do the work. */
780 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
781}
782
783
784/**
785 * Implements far jumps and calls thru task gates.
786 *
787 * @param uSel The selector.
788 * @param enmBranch The kind of branching we're performing.
789 * @param enmEffOpSize The effective operand size.
790 * @param pDesc The descriptor corresponding to @a uSel. The type is
791 * task gate.
792 */
793IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
794{
795 /* Call various functions to do the work. */
796 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
797}
798
799
800/**
801 * Implements far jumps and calls thru call gates.
802 *
803 * @param uSel The selector.
804 * @param enmBranch The kind of branching we're performing.
805 * @param enmEffOpSize The effective operand size.
806 * @param pDesc The descriptor corresponding to @a uSel. The type is
807 * call gate.
808 */
809IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
810{
811 /* Call various functions to do the work. */
812 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
813}
814
815
816/**
817 * Implements far jumps and calls thru system selectors.
818 *
819 * @param uSel The selector.
820 * @param enmBranch The kind of branching we're performing.
821 * @param enmEffOpSize The effective operand size.
822 * @param pDesc The descriptor corresponding to @a uSel.
823 */
824IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
825{
826 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
827 Assert((uSel & X86_SEL_MASK_OFF_RPL));
828
829 if (IEM_IS_LONG_MODE(pIemCpu))
830 switch (pDesc->Legacy.Gen.u4Type)
831 {
832 case AMD64_SEL_TYPE_SYS_CALL_GATE:
833 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
834
835 default:
836 case AMD64_SEL_TYPE_SYS_LDT:
837 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
838 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
839 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
840 case AMD64_SEL_TYPE_SYS_INT_GATE:
841 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
842 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
843
844 }
845
846 switch (pDesc->Legacy.Gen.u4Type)
847 {
848 case X86_SEL_TYPE_SYS_286_CALL_GATE:
849 case X86_SEL_TYPE_SYS_386_CALL_GATE:
850 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
851
852 case X86_SEL_TYPE_SYS_TASK_GATE:
853 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
854
855 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
856 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
857 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
858
859 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
860 Log(("branch %04x -> busy 286 TSS\n", uSel));
861 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
862
863 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
864 Log(("branch %04x -> busy 386 TSS\n", uSel));
865 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
866
867 default:
868 case X86_SEL_TYPE_SYS_LDT:
869 case X86_SEL_TYPE_SYS_286_INT_GATE:
870 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
871 case X86_SEL_TYPE_SYS_386_INT_GATE:
872 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
873 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
874 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
875 }
876}
877
878
879/**
880 * Implements far jumps.
881 *
882 * @param uSel The selector.
883 * @param offSeg The segment offset.
884 * @param enmEffOpSize The effective operand size.
885 */
886IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
887{
888 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
889 NOREF(cbInstr);
890 Assert(offSeg <= UINT32_MAX);
891
892 /*
893 * Real mode and V8086 mode are easy. The only snag seems to be that
894 * CS.limit doesn't change and the limit check is done against the current
895 * limit.
896 */
897 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
898 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
899 {
900 if (offSeg > pCtx->cs.u32Limit)
901 return iemRaiseGeneralProtectionFault0(pIemCpu);
902
903 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
904 pCtx->rip = offSeg;
905 else
906 pCtx->rip = offSeg & UINT16_MAX;
907 pCtx->cs.Sel = uSel;
908 pCtx->cs.ValidSel = uSel;
909 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
910 pCtx->cs.u64Base = (uint32_t)uSel << 4;
911 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
912 * after disabling PE.) Check with VT-x and AMD-V. */
913#ifdef IEM_VERIFICATION_MODE
914 pCtx->cs.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
915#endif
916 return VINF_SUCCESS;
917 }
918
919 /*
920 * Protected mode. Need to parse the specified descriptor...
921 */
922 if (!(uSel & X86_SEL_MASK_OFF_RPL))
923 {
924 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
925 return iemRaiseGeneralProtectionFault0(pIemCpu);
926 }
927
928 /* Fetch the descriptor. */
929 IEMSELDESC Desc;
930 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
931 if (rcStrict != VINF_SUCCESS)
932 return rcStrict;
933
934 /* Is it there? */
935 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
936 {
937 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
938 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
939 }
940
941 /*
942 * Deal with it according to its type. We do the standard code selectors
943 * here and dispatch the system selectors to worker functions.
944 */
945 if (!Desc.Legacy.Gen.u1DescType)
946 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
947
948 /* Only code segments. */
949 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
950 {
951 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
952 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
953 }
954
955 /* L vs D. */
956 if ( Desc.Legacy.Gen.u1Long
957 && Desc.Legacy.Gen.u1DefBig
958 && IEM_IS_LONG_MODE(pIemCpu))
959 {
960 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
961 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
962 }
963
964 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
965 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
966 {
967 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
968 {
969 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
970 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
971 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
972 }
973 }
974 else
975 {
976 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
977 {
978 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
979 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
980 }
981 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
982 {
983 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
984 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
985 }
986 }
987
988 /* Chop the high bits if 16-bit (Intel says so). */
989 if (enmEffOpSize == IEMMODE_16BIT)
990 offSeg &= UINT16_MAX;
991
992 /* Limit check. (Should alternatively check for non-canonical addresses
993 here, but that is ruled out by offSeg being 32-bit, right?) */
994 uint64_t u64Base;
995 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
996 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
997 u64Base = 0;
998 else
999 {
1000 if (offSeg > cbLimit)
1001 {
1002 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1003 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1004 }
1005 u64Base = X86DESC_BASE(&Desc.Legacy);
1006 }
1007
1008 /*
1009 * Ok, everything checked out fine. Now set the accessed bit before
1010 * committing the result into CS, CSHID and RIP.
1011 */
1012 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1013 {
1014 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1015 if (rcStrict != VINF_SUCCESS)
1016 return rcStrict;
1017#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1018 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1019#endif
1020 }
1021
1022 /* commit */
1023 pCtx->rip = offSeg;
1024 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1025 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1026 pCtx->cs.ValidSel = pCtx->cs.Sel;
1027 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1028 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1029 pCtx->cs.u32Limit = cbLimit;
1030 pCtx->cs.u64Base = u64Base;
1031 /** @todo check if the hidden bits are loaded correctly for 64-bit
1032 * mode. */
1033 return VINF_SUCCESS;
1034}
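
/*
 * Note on the real/V86 mode branch of the far jump above (illustration
 * only): the new CS base is derived directly from the selector, so for
 * example 'jmp far 0f000h:0fff0h' in real mode loads CS.Sel = 0xf000,
 * CS.u64Base = 0xf000 << 4 = 0xf0000 and RIP = 0xfff0.
 */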
1035
1036
1037/**
1038 * Implements far calls.
1039 *
1040 * This is very similar to iemCImpl_FarJmp.
1041 *
1042 * @param uSel The selector.
1043 * @param offSeg The segment offset.
1044 * @param enmEffOpSize The operand size (in case we need it).
1045 */
1046IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1047{
1048 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1049 VBOXSTRICTRC rcStrict;
1050 uint64_t uNewRsp;
1051 RTPTRUNION uPtrRet;
1052
1053 /*
1054 * Real mode and V8086 mode are easy. The only snag seems to be that
1055 * CS.limit doesn't change and the limit check is done against the current
1056 * limit.
1057 */
1058 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1059 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1060 {
1061 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1062
1063 /* Check stack first - may #SS(0). */
1064 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1065 &uPtrRet.pv, &uNewRsp);
1066 if (rcStrict != VINF_SUCCESS)
1067 return rcStrict;
1068
1069 /* Check the target address range. */
1070 if (offSeg > UINT32_MAX)
1071 return iemRaiseGeneralProtectionFault0(pIemCpu);
1072
1073 /* Everything is fine, push the return address. */
1074 if (enmEffOpSize == IEMMODE_16BIT)
1075 {
1076 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1077 uPtrRet.pu16[1] = pCtx->cs.Sel;
1078 }
1079 else
1080 {
1081 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1082 uPtrRet.pu16[3] = pCtx->cs.Sel;
1083 }
1084 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1085 if (rcStrict != VINF_SUCCESS)
1086 return rcStrict;
1087
1088 /* Branch. */
1089 pCtx->rip = offSeg;
1090 pCtx->cs.Sel = uSel;
1091 pCtx->cs.ValidSel = uSel;
1092 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1093 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1094 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
1095 * after disabling PE.) Check with VT-x and AMD-V. */
1096#ifdef IEM_VERIFICATION_MODE
1097 pCtx->cs.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
1098#endif
1099 return VINF_SUCCESS;
1100 }
1101
1102 /*
1103 * Protected mode. Need to parse the specified descriptor...
1104 */
1105 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1106 {
1107 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1108 return iemRaiseGeneralProtectionFault0(pIemCpu);
1109 }
1110
1111 /* Fetch the descriptor. */
1112 IEMSELDESC Desc;
1113 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1114 if (rcStrict != VINF_SUCCESS)
1115 return rcStrict;
1116
1117 /*
1118 * Deal with it according to its type. We do the standard code selectors
1119 * here and dispatch the system selectors to worker functions.
1120 */
1121 if (!Desc.Legacy.Gen.u1DescType)
1122 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1123
1124 /* Only code segments. */
1125 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1126 {
1127 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1128 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1129 }
1130
1131 /* L vs D. */
1132 if ( Desc.Legacy.Gen.u1Long
1133 && Desc.Legacy.Gen.u1DefBig
1134 && IEM_IS_LONG_MODE(pIemCpu))
1135 {
1136 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1137 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1138 }
1139
1140 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1141 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1142 {
1143 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1144 {
1145 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1146 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1147 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1148 }
1149 }
1150 else
1151 {
1152 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1153 {
1154 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1155 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1156 }
1157 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1158 {
1159 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1160 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1161 }
1162 }
1163
1164 /* Is it there? */
1165 if (!Desc.Legacy.Gen.u1Present)
1166 {
1167 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1168 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1169 }
1170
1171 /* Check stack first - may #SS(0). */
1172 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1173 * 16-bit code cause a two or four byte CS to be pushed? */
1174 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1175 enmEffOpSize == IEMMODE_64BIT ? 8+8
1176 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1177 &uPtrRet.pv, &uNewRsp);
1178 if (rcStrict != VINF_SUCCESS)
1179 return rcStrict;
1180
1181 /* Chop the high bits if 16-bit (Intel says so). */
1182 if (enmEffOpSize == IEMMODE_16BIT)
1183 offSeg &= UINT16_MAX;
1184
1185 /* Limit / canonical check. */
1186 uint64_t u64Base;
1187 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1188 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1189 {
1190 if (!IEM_IS_CANONICAL(offSeg))
1191 {
1192 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1193 return iemRaiseNotCanonical(pIemCpu);
1194 }
1195 u64Base = 0;
1196 }
1197 else
1198 {
1199 if (offSeg > cbLimit)
1200 {
1201 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1202 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1203 }
1204 u64Base = X86DESC_BASE(&Desc.Legacy);
1205 }
1206
1207 /*
1208 * Now set the accessed bit before
1209 * writing the return address to the stack and committing the result into
1210 * CS, CSHID and RIP.
1211 */
1212 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1213 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1214 {
1215 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1216 if (rcStrict != VINF_SUCCESS)
1217 return rcStrict;
1218#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1219 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1220#endif
1221 }
1222
1223 /* stack */
1224 if (enmEffOpSize == IEMMODE_16BIT)
1225 {
1226 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1227 uPtrRet.pu16[1] = pCtx->cs.Sel;
1228 }
1229 else if (enmEffOpSize == IEMMODE_32BIT)
1230 {
1231 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1232 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1233 }
1234 else
1235 {
1236 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1237 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1238 }
1239 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1240 if (rcStrict != VINF_SUCCESS)
1241 return rcStrict;
1242
1243 /* commit */
1244 pCtx->rip = offSeg;
1245 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1246 pCtx->cs.Sel |= pIemCpu->uCpl;
1247 pCtx->cs.ValidSel = pCtx->cs.Sel;
1248 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1249 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1250 pCtx->cs.u32Limit = cbLimit;
1251 pCtx->cs.u64Base = u64Base;
1252 /** @todo check if the hidden bits are loaded correctly for 64-bit
1253 * mode. */
1254 return VINF_SUCCESS;
1255}
1256
1257
1258/**
1259 * Implements retf.
1260 *
1261 * @param enmEffOpSize The effective operand size.
1262 * @param cbPop The number of bytes of arguments to pop from
1263 * the stack.
1264 */
1265IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1266{
1267 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1268 VBOXSTRICTRC rcStrict;
1269 RTCPTRUNION uPtrFrame;
1270 uint64_t uNewRsp;
1271 uint64_t uNewRip;
1272 uint16_t uNewCs;
1273 NOREF(cbInstr);
1274
1275 /*
1276 * Read the stack values first.
1277 */
1278 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1279 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1280 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1281 if (rcStrict != VINF_SUCCESS)
1282 return rcStrict;
1283 if (enmEffOpSize == IEMMODE_16BIT)
1284 {
1285 uNewRip = uPtrFrame.pu16[0];
1286 uNewCs = uPtrFrame.pu16[1];
1287 }
1288 else if (enmEffOpSize == IEMMODE_32BIT)
1289 {
1290 uNewRip = uPtrFrame.pu32[0];
1291 uNewCs = uPtrFrame.pu16[2];
1292 }
1293 else
1294 {
1295 uNewRip = uPtrFrame.pu64[0];
1296 uNewCs = uPtrFrame.pu16[4];
1297 }
1298
1299 /*
1300 * Real mode and V8086 mode are easy.
1301 */
1302 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1303 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1304 {
1305 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1306 /** @todo check how this is supposed to work if sp=0xfffe. */
1307
1308 /* Check the limit of the new EIP. */
1309 /** @todo Intel pseudo code only does the limit check for 16-bit
1310 * operands, AMD does not make any distinction. What is right? */
1311 if (uNewRip > pCtx->cs.u32Limit)
1312 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1313
1314 /* commit the operation. */
1315 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1316 if (rcStrict != VINF_SUCCESS)
1317 return rcStrict;
1318 pCtx->rip = uNewRip;
1319 pCtx->cs.Sel = uNewCs;
1320 pCtx->cs.ValidSel = uNewCs;
1321 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1322 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1323 /** @todo do we load attribs and limit as well? */
1324 if (cbPop)
1325 iemRegAddToRsp(pCtx, cbPop);
1326 return VINF_SUCCESS;
1327 }
1328
1329 /*
1330 * Protected mode is complicated, of course.
1331 */
1332 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1333 {
1334 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1335 return iemRaiseGeneralProtectionFault0(pIemCpu);
1336 }
1337
1338 /* Fetch the descriptor. */
1339 IEMSELDESC DescCs;
1340 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1341 if (rcStrict != VINF_SUCCESS)
1342 return rcStrict;
1343
1344 /* Can only return to a code selector. */
1345 if ( !DescCs.Legacy.Gen.u1DescType
1346 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1347 {
1348 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1349 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1350 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1351 }
1352
1353 /* L vs D. */
1354 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1355 && DescCs.Legacy.Gen.u1DefBig
1356 && IEM_IS_LONG_MODE(pIemCpu))
1357 {
1358 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1359 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1360 }
1361
1362 /* DPL/RPL/CPL checks. */
1363 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1364 {
1365 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1366 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1367 }
1368
1369 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1370 {
1371 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1372 {
1373 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1374 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1375 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1376 }
1377 }
1378 else
1379 {
1380 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1381 {
1382 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1383 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1384 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1385 }
1386 }
1387
1388 /* Is it there? */
1389 if (!DescCs.Legacy.Gen.u1Present)
1390 {
1391 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1392 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1393 }
1394
1395 /*
1396 * Return to outer privilege? (We'll typically have entered via a call gate.)
1397 */
1398 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1399 {
1400 /* Read the return pointer, it comes before the parameters. */
1401 RTCPTRUNION uPtrStack;
1402 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1403 if (rcStrict != VINF_SUCCESS)
1404 return rcStrict;
1405 uint16_t uNewOuterSs;
1406 uint64_t uNewOuterRsp;
1407 if (enmEffOpSize == IEMMODE_16BIT)
1408 {
1409 uNewOuterRsp = uPtrFrame.pu16[0];
1410 uNewOuterSs = uPtrFrame.pu16[1];
1411 }
1412 else if (enmEffOpSize == IEMMODE_32BIT)
1413 {
1414 uNewOuterRsp = uPtrFrame.pu32[0];
1415 uNewOuterSs = uPtrFrame.pu16[2];
1416 }
1417 else
1418 {
1419 uNewOuterRsp = uPtrFrame.pu64[0];
1420 uNewOuterSs = uPtrFrame.pu16[4];
1421 }
1422
1423 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1424 and read the selector. */
1425 IEMSELDESC DescSs;
1426 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1427 {
1428 if ( !DescCs.Legacy.Gen.u1Long
1429 || (uNewOuterSs & X86_SEL_RPL) == 3)
1430 {
1431 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1432 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1433 return iemRaiseGeneralProtectionFault0(pIemCpu);
1434 }
1435 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1436 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1437 }
1438 else
1439 {
1440 /* Fetch the descriptor for the new stack segment. */
1441 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1442 if (rcStrict != VINF_SUCCESS)
1443 return rcStrict;
1444 }
1445
1446 /* Check that RPL of stack and code selectors match. */
1447 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1448 {
1449 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1450 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1451 }
1452
1453 /* Must be a writable data segment. */
1454 if ( !DescSs.Legacy.Gen.u1DescType
1455 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1456 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1457 {
1458 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1459 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1460 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1461 }
1462
1463 /* L vs D. (Not mentioned by intel.) */
1464 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1465 && DescSs.Legacy.Gen.u1DefBig
1466 && IEM_IS_LONG_MODE(pIemCpu))
1467 {
1468 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1469 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1470 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1471 }
1472
1473 /* DPL/RPL/CPL checks. */
1474 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1475 {
1476 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1477 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1478 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1479 }
1480
1481 /* Is it there? */
1482 if (!DescSs.Legacy.Gen.u1Present)
1483 {
1484 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1485 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewOuterSs);
1486 }
1487
1488 /* Calc SS limit.*/
1489 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1490
1491 /* Is RIP canonical or within CS.limit? */
1492 uint64_t u64Base;
1493 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1494
1495 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1496 {
1497 if (!IEM_IS_CANONICAL(uNewRip))
1498 {
1499 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1500 return iemRaiseNotCanonical(pIemCpu);
1501 }
1502 u64Base = 0;
1503 }
1504 else
1505 {
1506 if (uNewRip > cbLimitCs)
1507 {
1508 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1509 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1510 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1511 }
1512 u64Base = X86DESC_BASE(&DescCs.Legacy);
1513 }
1514
1515 /*
1516 * Now set the accessed bit before
1517 * writing the return address to the stack and committing the result into
1518 * CS, CSHID and RIP.
1519 */
1520 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1521 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1522 {
1523 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1524 if (rcStrict != VINF_SUCCESS)
1525 return rcStrict;
1526#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1527 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1528#endif
1529 }
1530 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1531 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1532 {
1533 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1534 if (rcStrict != VINF_SUCCESS)
1535 return rcStrict;
1536#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1537 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1538#endif
1539 }
1540
1541 /* commit */
1542 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1543 if (rcStrict != VINF_SUCCESS)
1544 return rcStrict;
1545 if (enmEffOpSize == IEMMODE_16BIT)
1546 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1547 else
1548 pCtx->rip = uNewRip;
1549 pCtx->cs.Sel = uNewCs;
1550 pCtx->cs.ValidSel = uNewCs;
1551 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1552 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1553 pCtx->cs.u32Limit = cbLimitCs;
1554 pCtx->cs.u64Base = u64Base;
1555 pCtx->rsp = uNewOuterRsp;
1556 pCtx->ss.Sel = uNewOuterSs;
1557 pCtx->ss.ValidSel = uNewOuterSs;
1558 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1559 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1560 pCtx->ss.u32Limit = cbLimitSs;
1561 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1562 pCtx->ss.u64Base = 0;
1563 else
1564 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1565
1566 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1567 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1568 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1569 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1570 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1571
1572 /** @todo check if the hidden bits are loaded correctly for 64-bit
1573 * mode. */
1574
1575 if (cbPop)
1576 iemRegAddToRsp(pCtx, cbPop);
1577
1578 /* Done! */
1579 }
1580 /*
1581 * Return to the same privilege level
1582 */
1583 else
1584 {
1585 /* Limit / canonical check. */
1586 uint64_t u64Base;
1587 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1588
1589 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1590 {
1591 if (!IEM_IS_CANONICAL(uNewRip))
1592 {
1593 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1594 return iemRaiseNotCanonical(pIemCpu);
1595 }
1596 u64Base = 0;
1597 }
1598 else
1599 {
1600 if (uNewRip > cbLimitCs)
1601 {
1602 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1603 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1604 }
1605 u64Base = X86DESC_BASE(&DescCs.Legacy);
1606 }
1607
1608 /*
1609 * Now set the accessed bit before
1610 * writing the return address to the stack and committing the result into
1611 * CS, CSHID and RIP.
1612 */
1613 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1614 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1615 {
1616 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1617 if (rcStrict != VINF_SUCCESS)
1618 return rcStrict;
1619#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1620 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1621#endif
1622 }
1623
1624 /* commit */
1625 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1626 if (rcStrict != VINF_SUCCESS)
1627 return rcStrict;
1628 if (enmEffOpSize == IEMMODE_16BIT)
1629 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1630 else
1631 pCtx->rip = uNewRip;
1632 pCtx->cs.Sel = uNewCs;
1633 pCtx->cs.ValidSel = uNewCs;
1634 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1635 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1636 pCtx->cs.u32Limit = cbLimitCs;
1637 pCtx->cs.u64Base = u64Base;
1638 /** @todo check if the hidden bits are loaded correctly for 64-bit
1639 * mode. */
1640 if (cbPop)
1641 iemRegAddToRsp(pCtx, cbPop);
1642 }
1643 return VINF_SUCCESS;
1644}
1645
1646
1647/**
1648 * Implements retn.
1649 *
1650 * We're doing this in C because of the \#GP that might be raised if the popped
1651 * program counter is out of bounds.
1652 *
1653 * @param enmEffOpSize The effective operand size.
1654 * @param cbPop The number of bytes of arguments to pop from
1655 * the stack.
1656 */
1657IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1658{
1659 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1660 NOREF(cbInstr);
1661
1662 /* Fetch the RSP from the stack. */
1663 VBOXSTRICTRC rcStrict;
1664 RTUINT64U NewRip;
1665 RTUINT64U NewRsp;
1666 NewRsp.u = pCtx->rsp;
1667 switch (enmEffOpSize)
1668 {
1669 case IEMMODE_16BIT:
1670 NewRip.u = 0;
1671 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1672 break;
1673 case IEMMODE_32BIT:
1674 NewRip.u = 0;
1675 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1676 break;
1677 case IEMMODE_64BIT:
1678 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1679 break;
1680 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1681 }
1682 if (rcStrict != VINF_SUCCESS)
1683 return rcStrict;
1684
1685 /* Check the new RSP before loading it. */
1686 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1687 * of it. The canonical test is performed here and for call. */
1688 if (enmEffOpSize != IEMMODE_64BIT)
1689 {
1690 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1691 {
1692 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1693 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1694 }
1695 }
1696 else
1697 {
1698 if (!IEM_IS_CANONICAL(NewRip.u))
1699 {
1700 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1701 return iemRaiseNotCanonical(pIemCpu);
1702 }
1703 }
1704
1705 /* Commit it. */
1706 pCtx->rip = NewRip.u;
1707 pCtx->rsp = NewRsp.u;
1708 if (cbPop)
1709 iemRegAddToRsp(pCtx, cbPop);
1710
1711 return VINF_SUCCESS;
1712}
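
/*
 * Example for the cbPop adjustment above (illustration only): a 'ret 8' in
 * 32-bit code pops a 4 byte return address into EIP and then adds a further
 * 8 to ESP, discarding two dword arguments left on the stack by the caller.
 */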
1713
1714
1715/**
1716 * Implements leave.
1717 *
1718 * We're doing this in C because messing with the stack registers is annoying
1719 * since they depend on SS attributes.
1720 *
1721 * @param enmEffOpSize The effective operand size.
1722 */
1723IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1724{
1725 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1726
1727 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1728 RTUINT64U NewRsp;
1729 if (pCtx->ss.Attr.n.u1Long)
1730 {
1731 /** @todo Check that LEAVE actually preserve the high EBP bits. */
1732 NewRsp.u = pCtx->rsp;
1733 NewRsp.Words.w0 = pCtx->bp;
1734 }
1735 else if (pCtx->ss.Attr.n.u1DefBig)
1736 NewRsp.u = pCtx->ebp;
1737 else
1738 NewRsp.u = pCtx->rbp;
1739
1740 /* Pop RBP according to the operand size. */
1741 VBOXSTRICTRC rcStrict;
1742 RTUINT64U NewRbp;
1743 switch (enmEffOpSize)
1744 {
1745 case IEMMODE_16BIT:
1746 NewRbp.u = pCtx->rbp;
1747 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1748 break;
1749 case IEMMODE_32BIT:
1750 NewRbp.u = 0;
1751 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1752 break;
1753 case IEMMODE_64BIT:
1754 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1755 break;
1756 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1757 }
1758 if (rcStrict != VINF_SUCCESS)
1759 return rcStrict;
1760
1761
1762 /* Commit it. */
1763 pCtx->rbp = NewRbp.u;
1764 pCtx->rsp = NewRsp.u;
1765 iemRegAddToRip(pIemCpu, cbInstr);
1766
1767 return VINF_SUCCESS;
1768}
1769
1770
1771/**
1772 * Implements int3 and int XX.
1773 *
1774 * @param u8Int The interrupt vector number.
1775 * @param fIsBpInstr Whether the instruction is the INT3 breakpoint instruction.
1776 */
1777IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1778{
1779 Assert(pIemCpu->cXcptRecursions == 0);
1780 return iemRaiseXcptOrInt(pIemCpu,
1781 cbInstr,
1782 u8Int,
1783 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1784 0,
1785 0);
1786}
1787
1788
1789/**
1790 * Implements iret for real mode and V8086 mode.
1791 *
1792 * @param enmEffOpSize The effective operand size.
1793 */
1794IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1795{
1796 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1797 NOREF(cbInstr);
1798
1799 /*
1800 * In V8086 mode iret raises #GP(0) when IOPL is below 3 and VME isn't enabled.
1801 */
1802 if ( pCtx->eflags.Bits.u1VM && pCtx->eflags.Bits.u2IOPL != 3
1803 && !(pCtx->cr4 & X86_CR4_VME))
1804 return iemRaiseGeneralProtectionFault0(pIemCpu);
1805
1806 /*
1807 * Do the stack bits, but don't commit RSP before everything checks
1808 * out right.
1809 */
1810 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1811 VBOXSTRICTRC rcStrict;
1812 RTCPTRUNION uFrame;
1813 uint16_t uNewCs;
1814 uint32_t uNewEip;
1815 uint32_t uNewFlags;
1816 uint64_t uNewRsp;
1817 if (enmEffOpSize == IEMMODE_32BIT)
1818 {
1819 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1820 if (rcStrict != VINF_SUCCESS)
1821 return rcStrict;
1822 uNewEip = uFrame.pu32[0];
1823 uNewCs = (uint16_t)uFrame.pu32[1];
1824 uNewFlags = uFrame.pu32[2];
1825 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1826 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1827 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1828 | X86_EFL_ID;
1829 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1830 }
1831 else
1832 {
1833 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1834 if (rcStrict != VINF_SUCCESS)
1835 return rcStrict;
1836 uNewEip = uFrame.pu16[0];
1837 uNewCs = uFrame.pu16[1];
1838 uNewFlags = uFrame.pu16[2];
1839 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1840 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1841 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1842 /** @todo The intel pseudo code does not indicate what happens to
1843 * reserved flags. We just ignore them. */
1844 }
1845 /** @todo Check how this is supposed to work if sp=0xfffe. */
1846
1847 /*
1848 * Check the limit of the new EIP.
1849 */
1850 /** @todo Only the AMD pseudo code checks the limit here, what's
1851 * right? */
1852 if (uNewEip > pCtx->cs.u32Limit)
1853 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1854
1855 /*
1856 * V8086 checks and flag adjustments
1857 */
1858 if (pCtx->eflags.Bits.u1VM)
1859 {
1860 if (pCtx->eflags.Bits.u2IOPL == 3)
1861 {
1862 /* Preserve IOPL and clear RF. */
1863 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1864 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1865 }
1866 else if ( enmEffOpSize == IEMMODE_16BIT
1867 && ( !(uNewFlags & X86_EFL_IF)
1868 || !pCtx->eflags.Bits.u1VIP )
1869 && !(uNewFlags & X86_EFL_TF) )
1870 {
1871 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1872 uNewFlags &= ~X86_EFL_VIF;
1873 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
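 /* X86_EFL_IF is bit 9 and X86_EFL_VIF bit 19, so the shift above copies the
    popped IF value into VIF. */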
1874 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1875 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1876 }
1877 else
1878 return iemRaiseGeneralProtectionFault0(pIemCpu);
1879 }
1880
1881 /*
1882 * Commit the operation.
1883 */
1884 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1885 if (rcStrict != VINF_SUCCESS)
1886 return rcStrict;
1887 pCtx->rip = uNewEip;
1888 pCtx->cs.Sel = uNewCs;
1889 pCtx->cs.ValidSel = uNewCs;
1890 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1891 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1892 /** @todo do we load attribs and limit as well? */
1893 Assert(uNewFlags & X86_EFL_1);
1894 pCtx->eflags.u = uNewFlags;
1895
1896 return VINF_SUCCESS;
1897}
1898
1899
1900/**
1901 * Implements iret for protected mode returning to V8086 mode.
1902 *
1903 * @param enmEffOpSize The effective operand size.
1904 * @param uNewEip The new EIP.
1905 * @param uNewCs The new CS.
1906 * @param uNewFlags The new EFLAGS.
1907 * @param uNewRsp The RSP after the initial IRET frame.
1908 */
1909IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, IEMMODE, enmEffOpSize, uint32_t, uNewEip, uint16_t, uNewCs,
1910 uint32_t, uNewFlags, uint64_t, uNewRsp)
1911{
1912 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1913}
1914
1915
1916/**
1917 * Implements iret for protected mode returning via a nested task.
1918 *
1919 * @param enmEffOpSize The effective operand size.
1920 */
1921IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
1922{
1923 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1924}
1925
1926
1927/**
1928 * Implements iret for protected mode
1929 *
1930 * @param enmEffOpSize The effective operand size.
1931 */
1932IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
1933{
1934 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1935 NOREF(cbInstr);
1936
1937 /*
1938 * Nested task return.
1939 */
1940 if (pCtx->eflags.Bits.u1NT)
1941 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
1942
1943 /*
1944 * Normal return.
1945 *
1946 * Do the stack bits, but don't commit RSP before everything checks
1947 * out right.
1948 */
1949 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1950 VBOXSTRICTRC rcStrict;
1951 RTCPTRUNION uFrame;
1952 uint16_t uNewCs;
1953 uint32_t uNewEip;
1954 uint32_t uNewFlags;
1955 uint64_t uNewRsp;
1956 if (enmEffOpSize == IEMMODE_32BIT)
1957 {
1958 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1959 if (rcStrict != VINF_SUCCESS)
1960 return rcStrict;
1961 uNewEip = uFrame.pu32[0];
1962 uNewCs = (uint16_t)uFrame.pu32[1];
1963 uNewFlags = uFrame.pu32[2];
1964 }
1965 else
1966 {
1967 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1968 if (rcStrict != VINF_SUCCESS)
1969 return rcStrict;
1970 uNewEip = uFrame.pu16[0];
1971 uNewCs = uFrame.pu16[1];
1972 uNewFlags = uFrame.pu16[2];
1973 }
1974 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
1975 if (rcStrict != VINF_SUCCESS)
1976 return rcStrict;
1977
1978 /*
1979 * We're hopefully not returning to V8086 mode...
1980 */
1981 if ( (uNewFlags & X86_EFL_VM)
1982 && pIemCpu->uCpl == 0)
1983 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, enmEffOpSize, uNewEip, uNewCs, uNewFlags, uNewRsp);
1984
1985 /*
1986 * Protected mode.
1987 */
1988 /* Read the CS descriptor. */
1989 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1990 {
1991 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
1992 return iemRaiseGeneralProtectionFault0(pIemCpu);
1993 }
1994
1995 IEMSELDESC DescCS;
1996 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
1997 if (rcStrict != VINF_SUCCESS)
1998 {
1999 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2000 return rcStrict;
2001 }
2002
2003 /* Must be a code descriptor. */
2004 if (!DescCS.Legacy.Gen.u1DescType)
2005 {
2006 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2007 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2008 }
2009 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2010 {
2011 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2012 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2013 }
2014
2015 /* Privilege checks. */
2016 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2017 {
2018 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2019 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2020 }
2021 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2022 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2023 {
2024 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2025 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2026 }
2027
2028 /* Present? */
2029 if (!DescCS.Legacy.Gen.u1Present)
2030 {
2031 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2032 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2033 }
2034
2035 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2036
2037 /*
2038 * Return to outer level?
2039 */
2040 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2041 {
2042 uint16_t uNewSS;
2043 uint32_t uNewESP;
2044 if (enmEffOpSize == IEMMODE_32BIT)
2045 {
2046 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2047 if (rcStrict != VINF_SUCCESS)
2048 return rcStrict;
2049 uNewESP = uFrame.pu32[0];
2050 uNewSS = (uint16_t)uFrame.pu32[1];
2051 }
2052 else
2053 {
2054 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp);
2055 if (rcStrict != VINF_SUCCESS)
2056 return rcStrict;
2057 uNewESP = uFrame.pu16[0];
2058 uNewSS = uFrame.pu16[1];
2059 }
2060 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2061 if (rcStrict != VINF_SUCCESS)
2062 return rcStrict;
2063
2064 /* Read the SS descriptor. */
2065 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2066 {
2067 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2068 return iemRaiseGeneralProtectionFault0(pIemCpu);
2069 }
2070
2071 IEMSELDESC DescSS;
2072 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2073 if (rcStrict != VINF_SUCCESS)
2074 {
2075 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2076 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2077 return rcStrict;
2078 }
2079
2080 /* Privilege checks. */
2081 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2082 {
2083 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2084 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2085 }
2086 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2087 {
2088 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2089 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2090 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2091 }
2092
2093 /* Must be a writeable data segment descriptor. */
2094 if (!DescSS.Legacy.Gen.u1DescType)
2095 {
2096 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2097 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2098 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2099 }
2100 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2101 {
2102 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2103 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2104 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2105 }
2106
2107 /* Present? */
2108 if (!DescSS.Legacy.Gen.u1Present)
2109 {
2110 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2111 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2112 }
2113
2114 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2115
2116 /* Check EIP. */
2117 if (uNewEip > cbLimitCS)
2118 {
2119 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2120 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2121 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2122 }
2123
2124 /*
2125 * Commit the changes, marking CS and SS accessed first since
2126 * that may fail.
2127 */
2128 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2129 {
2130 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2131 if (rcStrict != VINF_SUCCESS)
2132 return rcStrict;
2133 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2134 }
2135 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2136 {
2137 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2138 if (rcStrict != VINF_SUCCESS)
2139 return rcStrict;
2140 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2141 }
2142
2143 pCtx->rip = uNewEip;
2144 pCtx->cs.Sel = uNewCs;
2145 pCtx->cs.ValidSel = uNewCs;
2146 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2147 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2148 pCtx->cs.u32Limit = cbLimitCS;
2149 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2150 pCtx->rsp = uNewESP;
2151 pCtx->ss.Sel = uNewSS;
2152 pCtx->ss.ValidSel = uNewSS;
2153 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2154 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2155 pCtx->ss.u32Limit = cbLimitSs;
2156 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2157
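 /* Build the set of EFLAGS bits IRET may restore from the stack image: the
    arithmetic/control flags always, RF/AC/ID only for 32-bit frames,
    IF/IOPL/VIF/VIP only at CPL 0, and IF alone when CPL <= IOPL. */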
2158 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2159 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2160 if (enmEffOpSize != IEMMODE_16BIT)
2161 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2162 if (pIemCpu->uCpl == 0)
2163 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2164 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2165 fEFlagsMask |= X86_EFL_IF;
2166 pCtx->eflags.u &= ~fEFlagsMask;
2167 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2168
2169 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2170 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2171 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2172 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2173 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2174
2175 /* Done! */
2176
2177 }
2178 /*
2179 * Return to the same level.
2180 */
2181 else
2182 {
2183 /* Check EIP. */
2184 if (uNewEip > cbLimitCS)
2185 {
2186 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2187 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2188 }
2189
2190 /*
2191 * Commit the changes, marking CS first since it may fail.
2192 */
2193 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2194 {
2195 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2196 if (rcStrict != VINF_SUCCESS)
2197 return rcStrict;
2198 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2199 }
2200
2201 pCtx->rip = uNewEip;
2202 pCtx->cs.Sel = uNewCs;
2203 pCtx->cs.ValidSel = uNewCs;
2204 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2205 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2206 pCtx->cs.u32Limit = cbLimitCS;
2207 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2208 pCtx->rsp = uNewRsp;
2209
2210 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2211 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2212 if (enmEffOpSize != IEMMODE_16BIT)
2213 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2214 if (pIemCpu->uCpl == 0)
2215 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2216 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2217 fEFlagsMask |= X86_EFL_IF;
2218 pCtx->eflags.u &= ~fEFlagsMask;
2219 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2220 /* Done! */
2221 }
2222 return VINF_SUCCESS;
2223}
2224
2225
2226/**
2227 * Implements iret for long mode
2228 *
2229 * @param enmEffOpSize The effective operand size.
2230 */
2231IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2232{
2233 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2234 //VBOXSTRICTRC rcStrict;
2235 //uint64_t uNewRsp;
2236
2237 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2238 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2239}
2240
2241
2242/**
2243 * Implements iret.
2244 *
2245 * @param enmEffOpSize The effective operand size.
2246 */
2247IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2248{
2249 /*
2250 * Call a mode specific worker.
2251 */
2252 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2253 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2254 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2255 if (IEM_IS_LONG_MODE(pIemCpu))
2256 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2257
2258 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2259}
2260
2261
2262/**
2263 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2264 *
2265 * @param iSegReg The segment register number (valid).
2266 * @param uSel The new selector value.
2267 */
2268IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2269{
2270 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2271 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2272 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2273
2274 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2275
2276 /*
2277 * Real mode and V8086 mode are easy.
2278 */
2279 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2280 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2281 {
2282 *pSel = uSel;
2283 pHid->u64Base = (uint32_t)uSel << 4;
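 /* In real and V8086 mode the base is simply the selector shifted left by
    four; e.g. loading 0x1234 yields a base of 0x12340. */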
2284#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2285 /** @todo Does the CPU actually load limits and attributes in the
2286 * real/V8086 mode segment load case? It doesn't for CS in far
2287 * jumps... Affects unreal mode. */
2288 pHid->u32Limit = 0xffff;
2289 pHid->Attr.u = 0;
2290 pHid->Attr.n.u1Present = 1;
2291 pHid->Attr.n.u1DescType = 1;
2292 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2293 ? X86_SEL_TYPE_RW
2294 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2295#endif
2296 iemRegAddToRip(pIemCpu, cbInstr);
2297 return VINF_SUCCESS;
2298 }
2299
2300 /*
2301 * Protected mode.
2302 *
2303 * Check if it's a null segment selector value first, that's OK for DS, ES,
2304 * FS and GS. If not null, then we have to load and parse the descriptor.
2305 */
2306 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2307 {
2308 if (iSegReg == X86_SREG_SS)
2309 {
2310 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2311 || pIemCpu->uCpl != 0
2312 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2313 {
2314 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
2315 return iemRaiseGeneralProtectionFault0(pIemCpu);
2316 }
2317
2318 /* In 64-bit kernel mode, the stack can be 0 because of the way
2319 interrupts are dispatched when in kernel ctx. Just load the
2320 selector value into the register and leave the hidden bits
2321 as is. */
2322 *pSel = uSel;
2323 iemRegAddToRip(pIemCpu, cbInstr);
2324 return VINF_SUCCESS;
2325 }
2326
2327 *pSel = uSel; /* Not RPL, remember :-) */
2328 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2329 && iSegReg != X86_SREG_FS
2330 && iSegReg != X86_SREG_GS)
2331 {
2332 /** @todo figure out what this actually does, it works. Needs
2333 * testcase! */
2334 pHid->Attr.u = 0;
2335 pHid->Attr.n.u1Present = 1;
2336 pHid->Attr.n.u1Long = 1;
2337 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2338 pHid->Attr.n.u2Dpl = 3;
2339 pHid->u32Limit = 0;
2340 pHid->u64Base = 0;
2341 }
2342 else
2343 {
2344 pHid->Attr.u = 0;
2345 pHid->u32Limit = 0;
2346 pHid->u64Base = 0;
2347 }
2348 iemRegAddToRip(pIemCpu, cbInstr);
2349 return VINF_SUCCESS;
2350 }
2351
2352 /* Fetch the descriptor. */
2353 IEMSELDESC Desc;
2354 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2355 if (rcStrict != VINF_SUCCESS)
2356 return rcStrict;
2357
2358 /* Check GPs first. */
2359 if (!Desc.Legacy.Gen.u1DescType)
2360 {
2361 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2362 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2363 }
2364 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2365 {
2366 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2367 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2368 {
2369 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2370 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2371 }
2372 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2373 {
2374 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2375 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2376 }
2377 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2378 {
2379 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2380 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2381 }
2382 }
2383 else
2384 {
2385 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2386 {
2387 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2388 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2389 }
2390 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2391 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2392 {
2393#if 0 /* this is what intel says. */
2394 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2395 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2396 {
2397 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2398 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2399 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2400 }
2401#else /* this is what makes more sense. */
2402 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2403 {
2404 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2405 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2406 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2407 }
2408 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2409 {
2410 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2411 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2412 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2413 }
2414#endif
2415 }
2416 }
2417
2418 /* Is it there? */
2419 if (!Desc.Legacy.Gen.u1Present)
2420 {
2421 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2422 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2423 }
2424
2425 /* The base and limit. */
2426 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2427 uint64_t u64Base;
2428 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2429 && iSegReg < X86_SREG_FS)
2430 u64Base = 0;
2431 else
2432 u64Base = X86DESC_BASE(&Desc.Legacy);
2433
2434 /*
2435 * Ok, everything checked out fine. Now set the accessed bit before
2436 * committing the result into the registers.
2437 */
2438 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2439 {
2440 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2441 if (rcStrict != VINF_SUCCESS)
2442 return rcStrict;
2443 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2444 }
2445
2446 /* commit */
2447 *pSel = uSel;
2448 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2449 pHid->u32Limit = cbLimit;
2450 pHid->u64Base = u64Base;
2451
2452 /** @todo check if the hidden bits are loaded correctly for 64-bit
2453 * mode. */
2454
2455 iemRegAddToRip(pIemCpu, cbInstr);
2456 return VINF_SUCCESS;
2457}
2458
2459
2460/**
2461 * Implements 'mov SReg, r/m'.
2462 *
2463 * @param iSegReg The segment register number (valid).
2464 * @param uSel The new selector value.
2465 */
2466IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2467{
2468 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2469 if (rcStrict == VINF_SUCCESS)
2470 {
2471 if (iSegReg == X86_SREG_SS)
2472 {
2473 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2474 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2475 }
2476 }
2477 return rcStrict;
2478}
2479
2480
2481/**
2482 * Implements 'pop SReg'.
2483 *
2484 * @param iSegReg The segment register number (valid).
2485 * @param enmEffOpSize The effective operand size (valid).
2486 */
2487IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2488{
2489 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2490 VBOXSTRICTRC rcStrict;
2491
2492 /*
2493 * Read the selector off the stack and join paths with mov ss, reg.
2494 */
2495 RTUINT64U TmpRsp;
2496 TmpRsp.u = pCtx->rsp;
2497 switch (enmEffOpSize)
2498 {
2499 case IEMMODE_16BIT:
2500 {
2501 uint16_t uSel;
2502 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2503 if (rcStrict == VINF_SUCCESS)
2504 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2505 break;
2506 }
2507
2508 case IEMMODE_32BIT:
2509 {
2510 uint32_t u32Value;
2511 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2512 if (rcStrict == VINF_SUCCESS)
2513 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2514 break;
2515 }
2516
2517 case IEMMODE_64BIT:
2518 {
2519 uint64_t u64Value;
2520 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2521 if (rcStrict == VINF_SUCCESS)
2522 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2523 break;
2524 }
2525 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2526 }
2527
2528 /*
2529 * Commit the stack on success.
2530 */
2531 if (rcStrict == VINF_SUCCESS)
2532 {
2533 pCtx->rsp = TmpRsp.u;
2534 if (iSegReg == X86_SREG_SS)
2535 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2536 }
2537 return rcStrict;
2538}
2539
2540
2541/**
2542 * Implements lgs, lfs, les, lds & lss.
2543 */
2544IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2545 uint16_t, uSel,
2546 uint64_t, offSeg,
2547 uint8_t, iSegReg,
2548 uint8_t, iGReg,
2549 IEMMODE, enmEffOpSize)
2550{
2551 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2552 VBOXSTRICTRC rcStrict;
2553
2554 /*
2555 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2556 */
2557 /** @todo verify and test that mov, pop and lXs perform the segment
2558 * register loading in the exact same way. */
2559 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2560 if (rcStrict == VINF_SUCCESS)
2561 {
2562 switch (enmEffOpSize)
2563 {
2564 case IEMMODE_16BIT:
2565 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2566 break;
2567 case IEMMODE_32BIT:
2568 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2569 break;
2570 case IEMMODE_64BIT:
2571 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2572 break;
2573 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2574 }
2575 }
2576
2577 return rcStrict;
2578}
2579
2580
2581/**
2582 * Implements lgdt.
2583 *
2584 * @param iEffSeg The segment of the new GDTR contents.
2585 * @param GCPtrEffSrc The address of the new GDTR contents.
2586 * @param enmEffOpSize The effective operand size.
2587 */
2588IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2589{
2590 if (pIemCpu->uCpl != 0)
2591 return iemRaiseGeneralProtectionFault0(pIemCpu);
2592 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2593
2594 /*
2595 * Fetch the limit and base address.
2596 */
2597 uint16_t cbLimit;
2598 RTGCPTR GCPtrBase;
2599 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2600 if (rcStrict == VINF_SUCCESS)
2601 {
2602 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2603 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2604 else
2605 {
2606 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2607 pCtx->gdtr.cbGdt = cbLimit;
2608 pCtx->gdtr.pGdt = GCPtrBase;
2609 }
2610 if (rcStrict == VINF_SUCCESS)
2611 iemRegAddToRip(pIemCpu, cbInstr);
2612 }
2613 return rcStrict;
2614}
2615
2616
2617/**
2618 * Implements lidt.
2619 *
2620 * @param iEffSeg The segment of the new IDTR contents.
2621 * @param GCPtrEffSrc The address of the new IDTR contents.
2622 * @param enmEffOpSize The effective operand size.
2623 */
2624IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2625{
2626 if (pIemCpu->uCpl != 0)
2627 return iemRaiseGeneralProtectionFault0(pIemCpu);
2628 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2629
2630 /*
2631 * Fetch the limit and base address.
2632 */
2633 uint16_t cbLimit;
2634 RTGCPTR GCPtrBase;
2635 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2636 if (rcStrict == VINF_SUCCESS)
2637 {
2638 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2639 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2640 else
2641 {
2642 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2643 pCtx->idtr.cbIdt = cbLimit;
2644 pCtx->idtr.pIdt = GCPtrBase;
2645 }
2646 iemRegAddToRip(pIemCpu, cbInstr);
2647 }
2648 return rcStrict;
2649}
2650
2651
2652/**
2653 * Implements lldt.
2654 *
2655 * @param uNewLdt The new LDT selector value.
2656 */
2657IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2658{
2659 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2660
2661 /*
2662 * Check preconditions.
2663 */
2664 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2665 {
2666 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2667 return iemRaiseUndefinedOpcode(pIemCpu);
2668 }
2669 if (pIemCpu->uCpl != 0)
2670 {
2671 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2672 return iemRaiseGeneralProtectionFault0(pIemCpu);
2673 }
2674 if (uNewLdt & X86_SEL_LDT)
2675 {
2676 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2677 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2678 }
2679
2680 /*
2681 * Now, loading a NULL selector is easy.
2682 */
2683 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2684 {
2685 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2686 /** @todo check if the actual value is loaded or if it's always 0. */
2687 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2688 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
2689 else
2690 pCtx->ldtr.Sel = 0;
2691 pCtx->ldtr.ValidSel = 0;
2692 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2693 pCtx->ldtr.Attr.u = 0;
2694 pCtx->ldtr.u64Base = 0;
2695 pCtx->ldtr.u32Limit = 0;
2696
2697 iemRegAddToRip(pIemCpu, cbInstr);
2698 return VINF_SUCCESS;
2699 }
2700
2701 /*
2702 * Read the descriptor.
2703 */
2704 IEMSELDESC Desc;
2705 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2706 if (rcStrict != VINF_SUCCESS)
2707 return rcStrict;
2708
2709 /* Check GPs first. */
2710 if (Desc.Legacy.Gen.u1DescType)
2711 {
2712 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2713 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2714 }
2715 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2716 {
2717 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2718 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2719 }
2720 uint64_t u64Base;
2721 if (!IEM_IS_LONG_MODE(pIemCpu))
2722 u64Base = X86DESC_BASE(&Desc.Legacy);
2723 else
2724 {
2725 if (Desc.Long.Gen.u5Zeros)
2726 {
2727 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2728 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2729 }
2730
2731 u64Base = X86DESC64_BASE(&Desc.Long);
2732 if (!IEM_IS_CANONICAL(u64Base))
2733 {
2734 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2735 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2736 }
2737 }
2738
2739 /* NP */
2740 if (!Desc.Legacy.Gen.u1Present)
2741 {
2742 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2743 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2744 }
2745
2746 /*
2747 * It checks out alright, update the registers.
2748 */
2749/** @todo check if the actual value is loaded or if the RPL is dropped */
2750 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2751 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
2752 else
2753 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
2754 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
2755 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2756 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2757 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
2758 pCtx->ldtr.u64Base = u64Base;
2759
2760 iemRegAddToRip(pIemCpu, cbInstr);
2761 return VINF_SUCCESS;
2762}
2763
2764
2765/**
2766 * Implements ltr.
2767 *
2768 * @param uNewTr The new task register (TR) selector value.
2769 */
2770IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2771{
2772 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2773
2774 /*
2775 * Check preconditions.
2776 */
2777 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2778 {
2779 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2780 return iemRaiseUndefinedOpcode(pIemCpu);
2781 }
2782 if (pIemCpu->uCpl != 0)
2783 {
2784 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2785 return iemRaiseGeneralProtectionFault0(pIemCpu);
2786 }
2787 if (uNewTr & X86_SEL_LDT)
2788 {
2789 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2790 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2791 }
2792 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
2793 {
2794 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2795 return iemRaiseGeneralProtectionFault0(pIemCpu);
2796 }
2797
2798 /*
2799 * Read the descriptor.
2800 */
2801 IEMSELDESC Desc;
2802 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2803 if (rcStrict != VINF_SUCCESS)
2804 return rcStrict;
2805
2806 /* Check GPs first. */
2807 if (Desc.Legacy.Gen.u1DescType)
2808 {
2809 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2810 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2811 }
2812 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2813 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2814 || IEM_IS_LONG_MODE(pIemCpu)) )
2815 {
2816 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2817 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2818 }
2819 uint64_t u64Base;
2820 if (!IEM_IS_LONG_MODE(pIemCpu))
2821 u64Base = X86DESC_BASE(&Desc.Legacy);
2822 else
2823 {
2824 if (Desc.Long.Gen.u5Zeros)
2825 {
2826 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2827 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2828 }
2829
2830 u64Base = X86DESC64_BASE(&Desc.Long);
2831 if (!IEM_IS_CANONICAL(u64Base))
2832 {
2833 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2834 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2835 }
2836 }
2837
2838 /* NP */
2839 if (!Desc.Legacy.Gen.u1Present)
2840 {
2841 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2842 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2843 }
2844
2845 /*
2846 * Set it busy.
2847 * Note! Intel says this should lock down the whole descriptor, but we'll
2848 * restrict ourselves to 32-bit for now due to lack of inline
2849 * assembly and such.
2850 */
2851 void *pvDesc;
2852 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
2853 if (rcStrict != VINF_SUCCESS)
2854 return rcStrict;
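 /* The busy bit is bit 1 of the descriptor type field, i.e. bit 41 of the
    8-byte descriptor.  The switch below realigns the pointer to a 32-bit
    boundary for ASMAtomicBitSet and adjusts the bit offset accordingly. */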
2855 switch ((uintptr_t)pvDesc & 3)
2856 {
2857 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2858 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2859 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2860 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2861 }
2862 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2863 if (rcStrict != VINF_SUCCESS)
2864 return rcStrict;
2865 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2866
2867 /*
2868 * It checks out alright, update the registers.
2869 */
2870/** @todo check if the actual value is loaded or if the RPL is dropped */
2871 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2872 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
2873 else
2874 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
2875 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
2876 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2877 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2878 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
2879 pCtx->tr.u64Base = u64Base;
2880
2881 iemRegAddToRip(pIemCpu, cbInstr);
2882 return VINF_SUCCESS;
2883}
2884
2885
2886/**
2887 * Implements mov GReg,CRx.
2888 *
2889 * @param iGReg The general register to store the CRx value in.
2890 * @param iCrReg The CRx register to read (valid).
2891 */
2892IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2893{
2894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2895 if (pIemCpu->uCpl != 0)
2896 return iemRaiseGeneralProtectionFault0(pIemCpu);
2897 Assert(!pCtx->eflags.Bits.u1VM);
2898
2899 /* read it */
2900 uint64_t crX;
2901 switch (iCrReg)
2902 {
2903 case 0: crX = pCtx->cr0; break;
2904 case 2: crX = pCtx->cr2; break;
2905 case 3: crX = pCtx->cr3; break;
2906 case 4: crX = pCtx->cr4; break;
2907 case 8:
2908 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2909 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
2910 else
2911 crX = 0xff;
2912 break;
2913 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2914 }
2915
2916 /* store it */
2917 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2918 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2919 else
2920 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2921
2922 iemRegAddToRip(pIemCpu, cbInstr);
2923 return VINF_SUCCESS;
2924}
2925
2926
2927/**
2928 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2929 *
2930 * @param iCrReg The CRx register to write (valid).
2931 * @param uNewCrX The new value.
2932 */
2933IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2934{
2935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2936 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2937 VBOXSTRICTRC rcStrict;
2938 int rc;
2939
2940 /*
2941 * Try to store it.
2942 * Unfortunately, CPUM only does a tiny bit of the work.
2943 */
2944 switch (iCrReg)
2945 {
2946 case 0:
2947 {
2948 /*
2949 * Perform checks.
2950 */
2951 uint64_t const uOldCrX = pCtx->cr0;
2952 uNewCrX |= X86_CR0_ET; /* hardcoded */
2953
2954 /* Check for reserved bits. */
2955 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2956 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2957 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2958 if (uNewCrX & ~(uint64_t)fValid)
2959 {
2960 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2961 return iemRaiseGeneralProtectionFault0(pIemCpu);
2962 }
2963
2964 /* Check for invalid combinations. */
2965 if ( (uNewCrX & X86_CR0_PG)
2966 && !(uNewCrX & X86_CR0_PE) )
2967 {
2968 Log(("Trying to set CR0.PG without CR0.PE\n"));
2969 return iemRaiseGeneralProtectionFault0(pIemCpu);
2970 }
2971
2972 if ( !(uNewCrX & X86_CR0_CD)
2973 && (uNewCrX & X86_CR0_NW) )
2974 {
2975 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2976 return iemRaiseGeneralProtectionFault0(pIemCpu);
2977 }
2978
2979 /* Long mode consistency checks. */
2980 if ( (uNewCrX & X86_CR0_PG)
2981 && !(uOldCrX & X86_CR0_PG)
2982 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2983 {
2984 if (!(pCtx->cr4 & X86_CR4_PAE))
2985 {
2986 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
2987 return iemRaiseGeneralProtectionFault0(pIemCpu);
2988 }
2989 if (pCtx->cs.Attr.n.u1Long)
2990 {
2991 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
2992 return iemRaiseGeneralProtectionFault0(pIemCpu);
2993 }
2994 }
2995
2996 /** @todo check reserved PDPTR bits as AMD states. */
2997
2998 /*
2999 * Change CR0.
3000 */
3001 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3002 CPUMSetGuestCR0(pVCpu, uNewCrX);
3003 else
3004 pCtx->cr0 = uNewCrX;
3005 Assert(pCtx->cr0 == uNewCrX);
3006
3007 /*
3008 * Change EFER.LMA if entering or leaving long mode.
3009 */
3010 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3011 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3012 {
3013 uint64_t NewEFER = pCtx->msrEFER;
3014 if (uNewCrX & X86_CR0_PG)
3015 NewEFER |= MSR_K6_EFER_LMA;
3016 else
3017 NewEFER &= ~MSR_K6_EFER_LMA;
3018
3019 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3020 CPUMSetGuestEFER(pVCpu, NewEFER);
3021 else
3022 pCtx->msrEFER = NewEFER;
3023 Assert(pCtx->msrEFER == NewEFER);
3024 }
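 /* Note: only LMA, the long-mode-active status bit, is toggled here; LME
    itself stays under guest control via the EFER MSR. */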
3025
3026 /*
3027 * Inform PGM.
3028 */
3029 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3030 {
3031 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3032 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3033 {
3034 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3035 AssertRCReturn(rc, rc);
3036 /* ignore informational status codes */
3037 }
3038 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3039 }
3040 else
3041 rcStrict = VINF_SUCCESS;
3042 break;
3043 }
3044
3045 /*
3046 * CR2 can be changed without any restrictions.
3047 */
3048 case 2:
3049 pCtx->cr2 = uNewCrX;
3050 rcStrict = VINF_SUCCESS;
3051 break;
3052
3053 /*
3054 * CR3 is relatively simple, although AMD and Intel have different
3055 * accounts of how setting reserved bits is handled. We take Intel's
3056 * word for the lower bits and AMD's for the high bits (63:52).
3057 */
3058 /** @todo Testcase: Setting reserved bits in CR3, especially before
3059 * enabling paging. */
3060 case 3:
3061 {
3062 /* check / mask the value. */
3063 if (uNewCrX & UINT64_C(0xfff0000000000000))
3064 {
3065 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3066 return iemRaiseGeneralProtectionFault0(pIemCpu);
3067 }
3068
3069 uint64_t fValid;
3070 if ( (pCtx->cr4 & X86_CR4_PAE)
3071 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3072 fValid = UINT64_C(0x000ffffffffff014);
3073 else if (pCtx->cr4 & X86_CR4_PAE)
3074 fValid = UINT64_C(0xfffffff4);
3075 else
3076 fValid = UINT64_C(0xfffff014);
3077 if (uNewCrX & ~fValid)
3078 {
3079 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3080 uNewCrX, uNewCrX & ~fValid));
3081 uNewCrX &= fValid;
3082 }
3083
3084 /** @todo If we're in PAE mode we should check the PDPTRs for
3085 * invalid bits. */
3086
3087 /* Make the change. */
3088 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3089 {
3090 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3091 AssertRCSuccessReturn(rc, rc);
3092 }
3093 else
3094 pCtx->cr3 = uNewCrX;
3095
3096 /* Inform PGM. */
3097 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3098 {
3099 if (pCtx->cr0 & X86_CR0_PG)
3100 {
3101 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3102 AssertRCReturn(rc, rc);
3103 /* ignore informational status codes */
3104 }
3105 }
3106 rcStrict = VINF_SUCCESS;
3107 break;
3108 }
3109
3110 /*
3111 * CR4 is a bit more tedious as there are bits which cannot be cleared
3112 * under some circumstances and such.
3113 */
3114 case 4:
3115 {
3116 uint64_t const uOldCrX = pCtx->cr4;
3117
3118 /* reserved bits */
3119 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3120 | X86_CR4_TSD | X86_CR4_DE
3121 | X86_CR4_PSE | X86_CR4_PAE
3122 | X86_CR4_MCE | X86_CR4_PGE
3123 | X86_CR4_PCE | X86_CR4_OSFSXR
3124 | X86_CR4_OSXMMEEXCPT;
3125 //if (xxx)
3126 // fValid |= X86_CR4_VMXE;
3127 //if (xxx)
3128 // fValid |= X86_CR4_OSXSAVE;
3129 if (uNewCrX & ~(uint64_t)fValid)
3130 {
3131 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3132 return iemRaiseGeneralProtectionFault0(pIemCpu);
3133 }
3134
3135 /* long mode checks. */
3136 if ( (uOldCrX & X86_CR4_PAE)
3137 && !(uNewCrX & X86_CR4_PAE)
3138 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3139 {
3140 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
3141 return iemRaiseGeneralProtectionFault0(pIemCpu);
3142 }
3143
3144
3145 /*
3146 * Change it.
3147 */
3148 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3149 {
3150 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3151 AssertRCSuccessReturn(rc, rc);
3152 }
3153 else
3154 pCtx->cr4 = uNewCrX;
3155 Assert(pCtx->cr4 == uNewCrX);
3156
3157 /*
3158 * Notify SELM and PGM.
3159 */
3160 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3161 {
3162 /* SELM - VME may change things wrt to the TSS shadowing. */
3163 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3164 {
3165 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3166 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3167 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3168 }
3169
3170 /* PGM - flushing and mode. */
3171 if ( (uNewCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3172 != (uOldCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE)) )
3173 {
3174 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3175 AssertRCReturn(rc, rc);
3176 /* ignore informational status codes */
3177 }
3178 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3179 }
3180 else
3181 rcStrict = VINF_SUCCESS;
3182 break;
3183 }
3184
3185 /*
3186 * CR8 maps to the APIC TPR.
3187 */
3188 case 8:
3189 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3190 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR write\n")); /** @todo implement CR8 reading and writing. */
3191 else
3192 rcStrict = VINF_SUCCESS;
3193 break;
3194
3195 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3196 }
3197
3198 /*
3199 * Advance the RIP on success.
3200 */
3201 if (RT_SUCCESS(rcStrict))
3202 {
3203 if (rcStrict != VINF_SUCCESS)
3204 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3205 iemRegAddToRip(pIemCpu, cbInstr);
3206 }
3207
3208 return rcStrict;
3209}
3210
3211
3212/**
3213 * Implements mov CRx,GReg.
3214 *
3215 * @param iCrReg The CRx register to write (valid).
3216 * @param iGReg The general register to load the CRx value from.
3217 */
3218IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3219{
3220 if (pIemCpu->uCpl != 0)
3221 return iemRaiseGeneralProtectionFault0(pIemCpu);
3222 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3223
3224 /*
3225 * Read the new value from the source register and call common worker.
3226 */
3227 uint64_t uNewCrX;
3228 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3229 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3230 else
3231 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3232 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3233}
3234
3235
3236/**
3237 * Implements 'LMSW r/m16'
3238 *
3239 * @param u16NewMsw The new value.
3240 */
3241IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3242{
3243 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3244
3245 if (pIemCpu->uCpl != 0)
3246 return iemRaiseGeneralProtectionFault0(pIemCpu);
3247 Assert(!pCtx->eflags.Bits.u1VM);
3248
3249 /*
3250 * Compose the new CR0 value and call common worker.
3251 */
3252 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3253 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
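 /* The old PE value is kept and only ORed with the new MSW, so LMSW can set
    CR0.PE but never clear it, as architecturally specified. */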
3254 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3255}
3256
3257
3258/**
3259 * Implements 'CLTS'.
3260 */
3261IEM_CIMPL_DEF_0(iemCImpl_clts)
3262{
3263 if (pIemCpu->uCpl != 0)
3264 return iemRaiseGeneralProtectionFault0(pIemCpu);
3265
3266 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3267 uint64_t uNewCr0 = pCtx->cr0;
3268 uNewCr0 &= ~X86_CR0_TS;
3269 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3270}
3271
3272
3273/**
3274 * Implements mov GReg,DRx.
3275 *
3276 * @param iGReg The general register to store the DRx value in.
3277 * @param iDrReg The DRx register to read (0-7).
3278 */
3279IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3280{
3281 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3282
3283 /*
3284 * Check preconditions.
3285 */
3286
3287 /* Raise GPs. */
3288 if (pIemCpu->uCpl != 0)
3289 return iemRaiseGeneralProtectionFault0(pIemCpu);
3290 Assert(!pCtx->eflags.Bits.u1VM);
3291
3292 if ( (iDrReg == 4 || iDrReg == 5)
3293 && (pCtx->cr4 & X86_CR4_DE) )
3294 {
3295 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3296 return iemRaiseGeneralProtectionFault0(pIemCpu);
3297 }
3298
3299 /* Raise #DB if general access detect is enabled. */
3300 if (pCtx->dr[7] & X86_DR7_GD)
3301 {
3302 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3303 return iemRaiseDebugException(pIemCpu);
3304 }
3305
3306 /*
3307 * Read the debug register and store it in the specified general register.
3308 */
3309 uint64_t drX;
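 /* DR4/DR5 alias DR6/DR7 here since CR4.DE is clear (checked above).  The
    fixed bits are forced on read: DR6 bits 31:16 and 11:4 read as set with
    bit 12 clear, and DR7 bit 10 reads as one with bits 11/12/14/15 clear. */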
3310 switch (iDrReg)
3311 {
3312 case 0: drX = pCtx->dr[0]; break;
3313 case 1: drX = pCtx->dr[1]; break;
3314 case 2: drX = pCtx->dr[2]; break;
3315 case 3: drX = pCtx->dr[3]; break;
3316 case 6:
3317 case 4:
3318 drX = pCtx->dr[6];
3319 drX &= ~RT_BIT_32(12);
3320 drX |= UINT32_C(0xffff0ff0);
3321 break;
3322 case 7:
3323 case 5:
3324 drX = pCtx->dr[7];
3325 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3326 drX |= RT_BIT_32(10);
3327 break;
3328 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3329 }
3330
3331 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3332 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3333 else
3334 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3335
3336 iemRegAddToRip(pIemCpu, cbInstr);
3337 return VINF_SUCCESS;
3338}
3339
3340
3341/**
3342 * Implements mov DRx,GReg.
3343 *
3344 * @param iDrReg The DRx register to write (valid).
3345 * @param iGReg The general register to load the DRx value from.
3346 */
3347IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3348{
3349 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3350
3351 /*
3352 * Check preconditions.
3353 */
3354 if (pIemCpu->uCpl != 0)
3355 return iemRaiseGeneralProtectionFault0(pIemCpu);
3356 Assert(!pCtx->eflags.Bits.u1VM);
3357
3358 if ( (iDrReg == 4 || iDrReg == 5)
3359 && (pCtx->cr4 & X86_CR4_DE) )
3360 {
3361 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3362 return iemRaiseGeneralProtectionFault0(pIemCpu);
3363 }
3364
3365 /* Raise #DB if general access detect is enabled. */
3366 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
3367 * \#GP? */
3368 if (pCtx->dr[7] & X86_DR7_GD)
3369 {
3370 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3371 return iemRaiseDebugException(pIemCpu);
3372 }
3373
3374 /*
3375 * Read the new value from the source register.
3376 */
3377 uint64_t uNewDrX;
3378 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3379 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3380 else
3381 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3382
3383 /*
3384 * Adjust it.
3385 */
3386 switch (iDrReg)
3387 {
3388 case 0:
3389 case 1:
3390 case 2:
3391 case 3:
3392 /* nothing to adjust */
3393 break;
3394
3395 case 6:
3396 case 4:
3397 if (uNewDrX & UINT64_C(0xffffffff00000000))
3398 {
3399 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3400 return iemRaiseGeneralProtectionFault0(pIemCpu);
3401 }
3402 uNewDrX &= ~RT_BIT_32(12);
3403 uNewDrX |= UINT32_C(0xffff0ff0);
3404 break;
3405
3406 case 7:
3407 case 5:
3408 if (uNewDrX & UINT64_C(0xffffffff00000000))
3409 {
3410 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3411 return iemRaiseGeneralProtectionFault0(pIemCpu);
3412 }
3413 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3414 uNewDrX |= RT_BIT_32(10);
3415 break;
3416
3417 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3418 }
3419
3420 /*
3421 * Do the actual setting.
3422 */
3423 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3424 {
3425 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3426 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3427 }
3428 else
3429 pCtx->dr[iDrReg] = uNewDrX;
3430
3431 iemRegAddToRip(pIemCpu, cbInstr);
3432 return VINF_SUCCESS;
3433}
3434
3435
3436/**
3437 * Implements 'INVLPG m'.
3438 *
3439 * @param GCPtrPage The effective address of the page to invalidate.
3440 * @remarks Updates the RIP.
3441 */
3442IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3443{
3444 /* ring-0 only. */
3445 if (pIemCpu->uCpl != 0)
3446 return iemRaiseGeneralProtectionFault0(pIemCpu);
3447 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3448
3449 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3450 iemRegAddToRip(pIemCpu, cbInstr);
3451
3452 if (rc == VINF_SUCCESS)
3453 return VINF_SUCCESS;
3454 if (rc == VINF_PGM_SYNC_CR3)
3455 return iemSetPassUpStatus(pIemCpu, rc);
3456
3457 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3458 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", rc));
3459 return rc;
3460}
3461
3462
3463/**
3464 * Implements RDTSC.
3465 */
3466IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3467{
3468 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3469
3470 /*
3471 * Check preconditions.
3472 */
3473 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3474 return iemRaiseUndefinedOpcode(pIemCpu);
3475
3476 if ( (pCtx->cr4 & X86_CR4_TSD)
3477 && pIemCpu->uCpl != 0)
3478 {
3479 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3480 return iemRaiseGeneralProtectionFault0(pIemCpu);
3481 }
3482
3483 /*
3484 * Do the job.
3485 */
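 /* The 64-bit TSC value is returned split across EDX:EAX (high dword in EDX,
    low dword in EAX). */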
3486 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3487 pCtx->rax = (uint32_t)uTicks;
3488 pCtx->rdx = uTicks >> 32;
3489#ifdef IEM_VERIFICATION_MODE
3490 pIemCpu->fIgnoreRaxRdx = true;
3491#endif
3492
3493 iemRegAddToRip(pIemCpu, cbInstr);
3494 return VINF_SUCCESS;
3495}
3496
3497
3498/**
3499 * Implements RDMSR.
3500 */
3501IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3502{
3503 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3504
3505 /*
3506 * Check preconditions.
3507 */
3508 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3509 return iemRaiseUndefinedOpcode(pIemCpu);
3510 if (pIemCpu->uCpl != 0)
3511 return iemRaiseGeneralProtectionFault0(pIemCpu);
3512
3513 /*
3514 * Do the job.
3515 */
3516 RTUINT64U uValue;
3517 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3518 if (rc != VINF_SUCCESS)
3519 {
3520 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3521 return iemRaiseGeneralProtectionFault0(pIemCpu);
3522 }
3523
3524 pCtx->rax = uValue.au32[0];
3525 pCtx->rdx = uValue.au32[1];
3526
3527 iemRegAddToRip(pIemCpu, cbInstr);
3528 return VINF_SUCCESS;
3529}
3530
3531
3532/**
3533 * Implements 'IN eAX, port'.
3534 *
3535 * @param u16Port The source port.
3536 * @param cbReg The register size.
3537 */
3538IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3539{
3540 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3541
3542 /*
3543 * CPL check
3544 */
3545 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3546 if (rcStrict != VINF_SUCCESS)
3547 return rcStrict;
3548
3549 /*
3550 * Perform the I/O.
3551 */
3552 uint32_t u32Value;
3553 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3554 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3555 else
3556 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3557 if (IOM_SUCCESS(rcStrict))
3558 {
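 /* Only the addressed sub-register is written for 8/16-bit I/O; a 32-bit IN
    zero-extends the result into RAX (hence the write to rax below). */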
3559 switch (cbReg)
3560 {
3561 case 1: pCtx->al = (uint8_t)u32Value; break;
3562 case 2: pCtx->ax = (uint16_t)u32Value; break;
3563 case 4: pCtx->rax = u32Value; break;
3564 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3565 }
3566 iemRegAddToRip(pIemCpu, cbInstr);
3567 pIemCpu->cPotentialExits++;
3568 if (rcStrict != VINF_SUCCESS)
3569 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3570 }
3571
3572 return rcStrict;
3573}
3574
3575
3576/**
3577 * Implements 'IN eAX, DX'.
3578 *
3579 * @param cbReg The register size.
3580 */
3581IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3582{
3583 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3584}
3585
3586
3587/**
3588 * Implements 'OUT port, eAX'.
3589 *
3590 * @param u16Port The destination port.
3591 * @param cbReg The register size.
3592 */
3593IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3594{
3595 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3596
3597 /*
3598 * CPL check
3599 */
3600 if ( (pCtx->cr0 & X86_CR0_PE)
3601 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3602 || pCtx->eflags.Bits.u1VM) )
3603 {
3604 /** @todo I/O port permission bitmap check */
3605 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap checks.\n"));
3606 }
3607
3608 /*
3609 * Perform the I/O.
3610 */
3611 uint32_t u32Value;
3612 switch (cbReg)
3613 {
3614 case 1: u32Value = pCtx->al; break;
3615 case 2: u32Value = pCtx->ax; break;
3616 case 4: u32Value = pCtx->eax; break;
3617 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3618 }
3619 VBOXSTRICTRC rcStrict;
3620 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3621 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3622 else
3623 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3624 if (IOM_SUCCESS(rcStrict))
3625 {
3626 iemRegAddToRip(pIemCpu, cbInstr);
3627 pIemCpu->cPotentialExits++;
3628 if (rcStrict != VINF_SUCCESS)
3629 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3630 }
3631 return rcStrict;
3632}
3633
3634
3635/**
3636 * Implements 'OUT DX, eAX'.
3637 *
3638 * @param cbReg The register size.
3639 */
3640IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3641{
3642 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3643}
3644
3645
3646/**
3647 * Implements 'CLI'.
3648 */
3649IEM_CIMPL_DEF_0(iemCImpl_cli)
3650{
3651 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3652
3653 if (pCtx->cr0 & X86_CR0_PE)
3654 {
3655 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3656 if (!pCtx->eflags.Bits.u1VM)
3657 {
3658 if (pIemCpu->uCpl <= uIopl)
3659 pCtx->eflags.Bits.u1IF = 0;
3660 else if ( pIemCpu->uCpl == 3
3661 && (pCtx->cr4 & X86_CR4_PVI) )
3662 pCtx->eflags.Bits.u1VIF = 0;
3663 else
3664 return iemRaiseGeneralProtectionFault0(pIemCpu);
3665 }
3666 /* V8086 */
3667 else if (uIopl == 3)
3668 pCtx->eflags.Bits.u1IF = 0;
3669 else if ( uIopl < 3
3670 && (pCtx->cr4 & X86_CR4_VME) )
3671 pCtx->eflags.Bits.u1VIF = 0;
3672 else
3673 return iemRaiseGeneralProtectionFault0(pIemCpu);
3674 }
3675 /* real mode */
3676 else
3677 pCtx->eflags.Bits.u1IF = 0;
3678 iemRegAddToRip(pIemCpu, cbInstr);
3679 return VINF_SUCCESS;
3680}
3681
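/*
 * Quick reference for the IF/VIF handling implemented above (informal, it
 * simply restates the checks in the order they are made):
 *
 *   Mode        Condition                        Effect
 *   real        -                                IF := 0
 *   protected   CPL <= IOPL                      IF := 0
 *   protected   CPL == 3 and CR4.PVI set         VIF := 0
 *   protected   otherwise                        #GP(0)
 *   V8086       IOPL == 3                        IF := 0
 *   V8086       IOPL < 3 and CR4.VME set         VIF := 0
 *   V8086       otherwise                        #GP(0)
 */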
3682
3683/**
3684 * Implements 'STI'.
3685 */
3686IEM_CIMPL_DEF_0(iemCImpl_sti)
3687{
3688 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3689
3690 if (pCtx->cr0 & X86_CR0_PE)
3691 {
3692 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3693 if (!pCtx->eflags.Bits.u1VM)
3694 {
3695 if (pIemCpu->uCpl <= uIopl)
3696 pCtx->eflags.Bits.u1IF = 1;
3697 else if ( pIemCpu->uCpl == 3
3698 && (pCtx->cr4 & X86_CR4_PVI)
3699 && !pCtx->eflags.Bits.u1VIP )
3700 pCtx->eflags.Bits.u1VIF = 1;
3701 else
3702 return iemRaiseGeneralProtectionFault0(pIemCpu);
3703 }
3704 /* V8086 */
3705 else if (uIopl == 3)
3706 pCtx->eflags.Bits.u1IF = 1;
3707 else if ( uIopl < 3
3708 && (pCtx->cr4 & X86_CR4_VME)
3709 && !pCtx->eflags.Bits.u1VIP )
3710 pCtx->eflags.Bits.u1VIF = 1;
3711 else
3712 return iemRaiseGeneralProtectionFault0(pIemCpu);
3713 }
3714 /* real mode */
3715 else
3716 pCtx->eflags.Bits.u1IF = 1;
3717
3718 iemRegAddToRip(pIemCpu, cbInstr);
3719 /** @todo don't do this unconditionally... */
3720 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
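    /* Note: when STI actually sets IF, external interrupts stay blocked until
       after the following instruction has executed; the inhibit marker set
       above models that delivery shadow (currently applied unconditionally,
       see the todo). */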
3721 return VINF_SUCCESS;
3722}
3723
3724
3725/**
3726 * Implements 'HLT'.
3727 */
3728IEM_CIMPL_DEF_0(iemCImpl_hlt)
3729{
3730 if (pIemCpu->uCpl != 0)
3731 return iemRaiseGeneralProtectionFault0(pIemCpu);
3732 iemRegAddToRip(pIemCpu, cbInstr);
3733 return VINF_EM_HALT;
3734}
3735
3736
3737/**
3738 * Implements 'CPUID'.
3739 */
3740IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3741{
3742 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3743
3744 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3745 pCtx->rax &= UINT32_C(0xffffffff);
3746 pCtx->rbx &= UINT32_C(0xffffffff);
3747 pCtx->rcx &= UINT32_C(0xffffffff);
3748 pCtx->rdx &= UINT32_C(0xffffffff);
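    /* Note: a real CPU clears bits 63:32 of RAX, RBX, RCX and RDX here when
       in 64-bit mode; the explicit masking above reproduces that. */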
3749
3750 iemRegAddToRip(pIemCpu, cbInstr);
3751 return VINF_SUCCESS;
3752}
3753
3754
3755/**
3756 * Implements 'AAD'.
3757 *
3758 * @param bImm The immediate operand (the divisor base, normally 10).
3759 */
3760IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3761{
3762 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3763
3764 uint16_t const ax = pCtx->ax;
3765 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3766 pCtx->ax = al;
3767 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3768 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3769 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3770
3771 iemRegAddToRip(pIemCpu, cbInstr);
3772 return VINF_SUCCESS;
3773}
3774
3775
3776/**
3777 * Implements 'AAM'.
3778 *
3779 * @param bImm The immediate operand. Cannot be 0.
3780 */
3781IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3782{
3783 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3784 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3785
3786 uint16_t const ax = pCtx->ax;
3787 uint8_t const al = (uint8_t)ax % bImm;
3788 uint8_t const ah = (uint8_t)ax / bImm;
3789 pCtx->ax = (ah << 8) + al;
3790 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3791 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3792 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3793
3794 iemRegAddToRip(pIemCpu, cbInstr);
3795 return VINF_SUCCESS;
3796}
3797
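/*
 * Illustrative only (hedged sketch, not compiled in): the ASCII adjust
 * helpers above boil down to plain base conversion. AAD folds the unpacked
 * BCD digits in AH:AL into a binary AL, while AAM splits AL back into
 * AH = AL / imm and AL = AL % imm. The hypothetical self-check below spells
 * this out for the default base of 10.
 */
#if 0 /* illustrative only */
static void iemSketchAsciiAdjustExample(void)
{
    /* AAD 0x0a on AX=0x0307 ("37" as unpacked BCD): AL = 7 + 3*10 = 37 = 0x25, AH = 0. */
    uint16_t const uAx = 0x0307;
    uint8_t const  uAl = (uint8_t)uAx + (uint8_t)(uAx >> 8) * 10;
    Assert(uAl == 0x25);

    /* AAM 0x0a on AL=0x25 (i.e. 37): AH = 37 / 10 = 3, AL = 37 % 10 = 7. */
    Assert(uAl / 10 == 3 && uAl % 10 == 7);
}
#endif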
3798
3799
3800
3801/*
3802 * Instantiate the various string operation combinations.
3803 */
3804#define OP_SIZE 8
3805#define ADDR_SIZE 16
3806#include "IEMAllCImplStrInstr.cpp.h"
3807#define OP_SIZE 8
3808#define ADDR_SIZE 32
3809#include "IEMAllCImplStrInstr.cpp.h"
3810#define OP_SIZE 8
3811#define ADDR_SIZE 64
3812#include "IEMAllCImplStrInstr.cpp.h"
3813
3814#define OP_SIZE 16
3815#define ADDR_SIZE 16
3816#include "IEMAllCImplStrInstr.cpp.h"
3817#define OP_SIZE 16
3818#define ADDR_SIZE 32
3819#include "IEMAllCImplStrInstr.cpp.h"
3820#define OP_SIZE 16
3821#define ADDR_SIZE 64
3822#include "IEMAllCImplStrInstr.cpp.h"
3823
3824#define OP_SIZE 32
3825#define ADDR_SIZE 16
3826#include "IEMAllCImplStrInstr.cpp.h"
3827#define OP_SIZE 32
3828#define ADDR_SIZE 32
3829#include "IEMAllCImplStrInstr.cpp.h"
3830#define OP_SIZE 32
3831#define ADDR_SIZE 64
3832#include "IEMAllCImplStrInstr.cpp.h"
3833
3834#define OP_SIZE 64
3835#define ADDR_SIZE 32
3836#include "IEMAllCImplStrInstr.cpp.h"
3837#define OP_SIZE 64
3838#define ADDR_SIZE 64
3839#include "IEMAllCImplStrInstr.cpp.h"
3840
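/*
 * Note: the OP_SIZE 64 / ADDR_SIZE 16 combination is deliberately absent
 * above; 64-bit operands only exist in 64-bit mode, where the address size
 * prefix selects 32-bit rather than 16-bit addressing.
 */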
3841
3842/**
3843 * Implements 'FINIT' and 'FNINIT'.
3844 *
3845 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3846 * not.
3847 */
3848IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3849{
3850 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3851
3852 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3853 return iemRaiseDeviceNotAvailable(pIemCpu);
3854
3855 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
3856 if (fCheckXcpts && TODO )
3857 return iemRaiseMathFault(pIemCpu);
3858 */
3859
3860 if (iemFRegIsFxSaveFormat(pIemCpu))
3861 {
3862 pCtx->fpu.FCW = 0x37f;
3863 pCtx->fpu.FSW = 0;
3864 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
3865 pCtx->fpu.FPUDP = 0;
3866 pCtx->fpu.DS = 0; //??
3867 pCtx->fpu.FPUIP = 0;
3868 pCtx->fpu.CS = 0; //??
3869 pCtx->fpu.FOP = 0;
3870 }
3871 else
3872 {
3873 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3874 pFpu->FCW = 0x37f;
3875 pFpu->FSW = 0;
3876 pFpu->FTW = 0xffff; /* 11 - empty */
3877 pFpu->FPUOO = 0; //??
3878 pFpu->FPUOS = 0; //??
3879 pFpu->FPUIP = 0;
3880 pFpu->CS = 0; //??
3881 pFpu->FOP = 0;
3882 }
3883
3884 iemRegAddToRip(pIemCpu, cbInstr);
3885 return VINF_SUCCESS;
3886}
3887
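/*
 * Note on the FNINIT defaults used above: FCW=0x37f masks all six FPU
 * exceptions and selects 64-bit precision with round-to-nearest (bit 6 is a
 * reserved bit that reads as set); FSW=0 gives TOP=0 and no pending
 * exceptions; the tag word marks every register empty (0x00 in the abridged
 * FXSAVE encoding, 0xffff - all 11b tags - in the legacy format).
 */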
3888
3889/**
3890 * Implements 'FXSAVE'.
3891 *
3892 * @param iEffSeg The effective segment.
3893 * @param GCPtrEff The address of the image.
3894 * @param enmEffOpSize The operand size (only REX.W really matters).
3895 */
3896IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3897{
3898 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3899
3900 /*
3901 * Raise exceptions.
3902 */
3903 if (pCtx->cr0 & X86_CR0_EM)
3904 return iemRaiseUndefinedOpcode(pIemCpu);
3905 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3906 return iemRaiseDeviceNotAvailable(pIemCpu);
3907 if (GCPtrEff & 15)
3908 {
3909 /** @todo CPU/VM detection possible! \#AC might not be signalled for
3910 * all/any misalignment sizes; Intel says it's an implementation detail. */
3911 if ( (pCtx->cr0 & X86_CR0_AM)
3912 && pCtx->eflags.Bits.u1AC
3913 && pIemCpu->uCpl == 3)
3914 return iemRaiseAlignmentCheckException(pIemCpu);
3915 return iemRaiseGeneralProtectionFault0(pIemCpu);
3916 }
3917 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3918
3919 /*
3920 * Access the memory.
3921 */
3922 void *pvMem512;
3923 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3924 if (rcStrict != VINF_SUCCESS)
3925 return rcStrict;
3926 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
3927
3928 /*
3929 * Store the registers.
3930 */
3931 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's
3932 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
3933
3934 /* common for all formats */
3935 pDst->FCW = pCtx->fpu.FCW;
3936 pDst->FSW = pCtx->fpu.FSW;
3937 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
3938 pDst->FOP = pCtx->fpu.FOP;
3939 pDst->MXCSR = pCtx->fpu.MXCSR;
3940 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
3941 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
3942 {
3943 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
3944 * them for now... */
3945 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
3946 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
3947 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
3948 pDst->aRegs[i].au32[3] = 0;
3949 }
3950
3951 /* FPU IP, CS, DP and DS. */
3952 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
3953 * state information. :-/
3954 * Storing zeros now to prevent any potential leakage of host info. */
3955 pDst->FPUIP = 0;
3956 pDst->CS = 0;
3957 pDst->Rsrvd1 = 0;
3958 pDst->FPUDP = 0;
3959 pDst->DS = 0;
3960 pDst->Rsrvd2 = 0;
3961
3962 /* XMM registers. */
3963 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
3964 || pIemCpu->enmCpuMode != IEMMODE_64BIT
3965 || pIemCpu->uCpl != 0)
3966 {
3967 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
3968 for (uint32_t i = 0; i < cXmmRegs; i++)
3969 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
3970 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
3971 * right? */
3972 }
3973
3974 /*
3975 * Commit the memory.
3976 */
3977 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3978 if (rcStrict != VINF_SUCCESS)
3979 return rcStrict;
3980
3981 iemRegAddToRip(pIemCpu, cbInstr);
3982 return VINF_SUCCESS;
3983}
3984
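/*
 * Illustrative only (hedged sketch, not compiled in): casting the mapped
 * 512 byte buffer to PX86FXSTATE in the implementation above relies on the
 * structure mirroring the architectural FXSAVE image layout. Compile-time
 * checks along the following lines would document that assumption.
 */
#if 0 /* illustrative only */
AssertCompile(RT_OFFSETOF(X86FXSTATE, FCW)        ==   0);
AssertCompile(RT_OFFSETOF(X86FXSTATE, FSW)        ==   2);
AssertCompile(RT_OFFSETOF(X86FXSTATE, FTW)        ==   4);
AssertCompile(RT_OFFSETOF(X86FXSTATE, FOP)        ==   6);
AssertCompile(RT_OFFSETOF(X86FXSTATE, FPUIP)      ==   8);
AssertCompile(RT_OFFSETOF(X86FXSTATE, CS)         ==  12);
AssertCompile(RT_OFFSETOF(X86FXSTATE, FPUDP)      ==  16);
AssertCompile(RT_OFFSETOF(X86FXSTATE, DS)         ==  20);
AssertCompile(RT_OFFSETOF(X86FXSTATE, MXCSR)      ==  24);
AssertCompile(RT_OFFSETOF(X86FXSTATE, MXCSR_MASK) ==  28);
AssertCompile(RT_OFFSETOF(X86FXSTATE, aRegs)      ==  32);
AssertCompile(RT_OFFSETOF(X86FXSTATE, aXMM)       == 160);
AssertCompile(sizeof(X86FXSTATE)                  == 512);
#endif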
3985
3986/**
3987 * Implements 'FXRSTOR'.
3988 *
3989 * @param iEffSeg The effective segment register for @a GCPtrEff.
 * @param GCPtrEff The address of the image.
3990 * @param enmEffOpSize The operand size (only REX.W really matters).
3991 */
3992IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3993{
3994 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3995
3996 /*
3997 * Raise exceptions.
3998 */
3999 if (pCtx->cr0 & X86_CR0_EM)
4000 return iemRaiseUndefinedOpcode(pIemCpu);
4001 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4002 return iemRaiseDeviceNotAvailable(pIemCpu);
4003 if (GCPtrEff & 15)
4004 {
4005 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4006 * all/any misalignment sizes; Intel says it's an implementation detail. */
4007 if ( (pCtx->cr0 & X86_CR0_AM)
4008 && pCtx->eflags.Bits.u1AC
4009 && pIemCpu->uCpl == 3)
4010 return iemRaiseAlignmentCheckException(pIemCpu);
4011 return iemRaiseGeneralProtectionFault0(pIemCpu);
4012 }
4013 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4014
4015 /*
4016 * Access the memory.
4017 */
4018 void *pvMem512;
4019 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4020 if (rcStrict != VINF_SUCCESS)
4021 return rcStrict;
4022 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4023
4024 /*
4025 * Check the state for stuff which will GP(0).
4026 */
4027 uint32_t const fMXCSR = pSrc->MXCSR;
4028 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
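    /* Note: 0xffbf is the architectural default MXCSR_MASK, assumed when the
       saved mask field reads as zero; it covers every MXCSR bit except DAZ
       (bit 6), which not all CPUs implement. */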
4029 if (fMXCSR & ~fMXCSR_MASK)
4030 {
4031 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4032 return iemRaiseGeneralProtectionFault0(pIemCpu);
4033 }
4034
4035 /*
4036 * Load the registers.
4037 */
4038 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's
4039 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
4040
4041 /* common for all formats */
4042 pCtx->fpu.FCW = pSrc->FCW;
4043 pCtx->fpu.FSW = pSrc->FSW;
4044 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4045 pCtx->fpu.FOP = pSrc->FOP;
4046 pCtx->fpu.MXCSR = fMXCSR;
4047 /* (MXCSR_MASK is read-only) */
4048 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4049 {
4050 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4051 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4052 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4053 pCtx->fpu.aRegs[i].au32[3] = 0;
4054 }
4055
4056 /* FPU IP, CS, DP and DS. */
4057 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4058 {
4059 pCtx->fpu.FPUIP = pSrc->FPUIP;
4060 pCtx->fpu.CS = pSrc->CS;
4061 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4062 pCtx->fpu.FPUDP = pSrc->FPUDP;
4063 pCtx->fpu.DS = pSrc->DS;
4064 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4065 }
4066 else
4067 {
4068 pCtx->fpu.FPUIP = pSrc->FPUIP;
4069 pCtx->fpu.CS = pSrc->CS;
4070 pCtx->fpu.Rsrvd1 = 0;
4071 pCtx->fpu.FPUDP = pSrc->FPUDP;
4072 pCtx->fpu.DS = pSrc->DS;
4073 pCtx->fpu.Rsrvd2 = 0;
4074 }
4075
4076 /* XMM registers. */
4077 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4078 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4079 || pIemCpu->uCpl != 0)
4080 {
4081 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4082 for (uint32_t i = 0; i < cXmmRegs; i++)
4083 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4084 }
4085
4086 /*
4087 * Commit the memory.
4088 */
4089 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4090 if (rcStrict != VINF_SUCCESS)
4091 return rcStrict;
4092
4093 iemRegAddToRip(pIemCpu, cbInstr);
4094 return VINF_SUCCESS;
4095}
4096
4097
4098/**
4099 * Common routine for fnstenv and fnsave.
4100 *
4101 * @param uPtr Where to store the state.
4102 * @param pCtx The CPU context.
4103 */
4104static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4105{
4106 if (enmEffOpSize == IEMMODE_16BIT)
4107 {
4108 uPtr.pu16[0] = pCtx->fpu.FCW;
4109 uPtr.pu16[1] = pCtx->fpu.FSW;
4110 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4111 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4112 {
4113 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4114 * protected mode or long mode and we save it in real mode? And vice
4115 * versa? And with 32-bit operand size? I think the CPU is storing the
4116 * effective address ((CS << 4) + IP) in the offset register and not
4117 * doing any address calculations here. */
4118 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4119 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4120 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4121 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4122 }
4123 else
4124 {
4125 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4126 uPtr.pu16[4] = pCtx->fpu.CS;
4127 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4128 uPtr.pu16[6] = pCtx->fpu.DS;
4129 }
4130 }
4131 else
4132 {
4133 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4134 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4135 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4136 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4137 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4138 {
4139 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4140 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4141 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4142 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4143 }
4144 else
4145 {
4146 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4147 uPtr.pu16[4*2] = pCtx->fpu.CS;
4148 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4149 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4150 uPtr.pu16[6*2] = pCtx->fpu.DS;
4151 }
4152 }
4153}
4154
4155
4156/**
4157 * Common routine for fldenv and frstor.
4158 *
4159 * @param uPtr Where to load the state from.
4160 * @param pCtx The CPU context.
4161 */
4162static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4163{
4164 if (enmEffOpSize == IEMMODE_16BIT)
4165 {
4166 pCtx->fpu.FCW = uPtr.pu16[0];
4167 pCtx->fpu.FSW = uPtr.pu16[1];
4168 pCtx->fpu.FTW = uPtr.pu16[2];
4169 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4170 {
4171 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4172 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4173 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4174 pCtx->fpu.CS = 0;
4175 pCtx->fpu.DS = 0;
4176 }
4177 else
4178 {
4179 pCtx->fpu.FPUIP = uPtr.pu16[3];
4180 pCtx->fpu.CS = uPtr.pu16[4];
4181 pCtx->fpu.FPUDP = uPtr.pu16[5];
4182 pCtx->fpu.DS = uPtr.pu16[6];
4183 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4184 }
4185 }
4186 else
4187 {
4188 pCtx->fpu.FCW = uPtr.pu16[0*2];
4189 pCtx->fpu.FSW = uPtr.pu16[1*2];
4190 pCtx->fpu.FTW = uPtr.pu16[2*2];
4191 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4192 {
4193 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4194 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4195 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4196 pCtx->fpu.CS = 0;
4197 pCtx->fpu.DS = 0;
4198 }
4199 else
4200 {
4201 pCtx->fpu.FPUIP = uPtr.pu32[3];
4202 pCtx->fpu.CS = uPtr.pu16[4*2];
4203 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4204 pCtx->fpu.FPUDP = uPtr.pu32[5];
4205 pCtx->fpu.DS = uPtr.pu16[6*2];
4206 }
4207 }
4208
4209 /* Make adjustments. */
4210 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4211 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4212 iemFpuRecalcExceptionStatus(pCtx);
4213 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4214 * exceptions are pending after loading the saved state? */
4215}
4216
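/*
 * Illustrative only (hedged sketch, not compiled in): the iemFpuCalcFullFtw
 * and iemFpuCompressFtw helpers used by the store/restore routines above are
 * assumed to translate between the two architectural tag word encodings,
 * roughly along these lines for the compressing direction.
 */
#if 0 /* illustrative only */
static uint16_t iemSketchCompressFtw(uint16_t u16FullFtw)
{
    /* Full form: two bits per register, 11b = empty.
       Abridged (FXSAVE) form: one bit per register, 0 = empty, 1 = in use. */
    uint16_t uAbridged = 0;
    for (unsigned iReg = 0; iReg < 8; iReg++)
        if (((u16FullFtw >> (iReg * 2)) & 3) != 3)
            uAbridged |= RT_BIT(iReg);
    return uAbridged;
}
#endif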
4217
4218/**
4219 * Implements 'FNSTENV'.
4220 *
4221 * @param enmEffOpSize The operand size (only REX.W really matters).
4222 * @param iEffSeg The effective segment register for @a GCPtrEff.
4223 * @param GCPtrEffDst The address of the image.
4224 */
4225IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4226{
4227 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4228 RTPTRUNION uPtr;
4229 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4230 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4231 if (rcStrict != VINF_SUCCESS)
4232 return rcStrict;
4233
4234 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4235
4236 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4237 if (rcStrict != VINF_SUCCESS)
4238 return rcStrict;
4239
4240 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4241 iemRegAddToRip(pIemCpu, cbInstr);
4242 return VINF_SUCCESS;
4243}
4244
4245
4246/**
4247 * Implements 'FLDENV'.
4248 *
4249 * @param enmEffOpSize The operand size (only REX.W really matters).
4250 * @param iEffSeg The effective segment register for @a GCPtrEff.
4251 * @param GCPtrEffSrc The address of the image.
4252 */
4253IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4254{
4255 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4256 RTCPTRUNION uPtr;
4257 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4258 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4259 if (rcStrict != VINF_SUCCESS)
4260 return rcStrict;
4261
4262 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4263
4264 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4265 if (rcStrict != VINF_SUCCESS)
4266 return rcStrict;
4267
4268 iemRegAddToRip(pIemCpu, cbInstr);
4269 return VINF_SUCCESS;
4270}
4271
4272
4273/**
4274 * Implements 'FLDCW'.
4275 *
4276 * @param u16Fcw The new FCW.
4277 */
4278IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4279{
4280 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4281
4282 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4283 /** @todo Testcase: Try to see what happens when trying to set undefined bits
4284 * (other than 6 and 7). Currently ignoring them. */
4285 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4286 * according to FSW. (This is what is currently implemented.) */
4287 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4288 iemFpuRecalcExceptionStatus(pCtx);
4289
4290 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4291 iemRegAddToRip(pIemCpu, cbInstr);
4292 return VINF_SUCCESS;
4293}
4294
4295
4296
4297/**
4298 * Implements the underflow case of fxch.
4299 *
4300 * @param iStReg The other stack register.
4301 */
4302IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4303{
4304 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4305
4306 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4307 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4308 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4309
4310 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4311 * registers are read as QNaN and then exchanged. This could be
4312 * wrong... */
4313 if (pCtx->fpu.FCW & X86_FCW_IM)
4314 {
4315 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4316 {
4317 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4318 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4319 else
4320 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4321 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4322 }
4323 else
4324 {
4325 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4326 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4327 }
4328 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4329 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4330 }
4331 else
4332 {
4333 /* raise underflow exception, don't change anything. */
4334 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4335 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4336 }
4337 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4338
4339 iemRegAddToRip(pIemCpu, cbInstr);
4340 return VINF_SUCCESS;
4341}
4342
4343
4344/**
4345 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4346 *
4347 * @param iStReg The other stack register.
 * @param pfnAImpl The comparison worker to invoke.
 * @param fPop Whether to pop the compared stack register afterwards.
4348 */
4349IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4350{
4351 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4352 Assert(iStReg < 8);
4353
4354 /*
4355 * Raise exceptions.
4356 */
4357 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4358 return iemRaiseDeviceNotAvailable(pIemCpu);
4359 uint16_t u16Fsw = pCtx->fpu.FSW;
4360 if (u16Fsw & X86_FSW_ES)
4361 return iemRaiseMathFault(pIemCpu);
4362
4363 /*
4364 * Check if any of the register accesses causes #SF + #IA.
4365 */
4366 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4367 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4368 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4369 {
4370 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
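        /* Informal note: the comparison worker is expected to hand back the
           usual FCOMI/FUCOMI flag encoding in u32Eflags - ZF, PF and CF all
           set for an unordered result, CF set for ST(0) < ST(iStReg), ZF set
           for equality, and all three clear for ST(0) > ST(iStReg). */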
4371 pCtx->fpu.FSW &= ~X86_FSW_C1;
4372 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4373 if ( !(u16Fsw & X86_FSW_IE)
4374 || (pCtx->fpu.FCW & X86_FCW_IM) )
4375 {
4376 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4377 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4378 }
4379 }
4380 else if (pCtx->fpu.FCW & X86_FCW_IM)
4381 {
4382 /* Masked underflow. */
4383 pCtx->fpu.FSW &= ~X86_FSW_C1;
4384 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4385 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4386 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4387 }
4388 else
4389 {
4390 /* Raise underflow - don't touch EFLAGS or TOP. */
4391 pCtx->fpu.FSW &= ~X86_FSW_C1;
4392 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4393 fPop = false;
4394 }
4395
4396 /*
4397 * Pop if necessary.
4398 */
4399 if (fPop)
4400 {
4401 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4402 pCtx->fpu.FSW &= ~X86_FSW_TOP_MASK;
4403 pCtx->fpu.FSW |= ((iReg1 + 1) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4404 }
4405
4406 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4407 iemRegAddToRip(pIemCpu, cbInstr);
4408 return VINF_SUCCESS;
4409}
4410
4411/** @} */
4412