VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@42610

Last change on this file: revision 42610, checked in by vboxsync, 13 years ago

IEM: Implemented SGDT and SIDT.

1/* $Id: IEMAllCImpl.cpp.h 42610 2012-08-06 00:44:09Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
43 }
44 return VINF_SUCCESS;
45}
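
/*
 * A rough sketch of the missing I/O permission bitmap check (not the actual
 * implementation; the TSS reads are left as placeholders): fetch the 16-bit
 * I/O map base at offset 0x66 of the 32-bit TSS, read the two bitmap bytes
 * covering the port (bounds-checking them against the TSS limit), and raise
 * #GP(0) if any of the cbOperand bits starting at bit (u16Port & 7) is set:
 *
 *     uint16_t offIoBitmap = <read u16 at TR.base + 0x66>;
 *     uint16_t u16Bits     = <read u16 at TR.base + offIoBitmap + (u16Port >> 3)>;
 *     if ((u16Bits >> (u16Port & 7)) & ((UINT32_C(1) << cbOperand) - 1))
 *         return iemRaiseGeneralProtectionFault0(pIemCpu);
 */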
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
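
/*
 * For reference, an equivalent (illustrative only) way of folding the parity
 * down with XORs instead of counting bits:
 *
 *     u8Result ^= u8Result >> 4;
 *     u8Result ^= u8Result >> 2;
 *     u8Result ^= u8Result >> 1;
 *     return !(u8Result & 1);
 *
 * The function above is compiled out; PF is computed by the iemAImpl_* flag
 * helpers instead.
 */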
80
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
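 /* Only the flags listed in fToUpdate/fUndefined are taken from the helper
    result below; all other EFLAGS bits keep their current values. */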
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param pSReg Pointer to the segment register.
106 */
107static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg)
108{
109 /** @todo Testcase: write a testcase checking what happens when loading a NULL
110 * data selector in protected mode. */
111 pSReg->Sel = 0;
112 pSReg->ValidSel = 0;
113 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
114 pSReg->u64Base = 0;
115 pSReg->u32Limit = 0;
116 pSReg->Attr.u = 0;
117}
118
119
120/**
121 * Helper used by iret and retf.
122 *
123 * @param uCpl The new CPL.
124 * @param pSReg Pointer to the segment register.
125 */
126static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
127{
128#ifdef VBOX_WITH_RAW_MODE_NOT_R0
129 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
130 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
131#else
132 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
133#endif
134
135 if ( uCpl > pSReg->Attr.n.u2Dpl
136 && pSReg->Attr.n.u1DescType /* code or data, not system */
137 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
138 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
139 iemHlpLoadNullDataSelectorProt(pSReg);
140}
141
142/** @} */
143
144/** @name C Implementations
145 * @{
146 */
147
148/**
149 * Implements a 16-bit popa.
150 */
151IEM_CIMPL_DEF_0(iemCImpl_popa_16)
152{
153 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
154 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
155 RTGCPTR GCPtrLast = GCPtrStart + 15;
156 VBOXSTRICTRC rcStrict;
157
158 /*
159 * The docs are a bit hard to comprehend here, but it looks like we wrap
160 * around in real mode as long as none of the individual "popa" crosses the
161 * end of the stack segment. In protected mode we check the whole access
162 * in one go. For efficiency, only do the word-by-word thing if we're in
163 * danger of wrapping around.
164 */
165 /** @todo do popa boundary / wrap-around checks. */
166 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
167 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
168 {
169 /* word-by-word */
170 RTUINT64U TmpRsp;
171 TmpRsp.u = pCtx->rsp;
172 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
173 if (rcStrict == VINF_SUCCESS)
174 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
175 if (rcStrict == VINF_SUCCESS)
176 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
177 if (rcStrict == VINF_SUCCESS)
178 {
179 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
180 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
181 }
182 if (rcStrict == VINF_SUCCESS)
183 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
184 if (rcStrict == VINF_SUCCESS)
185 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
186 if (rcStrict == VINF_SUCCESS)
187 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
188 if (rcStrict == VINF_SUCCESS)
189 {
190 pCtx->rsp = TmpRsp.u;
191 iemRegAddToRip(pIemCpu, cbInstr);
192 }
193 }
194 else
195 {
196 uint16_t const *pa16Mem = NULL;
197 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
198 if (rcStrict == VINF_SUCCESS)
199 {
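 /* The POPA stack image, lowest address first, is DI, SI, BP, SP, BX, DX,
    CX, AX (PUSHA pushes AX first), which is what the 7 - X86_GREG_xXX
    indexing below accounts for. */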
200 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
201 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
202 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
203 /* skip sp */
204 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
205 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
206 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
207 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
208 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
209 if (rcStrict == VINF_SUCCESS)
210 {
211 iemRegAddToRsp(pCtx, 16);
212 iemRegAddToRip(pIemCpu, cbInstr);
213 }
214 }
215 }
216 return rcStrict;
217}
218
219
220/**
221 * Implements a 32-bit popa.
222 */
223IEM_CIMPL_DEF_0(iemCImpl_popa_32)
224{
225 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
226 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
227 RTGCPTR GCPtrLast = GCPtrStart + 31;
228 VBOXSTRICTRC rcStrict;
229
230 /*
231 * The docs are a bit hard to comprehend here, but it looks like we wrap
232 * around in real mode as long as none of the individual "popa" crosses the
233 * end of the stack segment. In protected mode we check the whole access
234 * in one go. For efficiency, only do the word-by-word thing if we're in
235 * danger of wrapping around.
236 */
237 /** @todo do popa boundary / wrap-around checks. */
238 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
239 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
240 {
241 /* word-by-word */
242 RTUINT64U TmpRsp;
243 TmpRsp.u = pCtx->rsp;
244 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
245 if (rcStrict == VINF_SUCCESS)
246 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
247 if (rcStrict == VINF_SUCCESS)
248 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
249 if (rcStrict == VINF_SUCCESS)
250 {
251 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
252 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
253 }
254 if (rcStrict == VINF_SUCCESS)
255 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
256 if (rcStrict == VINF_SUCCESS)
257 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
258 if (rcStrict == VINF_SUCCESS)
259 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
260 if (rcStrict == VINF_SUCCESS)
261 {
262#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
263 pCtx->rdi &= UINT32_MAX;
264 pCtx->rsi &= UINT32_MAX;
265 pCtx->rbp &= UINT32_MAX;
266 pCtx->rbx &= UINT32_MAX;
267 pCtx->rdx &= UINT32_MAX;
268 pCtx->rcx &= UINT32_MAX;
269 pCtx->rax &= UINT32_MAX;
270#endif
271 pCtx->rsp = TmpRsp.u;
272 iemRegAddToRip(pIemCpu, cbInstr);
273 }
274 }
275 else
276 {
277 uint32_t const *pa32Mem;
278 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
279 if (rcStrict == VINF_SUCCESS)
280 {
281 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
282 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
283 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
284 /* skip esp */
285 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
286 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
287 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
288 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
289 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
290 if (rcStrict == VINF_SUCCESS)
291 {
292 iemRegAddToRsp(pCtx, 32);
293 iemRegAddToRip(pIemCpu, cbInstr);
294 }
295 }
296 }
297 return rcStrict;
298}
299
300
301/**
302 * Implements a 16-bit pusha.
303 */
304IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
305{
306 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
307 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
308 RTGCPTR GCPtrBottom = GCPtrTop - 15;
309 VBOXSTRICTRC rcStrict;
310
311 /*
312 * The docs are a bit hard to comprehend here, but it looks like we wrap
313 * around in real mode as long as none of the individual "pusha" crosses the
314 * end of the stack segment. In protected mode we check the whole access
315 * in one go. For efficiency, only do the word-by-word thing if we're in
316 * danger of wrapping around.
317 */
318 /** @todo do pusha boundary / wrap-around checks. */
319 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
320 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
321 {
322 /* word-by-word */
323 RTUINT64U TmpRsp;
324 TmpRsp.u = pCtx->rsp;
325 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
326 if (rcStrict == VINF_SUCCESS)
327 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
328 if (rcStrict == VINF_SUCCESS)
329 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
330 if (rcStrict == VINF_SUCCESS)
331 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
332 if (rcStrict == VINF_SUCCESS)
333 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
334 if (rcStrict == VINF_SUCCESS)
335 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
336 if (rcStrict == VINF_SUCCESS)
337 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
338 if (rcStrict == VINF_SUCCESS)
339 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 {
342 pCtx->rsp = TmpRsp.u;
343 iemRegAddToRip(pIemCpu, cbInstr);
344 }
345 }
346 else
347 {
348 GCPtrBottom--;
349 uint16_t *pa16Mem = NULL;
350 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
351 if (rcStrict == VINF_SUCCESS)
352 {
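 /* Build the PUSHA image in one go; AX ends up at the highest address since
    it is pushed first. */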
353 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
354 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
355 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
356 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
357 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
358 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
359 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
360 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
361 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
362 if (rcStrict == VINF_SUCCESS)
363 {
364 iemRegSubFromRsp(pCtx, 16);
365 iemRegAddToRip(pIemCpu, cbInstr);
366 }
367 }
368 }
369 return rcStrict;
370}
371
372
373/**
374 * Implements a 32-bit pusha.
375 */
376IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
377{
378 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
379 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
380 RTGCPTR GCPtrBottom = GCPtrTop - 31;
381 VBOXSTRICTRC rcStrict;
382
383 /*
384 * The docs are a bit hard to comprehend here, but it looks like we wrap
385 * around in real mode as long as none of the individual "pusha" crosses the
386 * end of the stack segment. In protected mode we check the whole access
387 * in one go. For efficiency, only do the word-by-word thing if we're in
388 * danger of wrapping around.
389 */
390 /** @todo do pusha boundary / wrap-around checks. */
391 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
392 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
393 {
394 /* word-by-word */
395 RTUINT64U TmpRsp;
396 TmpRsp.u = pCtx->rsp;
397 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
398 if (rcStrict == VINF_SUCCESS)
399 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
400 if (rcStrict == VINF_SUCCESS)
401 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
402 if (rcStrict == VINF_SUCCESS)
403 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
404 if (rcStrict == VINF_SUCCESS)
405 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
406 if (rcStrict == VINF_SUCCESS)
407 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
408 if (rcStrict == VINF_SUCCESS)
409 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 {
414 pCtx->rsp = TmpRsp.u;
415 iemRegAddToRip(pIemCpu, cbInstr);
416 }
417 }
418 else
419 {
420 GCPtrBottom--;
421 uint32_t *pa32Mem;
422 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
423 if (rcStrict == VINF_SUCCESS)
424 {
425 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
426 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
427 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
428 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
429 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
430 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
431 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
432 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
433 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
434 if (rcStrict == VINF_SUCCESS)
435 {
436 iemRegSubFromRsp(pCtx, 32);
437 iemRegAddToRip(pIemCpu, cbInstr);
438 }
439 }
440 }
441 return rcStrict;
442}
443
444
445/**
446 * Implements pushf.
447 *
449 * @param enmEffOpSize The effective operand size.
450 */
451IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
452{
453 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
454
455 /*
456 * If we're in V8086 mode some care is required (which is why we're
457 * doing this in a C implementation).
458 */
459 uint32_t fEfl = pCtx->eflags.u;
460 if ( (fEfl & X86_EFL_VM)
461 && X86_EFL_GET_IOPL(fEfl) != 3 )
462 {
463 Assert(pCtx->cr0 & X86_CR0_PE);
464 if ( enmEffOpSize != IEMMODE_16BIT
465 || !(pCtx->cr4 & X86_CR4_VME))
466 return iemRaiseGeneralProtectionFault0(pIemCpu);
467 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
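 /* The pushed IF position (bit 9) gets a copy of VIF (bit 19) below. */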
468 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
469 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
470 }
471
472 /*
473 * Ok, clear RF and VM and push the flags.
474 */
475 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
476
477 VBOXSTRICTRC rcStrict;
478 switch (enmEffOpSize)
479 {
480 case IEMMODE_16BIT:
481 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
482 break;
483 case IEMMODE_32BIT:
484 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
485 break;
486 case IEMMODE_64BIT:
487 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
488 break;
489 IEM_NOT_REACHED_DEFAULT_CASE_RET();
490 }
491 if (rcStrict != VINF_SUCCESS)
492 return rcStrict;
493
494 iemRegAddToRip(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements popf.
501 *
502 * @param enmEffOpSize The effective operand size.
503 */
504IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
505{
506 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
507 uint32_t const fEflOld = pCtx->eflags.u;
508 VBOXSTRICTRC rcStrict;
509 uint32_t fEflNew;
510
511 /*
512 * V8086 is special as usual.
513 */
514 if (fEflOld & X86_EFL_VM)
515 {
516 /*
517 * Almost anything goes if IOPL is 3.
518 */
519 if (X86_EFL_GET_IOPL(fEflOld) == 3)
520 {
521 switch (enmEffOpSize)
522 {
523 case IEMMODE_16BIT:
524 {
525 uint16_t u16Value;
526 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
527 if (rcStrict != VINF_SUCCESS)
528 return rcStrict;
529 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
530 break;
531 }
532 case IEMMODE_32BIT:
533 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
534 if (rcStrict != VINF_SUCCESS)
535 return rcStrict;
536 break;
537 IEM_NOT_REACHED_DEFAULT_CASE_RET();
538 }
539
540 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
541 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
542 }
543 /*
544 * Interrupt flag virtualization with CR4.VME=1.
545 */
546 else if ( enmEffOpSize == IEMMODE_16BIT
547 && (pCtx->cr4 & X86_CR4_VME) )
548 {
549 uint16_t u16Value;
550 RTUINT64U TmpRsp;
551 TmpRsp.u = pCtx->rsp;
552 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
553 if (rcStrict != VINF_SUCCESS)
554 return rcStrict;
555
556 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
557 * or before? */
558 if ( ( (u16Value & X86_EFL_IF)
559 && (fEflOld & X86_EFL_VIP))
560 || (u16Value & X86_EFL_TF) )
561 return iemRaiseGeneralProtectionFault0(pIemCpu);
562
563 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
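 /* The popped IF (bit 9) is reflected into VIF (bit 19); the real IF is left
    untouched by the masking below. */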
564 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
565 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
566 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
567
568 pCtx->rsp = TmpRsp.u;
569 }
570 else
571 return iemRaiseGeneralProtectionFault0(pIemCpu);
572
573 }
574 /*
575 * Not in V8086 mode.
576 */
577 else
578 {
579 /* Pop the flags. */
580 switch (enmEffOpSize)
581 {
582 case IEMMODE_16BIT:
583 {
584 uint16_t u16Value;
585 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
586 if (rcStrict != VINF_SUCCESS)
587 return rcStrict;
588 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
589 break;
590 }
591 case IEMMODE_32BIT:
592 case IEMMODE_64BIT:
593 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
594 if (rcStrict != VINF_SUCCESS)
595 return rcStrict;
596 break;
597 IEM_NOT_REACHED_DEFAULT_CASE_RET();
598 }
599
600 /* Merge them with the current flags. */
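 /* CPL 0 (or unchanged IOPL/IF) may update everything POPF can touch;
    CPL <= IOPL may update IF but not IOPL; otherwise neither IF nor IOPL
    is changed. */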
601 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
602 || pIemCpu->uCpl == 0)
603 {
604 fEflNew &= X86_EFL_POPF_BITS;
605 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
606 }
607 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
608 {
609 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
610 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
611 }
612 else
613 {
614 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
615 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
616 }
617 }
618
619 /*
620 * Commit the flags.
621 */
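 /* Bit 1 of EFLAGS is reserved and always reads as one. */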
622 Assert(fEflNew & RT_BIT_32(1));
623 pCtx->eflags.u = fEflNew;
624 iemRegAddToRip(pIemCpu, cbInstr);
625
626 return VINF_SUCCESS;
627}
628
629
630/**
631 * Implements a 16-bit indirect call.
632 *
633 * @param uNewPC The new program counter (RIP) value (loaded from the
634 * operand).
636 */
637IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
638{
639 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
640 uint16_t uOldPC = pCtx->ip + cbInstr;
641 if (uNewPC > pCtx->cs.u32Limit)
642 return iemRaiseGeneralProtectionFault0(pIemCpu);
643
644 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
645 if (rcStrict != VINF_SUCCESS)
646 return rcStrict;
647
648 pCtx->rip = uNewPC;
649 return VINF_SUCCESS;
650
651}
652
653
654/**
655 * Implements a 16-bit relative call.
656 *
657 * @param offDisp The displacement offset.
658 */
659IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
660{
661 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
662 uint16_t uOldPC = pCtx->ip + cbInstr;
663 uint16_t uNewPC = uOldPC + offDisp;
664 if (uNewPC > pCtx->cs.u32Limit)
665 return iemRaiseGeneralProtectionFault0(pIemCpu);
666
667 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
668 if (rcStrict != VINF_SUCCESS)
669 return rcStrict;
670
671 pCtx->rip = uNewPC;
672 return VINF_SUCCESS;
673}
674
675
676/**
677 * Implements a 32-bit indirect call.
678 *
679 * @param uNewPC The new program counter (RIP) value (loaded from the
680 * operand).
682 */
683IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
684{
685 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
686 uint32_t uOldPC = pCtx->eip + cbInstr;
687 if (uNewPC > pCtx->cs.u32Limit)
688 return iemRaiseGeneralProtectionFault0(pIemCpu);
689
690 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
691 if (rcStrict != VINF_SUCCESS)
692 return rcStrict;
693
694 pCtx->rip = uNewPC;
695 return VINF_SUCCESS;
696
697}
698
699
700/**
701 * Implements a 32-bit relative call.
702 *
703 * @param offDisp The displacement offset.
704 */
705IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
706{
707 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
708 uint32_t uOldPC = pCtx->eip + cbInstr;
709 uint32_t uNewPC = uOldPC + offDisp;
710 if (uNewPC > pCtx->cs.u32Limit)
711 return iemRaiseGeneralProtectionFault0(pIemCpu);
712
713 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
714 if (rcStrict != VINF_SUCCESS)
715 return rcStrict;
716
717 pCtx->rip = uNewPC;
718 return VINF_SUCCESS;
719}
720
721
722/**
723 * Implements a 64-bit indirect call.
724 *
725 * @param uNewPC The new program counter (RIP) value (loaded from the
726 * operand).
728 */
729IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
730{
731 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
732 uint64_t uOldPC = pCtx->rip + cbInstr;
733 if (!IEM_IS_CANONICAL(uNewPC))
734 return iemRaiseGeneralProtectionFault0(pIemCpu);
735
736 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
737 if (rcStrict != VINF_SUCCESS)
738 return rcStrict;
739
740 pCtx->rip = uNewPC;
741 return VINF_SUCCESS;
742
743}
744
745
746/**
747 * Implements a 64-bit relative call.
748 *
749 * @param offDisp The displacement offset.
750 */
751IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
752{
753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
754 uint64_t uOldPC = pCtx->rip + cbInstr;
755 uint64_t uNewPC = uOldPC + offDisp;
756 if (!IEM_IS_CANONICAL(uNewPC))
757 return iemRaiseNotCanonical(pIemCpu);
758
759 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 pCtx->rip = uNewPC;
764 return VINF_SUCCESS;
765}
766
767
768/**
769 * Implements far jumps and calls thru task segments (TSS).
770 *
771 * @param uSel The selector.
772 * @param enmBranch The kind of branching we're performing.
773 * @param enmEffOpSize The effective operand size.
774 * @param pDesc The descriptor corresponding to @a uSel. The type is
775 * task segment.
776 */
777IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
778{
779 /* Call various functions to do the work. */
780 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
781}
782
783
784/**
785 * Implements far jumps and calls thru task gates.
786 *
787 * @param uSel The selector.
788 * @param enmBranch The kind of branching we're performing.
789 * @param enmEffOpSize The effective operand size.
790 * @param pDesc The descriptor corresponding to @a uSel. The type is
791 * task gate.
792 */
793IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
794{
795 /* Call various functions to do the work. */
796 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
797}
798
799
800/**
801 * Implements far jumps and calls thru call gates.
802 *
803 * @param uSel The selector.
804 * @param enmBranch The kind of branching we're performing.
805 * @param enmEffOpSize The effective operand size.
806 * @param pDesc The descriptor corresponding to @a uSel. The type is
807 * call gate.
808 */
809IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
810{
811 /* Call various functions to do the work. */
812 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
813}
814
815
816/**
817 * Implements far jumps and calls thru system selectors.
818 *
819 * @param uSel The selector.
820 * @param enmBranch The kind of branching we're performing.
821 * @param enmEffOpSize The effective operand size.
822 * @param pDesc The descriptor corresponding to @a uSel.
823 */
824IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
825{
826 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
827 Assert((uSel & X86_SEL_MASK_OFF_RPL));
828
829 if (IEM_IS_LONG_MODE(pIemCpu))
830 switch (pDesc->Legacy.Gen.u4Type)
831 {
832 case AMD64_SEL_TYPE_SYS_CALL_GATE:
833 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
834
835 default:
836 case AMD64_SEL_TYPE_SYS_LDT:
837 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
838 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
839 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
840 case AMD64_SEL_TYPE_SYS_INT_GATE:
841 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
842 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
843
844 }
845
846 switch (pDesc->Legacy.Gen.u4Type)
847 {
848 case X86_SEL_TYPE_SYS_286_CALL_GATE:
849 case X86_SEL_TYPE_SYS_386_CALL_GATE:
850 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
851
852 case X86_SEL_TYPE_SYS_TASK_GATE:
853 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
854
855 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
856 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
857 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
858
859 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
860 Log(("branch %04x -> busy 286 TSS\n", uSel));
861 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
862
863 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
864 Log(("branch %04x -> busy 386 TSS\n", uSel));
865 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
866
867 default:
868 case X86_SEL_TYPE_SYS_LDT:
869 case X86_SEL_TYPE_SYS_286_INT_GATE:
870 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
871 case X86_SEL_TYPE_SYS_386_INT_GATE:
872 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
873 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
874 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
875 }
876}
877
878
879/**
880 * Implements far jumps.
881 *
882 * @param uSel The selector.
883 * @param offSeg The segment offset.
884 * @param enmEffOpSize The effective operand size.
885 */
886IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
887{
888 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
889 NOREF(cbInstr);
890 Assert(offSeg <= UINT32_MAX);
891
892 /*
893 * Real mode and V8086 mode are easy. The only snag seems to be that
894 * CS.limit doesn't change and the limit check is done against the current
895 * limit.
896 */
897 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
898 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
899 {
900 if (offSeg > pCtx->cs.u32Limit)
901 return iemRaiseGeneralProtectionFault0(pIemCpu);
902
903 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
904 pCtx->rip = offSeg;
905 else
906 pCtx->rip = offSeg & UINT16_MAX;
907 pCtx->cs.Sel = uSel;
908 pCtx->cs.ValidSel = uSel;
909 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
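 /* Real and V86 mode: the segment base is simply the selector shifted left
    by four bits. */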
910 pCtx->cs.u64Base = (uint32_t)uSel << 4;
911 return VINF_SUCCESS;
912 }
913
914 /*
915 * Protected mode. Need to parse the specified descriptor...
916 */
917 if (!(uSel & X86_SEL_MASK_OFF_RPL))
918 {
919 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
920 return iemRaiseGeneralProtectionFault0(pIemCpu);
921 }
922
923 /* Fetch the descriptor. */
924 IEMSELDESC Desc;
925 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
926 if (rcStrict != VINF_SUCCESS)
927 return rcStrict;
928
929 /* Is it there? */
930 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
931 {
932 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
933 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
934 }
935
936 /*
937 * Deal with it according to its type. We do the standard code selectors
938 * here and dispatch the system selectors to worker functions.
939 */
940 if (!Desc.Legacy.Gen.u1DescType)
941 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
942
943 /* Only code segments. */
944 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
945 {
946 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
947 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
948 }
949
950 /* L vs D. */
951 if ( Desc.Legacy.Gen.u1Long
952 && Desc.Legacy.Gen.u1DefBig
953 && IEM_IS_LONG_MODE(pIemCpu))
954 {
955 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
956 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
957 }
958
959 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
960 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
961 {
962 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
963 {
964 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
965 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
966 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
967 }
968 }
969 else
970 {
971 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
972 {
973 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
974 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
975 }
976 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
977 {
978 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
979 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
980 }
981 }
982
983 /* Chop the high bits if 16-bit (Intel says so). */
984 if (enmEffOpSize == IEMMODE_16BIT)
985 offSeg &= UINT16_MAX;
986
987 /* Limit check. (Should alternatively check for non-canonical addresses
988 here, but that is ruled out by offSeg being 32-bit, right?) */
989 uint64_t u64Base;
990 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
991 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
992 u64Base = 0;
993 else
994 {
995 if (offSeg > cbLimit)
996 {
997 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
998 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
999 }
1000 u64Base = X86DESC_BASE(&Desc.Legacy);
1001 }
1002
1003 /*
1004 * Ok, everything checked out fine. Now set the accessed bit before
1005 * committing the result into CS, CSHID and RIP.
1006 */
1007 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1008 {
1009 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1010 if (rcStrict != VINF_SUCCESS)
1011 return rcStrict;
1012#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1013 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1014#endif
1015 }
1016
1017 /* commit */
1018 pCtx->rip = offSeg;
1019 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1020 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1021 pCtx->cs.ValidSel = pCtx->cs.Sel;
1022 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1023 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1024 pCtx->cs.u32Limit = cbLimit;
1025 pCtx->cs.u64Base = u64Base;
1026 /** @todo check if the hidden bits are loaded correctly for 64-bit
1027 * mode. */
1028 return VINF_SUCCESS;
1029}
1030
1031
1032/**
1033 * Implements far calls.
1034 *
1035 * This is very similar to iemCImpl_FarJmp.
1036 *
1037 * @param uSel The selector.
1038 * @param offSeg The segment offset.
1039 * @param enmEffOpSize The operand size (in case we need it).
1040 */
1041IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1042{
1043 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1044 VBOXSTRICTRC rcStrict;
1045 uint64_t uNewRsp;
1046 RTPTRUNION uPtrRet;
1047
1048 /*
1049 * Real mode and V8086 mode are easy. The only snag seems to be that
1050 * CS.limit doesn't change and the limit check is done against the current
1051 * limit.
1052 */
1053 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1054 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1055 {
1056 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1057
1058 /* Check stack first - may #SS(0). */
1059 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1060 &uPtrRet.pv, &uNewRsp);
1061 if (rcStrict != VINF_SUCCESS)
1062 return rcStrict;
1063
1064 /* Check the target address range. */
1065 if (offSeg > UINT32_MAX)
1066 return iemRaiseGeneralProtectionFault0(pIemCpu);
1067
1068 /* Everything is fine, push the return address. */
1069 if (enmEffOpSize == IEMMODE_16BIT)
1070 {
1071 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1072 uPtrRet.pu16[1] = pCtx->cs.Sel;
1073 }
1074 else
1075 {
1076 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1077 uPtrRet.pu16[3] = pCtx->cs.Sel;
1078 }
1079 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1080 if (rcStrict != VINF_SUCCESS)
1081 return rcStrict;
1082
1083 /* Branch. */
1084 pCtx->rip = offSeg;
1085 pCtx->cs.Sel = uSel;
1086 pCtx->cs.ValidSel = uSel;
1087 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1088 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1089 return VINF_SUCCESS;
1090 }
1091
1092 /*
1093 * Protected mode. Need to parse the specified descriptor...
1094 */
1095 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1096 {
1097 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1098 return iemRaiseGeneralProtectionFault0(pIemCpu);
1099 }
1100
1101 /* Fetch the descriptor. */
1102 IEMSELDESC Desc;
1103 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1104 if (rcStrict != VINF_SUCCESS)
1105 return rcStrict;
1106
1107 /*
1108 * Deal with it according to its type. We do the standard code selectors
1109 * here and dispatch the system selectors to worker functions.
1110 */
1111 if (!Desc.Legacy.Gen.u1DescType)
1112 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1113
1114 /* Only code segments. */
1115 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1116 {
1117 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1118 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1119 }
1120
1121 /* L vs D. */
1122 if ( Desc.Legacy.Gen.u1Long
1123 && Desc.Legacy.Gen.u1DefBig
1124 && IEM_IS_LONG_MODE(pIemCpu))
1125 {
1126 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1127 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1128 }
1129
1130 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1131 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1132 {
1133 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1134 {
1135 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1136 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1137 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1138 }
1139 }
1140 else
1141 {
1142 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1143 {
1144 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1145 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1146 }
1147 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1148 {
1149 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1150 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1151 }
1152 }
1153
1154 /* Is it there? */
1155 if (!Desc.Legacy.Gen.u1Present)
1156 {
1157 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1158 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1159 }
1160
1161 /* Check stack first - may #SS(0). */
1162 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1163 * 16-bit code cause a two or four byte CS to be pushed? */
1164 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1165 enmEffOpSize == IEMMODE_64BIT ? 8+8
1166 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1167 &uPtrRet.pv, &uNewRsp);
1168 if (rcStrict != VINF_SUCCESS)
1169 return rcStrict;
1170
1171 /* Chop the high bits if 16-bit (Intel says so). */
1172 if (enmEffOpSize == IEMMODE_16BIT)
1173 offSeg &= UINT16_MAX;
1174
1175 /* Limit / canonical check. */
1176 uint64_t u64Base;
1177 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1178 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1179 {
1180 if (!IEM_IS_CANONICAL(offSeg))
1181 {
1182 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1183 return iemRaiseNotCanonical(pIemCpu);
1184 }
1185 u64Base = 0;
1186 }
1187 else
1188 {
1189 if (offSeg > cbLimit)
1190 {
1191 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1192 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1193 }
1194 u64Base = X86DESC_BASE(&Desc.Legacy);
1195 }
1196
1197 /*
1198 * Now set the accessed bit before writing the return address to the
1199 * stack and committing the result into CS, CSHID and RIP.
1201 */
1202 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1203 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1204 {
1205 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1206 if (rcStrict != VINF_SUCCESS)
1207 return rcStrict;
1208#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1209 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1210#endif
1211 }
1212
1213 /* stack */
1214 if (enmEffOpSize == IEMMODE_16BIT)
1215 {
1216 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1217 uPtrRet.pu16[1] = pCtx->cs.Sel;
1218 }
1219 else if (enmEffOpSize == IEMMODE_32BIT)
1220 {
1221 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1222 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1223 }
1224 else
1225 {
1226 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1227 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1228 }
1229 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1230 if (rcStrict != VINF_SUCCESS)
1231 return rcStrict;
1232
1233 /* commit */
1234 pCtx->rip = offSeg;
1235 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1236 pCtx->cs.Sel |= pIemCpu->uCpl;
1237 pCtx->cs.ValidSel = pCtx->cs.Sel;
1238 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1239 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1240 pCtx->cs.u32Limit = cbLimit;
1241 pCtx->cs.u64Base = u64Base;
1242 /** @todo check if the hidden bits are loaded correctly for 64-bit
1243 * mode. */
1244 return VINF_SUCCESS;
1245}
1246
1247
1248/**
1249 * Implements retf.
1250 *
1251 * @param enmEffOpSize The effective operand size.
1252 * @param cbPop The number of argument bytes to pop from the stack.
1254 */
1255IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1256{
1257 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1258 VBOXSTRICTRC rcStrict;
1259 RTCPTRUNION uPtrFrame;
1260 uint64_t uNewRsp;
1261 uint64_t uNewRip;
1262 uint16_t uNewCs;
1263 NOREF(cbInstr);
1264
1265 /*
1266 * Read the stack values first.
1267 */
1268 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1269 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1270 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1271 if (rcStrict != VINF_SUCCESS)
1272 return rcStrict;
1273 if (enmEffOpSize == IEMMODE_16BIT)
1274 {
1275 uNewRip = uPtrFrame.pu16[0];
1276 uNewCs = uPtrFrame.pu16[1];
1277 }
1278 else if (enmEffOpSize == IEMMODE_32BIT)
1279 {
1280 uNewRip = uPtrFrame.pu32[0];
1281 uNewCs = uPtrFrame.pu16[2];
1282 }
1283 else
1284 {
1285 uNewRip = uPtrFrame.pu64[0];
1286 uNewCs = uPtrFrame.pu16[4];
1287 }
1288
1289 /*
1290 * Real mode and V8086 mode are easy.
1291 */
1292 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1293 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1294 {
1295 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1296 /** @todo check how this is supposed to work if sp=0xfffe. */
1297
1298 /* Check the limit of the new EIP. */
1299 /** @todo Intel pseudo code only does the limit check for 16-bit
1300 * operands, AMD does not make any distinction. What is right? */
1301 if (uNewRip > pCtx->cs.u32Limit)
1302 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1303
1304 /* commit the operation. */
1305 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1306 if (rcStrict != VINF_SUCCESS)
1307 return rcStrict;
1308 pCtx->rip = uNewRip;
1309 pCtx->cs.Sel = uNewCs;
1310 pCtx->cs.ValidSel = uNewCs;
1311 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1312 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1313 /** @todo do we load attribs and limit as well? */
1314 if (cbPop)
1315 iemRegAddToRsp(pCtx, cbPop);
1316 return VINF_SUCCESS;
1317 }
1318
1319 /*
1320 * Protected mode is complicated, of course.
1321 */
1322 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1323 {
1324 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1325 return iemRaiseGeneralProtectionFault0(pIemCpu);
1326 }
1327
1328 /* Fetch the descriptor. */
1329 IEMSELDESC DescCs;
1330 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1331 if (rcStrict != VINF_SUCCESS)
1332 return rcStrict;
1333
1334 /* Can only return to a code selector. */
1335 if ( !DescCs.Legacy.Gen.u1DescType
1336 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1337 {
1338 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1339 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1340 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1341 }
1342
1343 /* L vs D. */
1344 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1345 && DescCs.Legacy.Gen.u1DefBig
1346 && IEM_IS_LONG_MODE(pIemCpu))
1347 {
1348 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1349 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1350 }
1351
1352 /* DPL/RPL/CPL checks. */
1353 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1354 {
1355 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1356 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1357 }
1358
1359 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1360 {
1361 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1362 {
1363 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1364 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1365 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1366 }
1367 }
1368 else
1369 {
1370 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1371 {
1372 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1373 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1374 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1375 }
1376 }
1377
1378 /* Is it there? */
1379 if (!DescCs.Legacy.Gen.u1Present)
1380 {
1381 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1382 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1383 }
1384
1385 /*
1386 * Return to outer privilege? (We'll typically have entered via a call gate.)
1387 */
1388 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1389 {
1390 /* Read the return pointer, it comes before the parameters. */
1391 RTCPTRUNION uPtrStack;
1392 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1393 if (rcStrict != VINF_SUCCESS)
1394 return rcStrict;
1395 uint16_t uNewOuterSs;
1396 uint64_t uNewOuterRsp;
1397 if (enmEffOpSize == IEMMODE_16BIT)
1398 {
1399 uNewOuterRsp = uPtrFrame.pu16[0];
1400 uNewOuterSs = uPtrFrame.pu16[1];
1401 }
1402 else if (enmEffOpSize == IEMMODE_32BIT)
1403 {
1404 uNewOuterRsp = uPtrFrame.pu32[0];
1405 uNewOuterSs = uPtrFrame.pu16[2];
1406 }
1407 else
1408 {
1409 uNewOuterRsp = uPtrFrame.pu64[0];
1410 uNewOuterSs = uPtrFrame.pu16[4];
1411 }
1412
1413 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1414 and read the selector. */
1415 IEMSELDESC DescSs;
1416 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1417 {
1418 if ( !DescCs.Legacy.Gen.u1Long
1419 || (uNewOuterSs & X86_SEL_RPL) == 3)
1420 {
1421 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1422 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1423 return iemRaiseGeneralProtectionFault0(pIemCpu);
1424 }
1425 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1426 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1427 }
1428 else
1429 {
1430 /* Fetch the descriptor for the new stack segment. */
1431 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1432 if (rcStrict != VINF_SUCCESS)
1433 return rcStrict;
1434 }
1435
1436 /* Check that RPL of stack and code selectors match. */
1437 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1438 {
1439 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1440 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1441 }
1442
1443 /* Must be a writable data segment. */
1444 if ( !DescSs.Legacy.Gen.u1DescType
1445 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1446 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1447 {
1448 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1449 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1450 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1451 }
1452
1453 /* L vs D. (Not mentioned by intel.) */
1454 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1455 && DescSs.Legacy.Gen.u1DefBig
1456 && IEM_IS_LONG_MODE(pIemCpu))
1457 {
1458 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1459 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1460 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1461 }
1462
1463 /* DPL/RPL/CPL checks. */
1464 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1465 {
1466 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1467 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1468 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1469 }
1470
1471 /* Is it there? */
1472 if (!DescSs.Legacy.Gen.u1Present)
1473 {
1474 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1475 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1476 }
1477
1478 /* Calc SS limit.*/
1479 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1480
1481 /* Is RIP canonical or within CS.limit? */
1482 uint64_t u64Base;
1483 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1484
1485 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1486 {
1487 if (!IEM_IS_CANONICAL(uNewRip))
1488 {
1489 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1490 return iemRaiseNotCanonical(pIemCpu);
1491 }
1492 u64Base = 0;
1493 }
1494 else
1495 {
1496 if (uNewRip > cbLimitCs)
1497 {
1498 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1499 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1500 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1501 }
1502 u64Base = X86DESC_BASE(&DescCs.Legacy);
1503 }
1504
1505 /*
1506 * Now set the accessed bit before writing the return address to the
1507 * stack and committing the result into CS, CSHID and RIP.
1509 */
1510 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1511 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1512 {
1513 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1514 if (rcStrict != VINF_SUCCESS)
1515 return rcStrict;
1516#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1517 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1518#endif
1519 }
1520 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1521 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1522 {
1523 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1524 if (rcStrict != VINF_SUCCESS)
1525 return rcStrict;
1526#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1527 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1528#endif
1529 }
1530
1531 /* commit */
1532 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1533 if (rcStrict != VINF_SUCCESS)
1534 return rcStrict;
1535 if (enmEffOpSize == IEMMODE_16BIT)
1536 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1537 else
1538 pCtx->rip = uNewRip;
1539 pCtx->cs.Sel = uNewCs;
1540 pCtx->cs.ValidSel = uNewCs;
1541 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1542 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1543 pCtx->cs.u32Limit = cbLimitCs;
1544 pCtx->cs.u64Base = u64Base;
1545 pCtx->rsp = uNewRsp;
1546 pCtx->ss.Sel = uNewOuterSs;
1547 pCtx->ss.ValidSel = uNewOuterSs;
1548 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1549 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1550 pCtx->ss.u32Limit = cbLimitSs;
1551 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1552 pCtx->ss.u64Base = 0;
1553 else
1554 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1555
1556 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1557 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1558 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1559 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1560 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1561
1562 /** @todo check if the hidden bits are loaded correctly for 64-bit
1563 * mode. */
1564
1565 if (cbPop)
1566 iemRegAddToRsp(pCtx, cbPop);
1567
1568 /* Done! */
1569 }
1570 /*
1571 * Return to the same privilege level
1572 */
1573 else
1574 {
1575 /* Limit / canonical check. */
1576 uint64_t u64Base;
1577 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1578
1579 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1580 {
1581 if (!IEM_IS_CANONICAL(uNewRip))
1582 {
1583 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1584 return iemRaiseNotCanonical(pIemCpu);
1585 }
1586 u64Base = 0;
1587 }
1588 else
1589 {
1590 if (uNewRip > cbLimitCs)
1591 {
1592 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1593 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1594 }
1595 u64Base = X86DESC_BASE(&DescCs.Legacy);
1596 }
1597
1598 /*
1599 * Now set the accessed bit before writing the return address to the
1600 * stack and committing the result into CS, CSHID and RIP.
1602 */
1603 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1604 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1605 {
1606 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1607 if (rcStrict != VINF_SUCCESS)
1608 return rcStrict;
1609#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1610 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1611#endif
1612 }
1613
1614 /* commit */
1615 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1616 if (rcStrict != VINF_SUCCESS)
1617 return rcStrict;
1618 if (enmEffOpSize == IEMMODE_16BIT)
1619 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1620 else
1621 pCtx->rip = uNewRip;
1622 pCtx->cs.Sel = uNewCs;
1623 pCtx->cs.ValidSel = uNewCs;
1624 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1625 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1626 pCtx->cs.u32Limit = cbLimitCs;
1627 pCtx->cs.u64Base = u64Base;
1628 /** @todo check if the hidden bits are loaded correctly for 64-bit
1629 * mode. */
1630 if (cbPop)
1631 iemRegAddToRsp(pCtx, cbPop);
1632 }
1633 return VINF_SUCCESS;
1634}
1635
1636
1637/**
1638 * Implements retn.
1639 *
1640 * We're doing this in C because of the \#GP that might be raised if the popped
1641 * program counter is out of bounds.
1642 *
1643 * @param enmEffOpSize The effective operand size.
1644 * @param cbPop The number of argument bytes to pop from the stack.
1646 */
1647IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1648{
1649 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1650 NOREF(cbInstr);
1651
1652 /* Fetch the RSP from the stack. */
1653 VBOXSTRICTRC rcStrict;
1654 RTUINT64U NewRip;
1655 RTUINT64U NewRsp;
1656 NewRsp.u = pCtx->rsp;
1657 switch (enmEffOpSize)
1658 {
1659 case IEMMODE_16BIT:
1660 NewRip.u = 0;
1661 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1662 break;
1663 case IEMMODE_32BIT:
1664 NewRip.u = 0;
1665 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1666 break;
1667 case IEMMODE_64BIT:
1668 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1669 break;
1670 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1671 }
1672 if (rcStrict != VINF_SUCCESS)
1673 return rcStrict;
1674
1675 /* Check the new RSP before loading it. */
1676 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1677 * of it. The canonical test is performed here and for call. */
1678 if (enmEffOpSize != IEMMODE_64BIT)
1679 {
1680 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1681 {
1682 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1683 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1684 }
1685 }
1686 else
1687 {
1688 if (!IEM_IS_CANONICAL(NewRip.u))
1689 {
1690 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1691 return iemRaiseNotCanonical(pIemCpu);
1692 }
1693 }
1694
1695 /* Commit it. */
1696 pCtx->rip = NewRip.u;
1697 pCtx->rsp = NewRsp.u;
1698 if (cbPop)
1699 iemRegAddToRsp(pCtx, cbPop);
1700
1701 return VINF_SUCCESS;
1702}
1703
1704
1705/**
1706 * Implements enter.
1707 *
1708 * We're doing this in C because the instruction is insane; even in the
1709 * u8NestingLevel=0 case, dealing with the stack is tedious.
1710 *
1711 * @param enmEffOpSize The effective operand size.
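 * @param cbFrame The size of the stack frame to allocate.
 * @param cParameters The number of parameters to copy, aka the
 * nesting level (masked to 0..31).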
1712 */
1713IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
1714{
1715 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1716
1717 /* Push RBP, saving the old value in TmpRbp. */
1718 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
1719 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
1720 RTUINT64U NewRbp;
1721 VBOXSTRICTRC rcStrict;
1722 if (enmEffOpSize == IEMMODE_64BIT)
1723 {
1724 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
1725 NewRbp = NewRsp;
1726 }
1727 else if (pCtx->ss.Attr.n.u1DefBig)
1728 {
1729 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
1730 NewRbp = NewRsp;
1731 }
1732 else
1733 {
1734 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
1735 NewRbp = NewRsp;
1736 }
1737 if (rcStrict != VINF_SUCCESS)
1738 return rcStrict;
1739
1740 /* Copy the parameters (aka nesting levels by Intel). */
1741 cParameters &= 0x1f;
1742 if (cParameters > 0)
1743 {
1744 switch (enmEffOpSize)
1745 {
1746 case IEMMODE_16BIT:
1747 if (pCtx->ss.Attr.n.u1DefBig)
1748 TmpRbp.DWords.dw0 -= 2;
1749 else
1750 TmpRbp.Words.w0 -= 2;
1751 do
1752 {
1753 uint16_t u16Tmp;
1754 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
1755 if (rcStrict != VINF_SUCCESS)
1756 break;
1757 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
1758 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1759 break;
1760
1761 case IEMMODE_32BIT:
1762 if (pCtx->ss.Attr.n.u1DefBig)
1763 TmpRbp.DWords.dw0 -= 4;
1764 else
1765 TmpRbp.Words.w0 -= 4;
1766 do
1767 {
1768 uint32_t u32Tmp;
1769 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
1770 if (rcStrict != VINF_SUCCESS)
1771 break;
1772 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
1773 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1774 break;
1775
1776 case IEMMODE_64BIT:
1777 TmpRbp.u -= 8;
1778 do
1779 {
1780 uint64_t u64Tmp;
1781 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
1782 if (rcStrict != VINF_SUCCESS)
1783 break;
1784 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
1785 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1786 break;
1787
1788 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1789 }
1790 if (rcStrict != VINF_SUCCESS)
1791 return rcStrict;
1792
1793 /* Push the new RBP */
1794 if (enmEffOpSize == IEMMODE_64BIT)
1795 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
1796 else if (pCtx->ss.Attr.n.u1DefBig)
1797 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
1798 else
1799 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
1800 if (rcStrict != VINF_SUCCESS)
1801 return rcStrict;
1802
1803 }
1804
1805 /* Recalc RSP. */
1806 iemRegSubFromRspEx(&NewRsp, cbFrame, pCtx);
1807
1808 /** @todo Should probe write access at the new RSP according to AMD. */
1809
1810 /* Commit it. */
1811 pCtx->rbp = NewRbp.u;
1812 pCtx->rsp = NewRsp.u;
1813 iemRegAddToRip(pIemCpu, cbInstr);
1814
1815 return VINF_SUCCESS;
1816}
1817
1818
1819
1820/**
1821 * Implements leave.
1822 *
1823 * We're doing this in C because messing with the stack registers is annoying
1824 * since they depend on the SS attributes.
1825 *
1826 * @param enmEffOpSize The effective operand size.
1827 */
1828IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1829{
1830 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1831
1832 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1833 RTUINT64U NewRsp;
1834 if (pCtx->ss.Attr.n.u1Long)
1835 NewRsp.u = pCtx->rbp;
1836 else if (pCtx->ss.Attr.n.u1DefBig)
1837 NewRsp.u = pCtx->ebp;
1838 else
1839 {
1840 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1841 NewRsp.u = pCtx->rsp;
1842 NewRsp.Words.w0 = pCtx->bp;
1843 }
1844
1845 /* Pop RBP according to the operand size. */
1846 VBOXSTRICTRC rcStrict;
1847 RTUINT64U NewRbp;
1848 switch (enmEffOpSize)
1849 {
1850 case IEMMODE_16BIT:
1851 NewRbp.u = pCtx->rbp;
1852 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1853 break;
1854 case IEMMODE_32BIT:
1855 NewRbp.u = 0;
1856 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1857 break;
1858 case IEMMODE_64BIT:
1859 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1860 break;
1861 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1862 }
1863 if (rcStrict != VINF_SUCCESS)
1864 return rcStrict;
1865
1866
1867 /* Commit it. */
1868 pCtx->rbp = NewRbp.u;
1869 pCtx->rsp = NewRsp.u;
1870 iemRegAddToRip(pIemCpu, cbInstr);
1871
1872 return VINF_SUCCESS;
1873}
1874
1875
1876/**
1877 * Implements int3 and int XX.
1878 *
1879 * @param u8Int The interrupt vector number.
1880 * @param fIsBpInstr Whether it's the breakpoint instruction (int3).
1881 */
1882IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1883{
1884 Assert(pIemCpu->cXcptRecursions == 0);
1885 return iemRaiseXcptOrInt(pIemCpu,
1886 cbInstr,
1887 u8Int,
1888 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1889 0,
1890 0);
1891}
1892
1893
1894/**
1895 * Implements iret for real mode and V8086 mode.
1896 *
1897 * @param enmEffOpSize The effective operand size.
1898 */
1899IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1900{
1901 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1902 NOREF(cbInstr);
1903
1904 /*
1905 * In V8086 mode, iret raises #GP(0) if CR4.VME isn't enabled.
1906 */
1907 if ( pCtx->eflags.Bits.u1VM
1908 && !(pCtx->cr4 & X86_CR4_VME))
1909 return iemRaiseGeneralProtectionFault0(pIemCpu);
1910
1911 /*
1912 * Do the stack bits, but don't commit RSP before everything checks
1913 * out right.
1914 */
1915 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1916 VBOXSTRICTRC rcStrict;
1917 RTCPTRUNION uFrame;
1918 uint16_t uNewCs;
1919 uint32_t uNewEip;
1920 uint32_t uNewFlags;
1921 uint64_t uNewRsp;
1922 if (enmEffOpSize == IEMMODE_32BIT)
1923 {
1924 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1925 if (rcStrict != VINF_SUCCESS)
1926 return rcStrict;
1927 uNewEip = uFrame.pu32[0];
1928 uNewCs = (uint16_t)uFrame.pu32[1];
1929 uNewFlags = uFrame.pu32[2];
1930 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1931 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1932 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1933 | X86_EFL_ID;
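    /* VM, VIF, VIP and the fixed bit 1 are taken from the current EFLAGS;
       they cannot be changed by a real/V8086 mode IRET. */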
1934 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1935 }
1936 else
1937 {
1938 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1939 if (rcStrict != VINF_SUCCESS)
1940 return rcStrict;
1941 uNewEip = uFrame.pu16[0];
1942 uNewCs = uFrame.pu16[1];
1943 uNewFlags = uFrame.pu16[2];
1944 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1945 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1946 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1947 /** @todo The intel pseudo code does not indicate what happens to
1948 * reserved flags. We just ignore them. */
1949 }
1950 /** @todo Check how this is supposed to work if sp=0xfffe. */
1951
1952 /*
1953 * Check the limit of the new EIP.
1954 */
1955 /** @todo Only the AMD pseudo code checks the limit here, what's
1956 * right? */
1957 if (uNewEip > pCtx->cs.u32Limit)
1958 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1959
1960 /*
1961 * V8086 checks and flag adjustments
1962 */
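    /* Two legal paths here: with IOPL 3 the popped flags are applied directly
       (IOPL itself is preserved and RF cleared); with 16-bit VME handling the
       popped IF is redirected to VIF while the real IF and IOPL are preserved.
       Anything else raises #GP(0). */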
1963 if (pCtx->eflags.Bits.u1VM)
1964 {
1965 if (pCtx->eflags.Bits.u2IOPL == 3)
1966 {
1967 /* Preserve IOPL and clear RF. */
1968 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1969 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1970 }
1971 else if ( enmEffOpSize == IEMMODE_16BIT
1972 && ( !(uNewFlags & X86_EFL_IF)
1973 || !pCtx->eflags.Bits.u1VIP )
1974 && !(uNewFlags & X86_EFL_TF) )
1975 {
1976 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1977 uNewFlags &= ~X86_EFL_VIF;
1978 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1979 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1980 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1981 }
1982 else
1983 return iemRaiseGeneralProtectionFault0(pIemCpu);
1984 }
1985
1986 /*
1987 * Commit the operation.
1988 */
1989 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1990 if (rcStrict != VINF_SUCCESS)
1991 return rcStrict;
1992 pCtx->rip = uNewEip;
1993 pCtx->cs.Sel = uNewCs;
1994 pCtx->cs.ValidSel = uNewCs;
1995 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
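    /* In real and V8086 mode the segment base is simply the selector shifted
       left by four bits. */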
1996 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1997 /** @todo do we load attribs and limit as well? */
1998 Assert(uNewFlags & X86_EFL_1);
1999 pCtx->eflags.u = uNewFlags;
2000
2001 return VINF_SUCCESS;
2002}
2003
2004
2005/**
2006 * Implements iret for protected mode returning to V8086 mode.
2007 *
2008 * @param enmEffOpSize The effective operand size.
2009 * @param uNewEip The new EIP.
2010 * @param uNewCs The new CS.
2011 * @param uNewFlags The new EFLAGS.
2012 * @param uNewRsp The RSP after the initial IRET frame.
2013 */
2014IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, IEMMODE, enmEffOpSize, uint32_t, uNewEip, uint16_t, uNewCs,
2015 uint32_t, uNewFlags, uint64_t, uNewRsp)
2016{
2017 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2018}
2019
2020
2021/**
2022 * Implements iret for protected mode returning via a nested task.
2023 *
2024 * @param enmEffOpSize The effective operand size.
2025 */
2026IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2027{
2028 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2029}
2030
2031
2032/**
2033 * Implements iret for protected mode.
2034 *
2035 * @param enmEffOpSize The effective operand size.
2036 */
2037IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2038{
2039 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2040 NOREF(cbInstr);
2041
2042 /*
2043 * Nested task return.
2044 */
2045 if (pCtx->eflags.Bits.u1NT)
2046 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2047
2048 /*
2049 * Normal return.
2050 *
2051 * Do the stack bits, but don't commit RSP before everything checks
2052 * out right.
2053 */
2054 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2055 VBOXSTRICTRC rcStrict;
2056 RTCPTRUNION uFrame;
2057 uint16_t uNewCs;
2058 uint32_t uNewEip;
2059 uint32_t uNewFlags;
2060 uint64_t uNewRsp;
2061 if (enmEffOpSize == IEMMODE_32BIT)
2062 {
2063 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2064 if (rcStrict != VINF_SUCCESS)
2065 return rcStrict;
2066 uNewEip = uFrame.pu32[0];
2067 uNewCs = (uint16_t)uFrame.pu32[1];
2068 uNewFlags = uFrame.pu32[2];
2069 }
2070 else
2071 {
2072 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2073 if (rcStrict != VINF_SUCCESS)
2074 return rcStrict;
2075 uNewEip = uFrame.pu16[0];
2076 uNewCs = uFrame.pu16[1];
2077 uNewFlags = uFrame.pu16[2];
2078 }
2079 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2080 if (rcStrict != VINF_SUCCESS)
2081 return rcStrict;
2082
2083 /*
2084 * We're hopefully not returning to V8086 mode...
2085 */
2086 if ( (uNewFlags & X86_EFL_VM)
2087 && pIemCpu->uCpl == 0)
2088 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, enmEffOpSize, uNewEip, uNewCs, uNewFlags, uNewRsp);
2089
2090 /*
2091 * Protected mode.
2092 */
2093 /* Read the CS descriptor. */
2094 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2095 {
2096 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
2097 return iemRaiseGeneralProtectionFault0(pIemCpu);
2098 }
2099
2100 IEMSELDESC DescCS;
2101 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2102 if (rcStrict != VINF_SUCCESS)
2103 {
2104 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2105 return rcStrict;
2106 }
2107
2108 /* Must be a code descriptor. */
2109 if (!DescCS.Legacy.Gen.u1DescType)
2110 {
2111 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2112 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2113 }
2114 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2115 {
2116 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2117 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2118 }
2119
2120 /* Privilege checks. */
2121 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2122 {
2123 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2124 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2125 }
2126 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2127 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2128 {
2129 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2130 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2131 }
2132
2133 /* Present? */
2134 if (!DescCS.Legacy.Gen.u1Present)
2135 {
2136 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2137 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2138 }
2139
2140 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2141
2142 /*
2143 * Return to outer level?
2144 */
2145 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2146 {
2147 uint16_t uNewSS;
2148 uint32_t uNewESP;
2149 if (enmEffOpSize == IEMMODE_32BIT)
2150 {
2151 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2152 if (rcStrict != VINF_SUCCESS)
2153 return rcStrict;
2154 uNewESP = uFrame.pu32[0];
2155 uNewSS = (uint16_t)uFrame.pu32[1];
2156 }
2157 else
2158 {
2159 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2160 if (rcStrict != VINF_SUCCESS)
2161 return rcStrict;
2162 uNewESP = uFrame.pu16[0];
2163 uNewSS = uFrame.pu16[1];
2164 }
2165 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2166 if (rcStrict != VINF_SUCCESS)
2167 return rcStrict;
2168
2169 /* Read the SS descriptor. */
2170 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2171 {
2172 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2173 return iemRaiseGeneralProtectionFault0(pIemCpu);
2174 }
2175
2176 IEMSELDESC DescSS;
2177 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2178 if (rcStrict != VINF_SUCCESS)
2179 {
2180 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2181 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2182 return rcStrict;
2183 }
2184
2185 /* Privilege checks. */
2186 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2187 {
2188 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2189 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2190 }
2191 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2192 {
2193 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2194 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2195 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2196 }
2197
2198 /* Must be a writeable data segment descriptor. */
2199 if (!DescSS.Legacy.Gen.u1DescType)
2200 {
2201 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2202 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2203 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2204 }
2205 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2206 {
2207 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2208 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2209 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2210 }
2211
2212 /* Present? */
2213 if (!DescSS.Legacy.Gen.u1Present)
2214 {
2215 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2216 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2217 }
2218
2219 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2220
2221 /* Check EIP. */
2222 if (uNewEip > cbLimitCS)
2223 {
2224 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2225 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2226 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2227 }
2228
2229 /*
2230 * Commit the changes, marking CS and SS accessed first since
2231 * that may fail.
2232 */
2233 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2234 {
2235 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2236 if (rcStrict != VINF_SUCCESS)
2237 return rcStrict;
2238 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2239 }
2240 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2241 {
2242 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2243 if (rcStrict != VINF_SUCCESS)
2244 return rcStrict;
2245 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2246 }
2247
2248 pCtx->rip = uNewEip;
2249 pCtx->cs.Sel = uNewCs;
2250 pCtx->cs.ValidSel = uNewCs;
2251 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2252 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2253 pCtx->cs.u32Limit = cbLimitCS;
2254 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2255 pCtx->rsp = uNewESP;
2256 pCtx->ss.Sel = uNewSS;
2257 pCtx->ss.ValidSel = uNewSS;
2258 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2259 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2260 pCtx->ss.u32Limit = cbLimitSs;
2261 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2262
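        /* Merge the popped EFLAGS into the current ones. RF, AC and ID can
           only be changed with a 32-bit operand; IF, IOPL, VIF and VIP only
           when executing at CPL 0, and IF alone when CPL <= IOPL. */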
2263 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2264 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2265 if (enmEffOpSize != IEMMODE_16BIT)
2266 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2267 if (pIemCpu->uCpl == 0)
2268 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2269 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2270 fEFlagsMask |= X86_EFL_IF;
2271 pCtx->eflags.u &= ~fEFlagsMask;
2272 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2273
2274 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2275 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2276 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2277 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2278 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2279
2280 /* Done! */
2281
2282 }
2283 /*
2284 * Return to the same level.
2285 */
2286 else
2287 {
2288 /* Check EIP. */
2289 if (uNewEip > cbLimitCS)
2290 {
2291 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2292 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2293 }
2294
2295 /*
2296 * Commit the changes, marking CS first since it may fail.
2297 */
2298 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2299 {
2300 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2301 if (rcStrict != VINF_SUCCESS)
2302 return rcStrict;
2303 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2304 }
2305
2306 pCtx->rip = uNewEip;
2307 pCtx->cs.Sel = uNewCs;
2308 pCtx->cs.ValidSel = uNewCs;
2309 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2310 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2311 pCtx->cs.u32Limit = cbLimitCS;
2312 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2313 pCtx->rsp = uNewRsp;
2314
2315 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2316 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2317 if (enmEffOpSize != IEMMODE_16BIT)
2318 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2319 if (pIemCpu->uCpl == 0)
2320 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2321 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2322 fEFlagsMask |= X86_EFL_IF;
2323 pCtx->eflags.u &= ~fEFlagsMask;
2324 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2325 /* Done! */
2326 }
2327 return VINF_SUCCESS;
2328}
2329
2330
2331/**
2332 * Implements iret for long mode.
2333 *
2334 * @param enmEffOpSize The effective operand size.
2335 */
2336IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2337{
2338 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2339 //VBOXSTRICTRC rcStrict;
2340 //uint64_t uNewRsp;
2341
2342 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2343 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2344}
2345
2346
2347/**
2348 * Implements iret.
2349 *
2350 * @param enmEffOpSize The effective operand size.
2351 */
2352IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2353{
2354 /*
2355 * Call a mode specific worker.
2356 */
2357 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2358 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2359 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2360 if (IEM_IS_LONG_MODE(pIemCpu))
2361 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2362
2363 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2364}
2365
2366
2367/**
2368 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2369 *
2370 * @param iSegReg The segment register number (valid).
2371 * @param uSel The new selector value.
2372 */
2373IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2374{
2375 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2376 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2377 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2378
2379 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2380
2381 /*
2382 * Real mode and V8086 mode are easy.
2383 */
2384 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2385 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2386 {
2387 *pSel = uSel;
2388 pHid->u64Base = (uint32_t)uSel << 4;
2389#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2390 /** @todo Does the CPU actually load limits and attributes in the
2391 * real/V8086 mode segment load case? It doesn't for CS in far
2392 * jumps... Affects unreal mode. */
2393 pHid->u32Limit = 0xffff;
2394 pHid->Attr.u = 0;
2395 pHid->Attr.n.u1Present = 1;
2396 pHid->Attr.n.u1DescType = 1;
2397 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2398 ? X86_SEL_TYPE_RW
2399 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2400#endif
2401 iemRegAddToRip(pIemCpu, cbInstr);
2402 return VINF_SUCCESS;
2403 }
2404
2405 /*
2406 * Protected mode.
2407 *
2408 * Check if it's a null segment selector value first; that's OK for DS, ES,
2409 * FS and GS. If not null, then we have to load and parse the descriptor.
2410 */
2411 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2412 {
2413 if (iSegReg == X86_SREG_SS)
2414 {
2415 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2416 || pIemCpu->uCpl != 0
2417 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2418 {
2419 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2420 return iemRaiseGeneralProtectionFault0(pIemCpu);
2421 }
2422
2423 /* In 64-bit kernel mode, the stack can be 0 because of the way
2424 interrupts are dispatched when in kernel ctx. Just load the
2425 selector value into the register and leave the hidden bits
2426 as is. */
2427 *pSel = uSel;
2428 iemRegAddToRip(pIemCpu, cbInstr);
2429 return VINF_SUCCESS;
2430 }
2431
2432 *pSel = uSel; /* Not RPL, remember :-) */
2433 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2434 && iSegReg != X86_SREG_FS
2435 && iSegReg != X86_SREG_GS)
2436 {
2437 /** @todo figure out what this actually does; it works. Needs a
2438 * testcase! */
2439 pHid->Attr.u = 0;
2440 pHid->Attr.n.u1Present = 1;
2441 pHid->Attr.n.u1Long = 1;
2442 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2443 pHid->Attr.n.u2Dpl = 3;
2444 pHid->u32Limit = 0;
2445 pHid->u64Base = 0;
2446 }
2447 else
2448 {
2449 pHid->Attr.u = 0;
2450 pHid->u32Limit = 0;
2451 pHid->u64Base = 0;
2452 }
2453 iemRegAddToRip(pIemCpu, cbInstr);
2454 return VINF_SUCCESS;
2455 }
2456
2457 /* Fetch the descriptor. */
2458 IEMSELDESC Desc;
2459 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2460 if (rcStrict != VINF_SUCCESS)
2461 return rcStrict;
2462
2463 /* Check GPs first. */
2464 if (!Desc.Legacy.Gen.u1DescType)
2465 {
2466 Log(("load sreg%u, %#x - system selector (type %#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2467 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2468 }
2469 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2470 {
2471 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2472 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2473 {
2474 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2475 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2476 }
2477 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2478 {
2479 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2480 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2481 }
2482 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2483 {
2484 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2485 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2486 }
2487 }
2488 else
2489 {
2490 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2491 {
2492 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2493 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2494 }
2495 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2496 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2497 {
2498#if 0 /* this is what intel says. */
2499 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2500 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2501 {
2502 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2503 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2504 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2505 }
2506#else /* this is what makes more sense. */
2507 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2508 {
2509 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2510 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2511 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2512 }
2513 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2514 {
2515 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2516 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2517 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2518 }
2519#endif
2520 }
2521 }
2522
2523 /* Is it there? */
2524 if (!Desc.Legacy.Gen.u1Present)
2525 {
2526 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2527 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2528 }
2529
2530 /* The base and limit. */
2531 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2532 uint64_t u64Base;
2533 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2534 && iSegReg < X86_SREG_FS)
2535 u64Base = 0;
2536 else
2537 u64Base = X86DESC_BASE(&Desc.Legacy);
2538
2539 /*
2540 * Ok, everything checked out fine. Now set the accessed bit before
2541 * committing the result into the registers.
2542 */
2543 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2544 {
2545 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2546 if (rcStrict != VINF_SUCCESS)
2547 return rcStrict;
2548 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2549 }
2550
2551 /* commit */
2552 *pSel = uSel;
2553 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2554 pHid->u32Limit = cbLimit;
2555 pHid->u64Base = u64Base;
2556
2557 /** @todo check if the hidden bits are loaded correctly for 64-bit
2558 * mode. */
2559
2560 iemRegAddToRip(pIemCpu, cbInstr);
2561 return VINF_SUCCESS;
2562}
2563
2564
2565/**
2566 * Implements 'mov SReg, r/m'.
2567 *
2568 * @param iSegReg The segment register number (valid).
2569 * @param uSel The new selector value.
2570 */
2571IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2572{
2573 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2574 if (rcStrict == VINF_SUCCESS)
2575 {
2576 if (iSegReg == X86_SREG_SS)
2577 {
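            /* Loading SS inhibits interrupts and debug traps until the end of
               the next instruction, so that SS:[RE]SP can be loaded as a pair. */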
2578 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2579 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2580 }
2581 }
2582 return rcStrict;
2583}
2584
2585
2586/**
2587 * Implements 'pop SReg'.
2588 *
2589 * @param iSegReg The segment register number (valid).
2590 * @param enmEffOpSize The efficient operand size (valid).
2591 */
2592IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2593{
2594 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2595 VBOXSTRICTRC rcStrict;
2596
2597 /*
2598 * Read the selector off the stack and join paths with mov ss, reg.
2599 */
2600 RTUINT64U TmpRsp;
2601 TmpRsp.u = pCtx->rsp;
2602 switch (enmEffOpSize)
2603 {
2604 case IEMMODE_16BIT:
2605 {
2606 uint16_t uSel;
2607 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2608 if (rcStrict == VINF_SUCCESS)
2609 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2610 break;
2611 }
2612
2613 case IEMMODE_32BIT:
2614 {
2615 uint32_t u32Value;
2616 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2617 if (rcStrict == VINF_SUCCESS)
2618 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2619 break;
2620 }
2621
2622 case IEMMODE_64BIT:
2623 {
2624 uint64_t u64Value;
2625 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2626 if (rcStrict == VINF_SUCCESS)
2627 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2628 break;
2629 }
2630 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2631 }
2632
2633 /*
2634 * Commit the stack on success.
2635 */
2636 if (rcStrict == VINF_SUCCESS)
2637 {
2638 pCtx->rsp = TmpRsp.u;
2639 if (iSegReg == X86_SREG_SS)
2640 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2641 }
2642 return rcStrict;
2643}
2644
2645
2646/**
2647 * Implements lgs, lfs, les, lds & lss.
2648 */
2649IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2650 uint16_t, uSel,
2651 uint64_t, offSeg,
2652 uint8_t, iSegReg,
2653 uint8_t, iGReg,
2654 IEMMODE, enmEffOpSize)
2655{
2656 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2657 VBOXSTRICTRC rcStrict;
2658
2659 /*
2660 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2661 */
2662 /** @todo verify and test that mov, pop and lXs handle the segment
2663 * register loading in the exact same way. */
2664 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2665 if (rcStrict == VINF_SUCCESS)
2666 {
2667 switch (enmEffOpSize)
2668 {
2669 case IEMMODE_16BIT:
2670 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2671 break;
2672 case IEMMODE_32BIT:
2673 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2674 break;
2675 case IEMMODE_64BIT:
2676 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2677 break;
2678 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2679 }
2680 }
2681
2682 return rcStrict;
2683}
2684
2685
2686/**
2687 * Implements lgdt.
2688 *
2689 * @param iEffSeg The segment of the new gdtr contents.
2690 * @param GCPtrEffSrc The address of the new gdtr contents.
2691 * @param enmEffOpSize The effective operand size.
2692 */
2693IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2694{
2695 if (pIemCpu->uCpl != 0)
2696 return iemRaiseGeneralProtectionFault0(pIemCpu);
2697 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2698
2699 /*
2700 * Fetch the limit and base address.
2701 */
2702 uint16_t cbLimit;
2703 RTGCPTR GCPtrBase;
2704 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2705 if (rcStrict == VINF_SUCCESS)
2706 {
2707 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2708 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2709 else
2710 {
2711 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2712 pCtx->gdtr.cbGdt = cbLimit;
2713 pCtx->gdtr.pGdt = GCPtrBase;
2714 }
2715 if (rcStrict == VINF_SUCCESS)
2716 iemRegAddToRip(pIemCpu, cbInstr);
2717 }
2718 return rcStrict;
2719}
2720
2721
2722/**
2723 * Implements sgdt.
2724 *
2725 * @param iEffSeg The segment where to store the gdtr content.
2726 * @param GCPtrEffDst The address where to store the gdtr content.
2727 * @param enmEffOpSize The effective operand size.
2728 */
2729IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2730{
2731 /*
2732 * Join paths with sidt.
2733 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2734 * you really must know.
2735 */
2736 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2737 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2738 if (rcStrict == VINF_SUCCESS)
2739 iemRegAddToRip(pIemCpu, cbInstr);
2740 return rcStrict;
2741}
2742
2743
2744/**
2745 * Implements lidt.
2746 *
2747 * @param iEffSeg The segment of the new idtr contents.
2748 * @param GCPtrEffSrc The address of the new idtr contents.
2749 * @param enmEffOpSize The effective operand size.
2750 */
2751IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2752{
2753 if (pIemCpu->uCpl != 0)
2754 return iemRaiseGeneralProtectionFault0(pIemCpu);
2755 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2756
2757 /*
2758 * Fetch the limit and base address.
2759 */
2760 uint16_t cbLimit;
2761 RTGCPTR GCPtrBase;
2762 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2763 if (rcStrict == VINF_SUCCESS)
2764 {
2765 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2766 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2767 else
2768 {
2769 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2770 pCtx->idtr.cbIdt = cbLimit;
2771 pCtx->idtr.pIdt = GCPtrBase;
2772 }
2773 iemRegAddToRip(pIemCpu, cbInstr);
2774 }
2775 return rcStrict;
2776}
2777
2778
2779/**
2780 * Implements sidt.
2781 *
2782 * @param iEffSeg The segment where to store the idtr content.
2783 * @param GCPtrEffDst The address where to store the idtr content.
2784 * @param enmEffOpSize The effective operand size.
2785 */
2786IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2787{
2788 /*
2789 * Join paths with sgdt.
2790 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2791 * you really must know.
2792 */
2793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2794 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2795 if (rcStrict == VINF_SUCCESS)
2796 iemRegAddToRip(pIemCpu, cbInstr);
2797 return rcStrict;
2798}
2799
2800
2801/**
2802 * Implements lldt.
2803 *
2804 * @param uNewLdt The new LDT selector value.
2805 */
2806IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2807{
2808 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2809
2810 /*
2811 * Check preconditions.
2812 */
2813 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2814 {
2815 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2816 return iemRaiseUndefinedOpcode(pIemCpu);
2817 }
2818 if (pIemCpu->uCpl != 0)
2819 {
2820 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2821 return iemRaiseGeneralProtectionFault0(pIemCpu);
2822 }
2823 if (uNewLdt & X86_SEL_LDT)
2824 {
2825 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2826 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2827 }
2828
2829 /*
2830 * Now, loading a NULL selector is easy.
2831 */
2832 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2833 {
2834 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2835 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2836 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
2837 else
2838 pCtx->ldtr.Sel = uNewLdt;
2839 pCtx->ldtr.ValidSel = uNewLdt;
2840 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2841 if (IEM_IS_GUEST_CPU_AMD(pIemCpu) && !IEM_VERIFICATION_ENABLED(pIemCpu))
2842 pCtx->ldtr.Attr.u = 0;
2843 else
2844 {
2845 pCtx->ldtr.u64Base = 0;
2846 pCtx->ldtr.u32Limit = 0;
2847 }
2848
2849 iemRegAddToRip(pIemCpu, cbInstr);
2850 return VINF_SUCCESS;
2851 }
2852
2853 /*
2854 * Read the descriptor.
2855 */
2856 IEMSELDESC Desc;
2857 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2858 if (rcStrict != VINF_SUCCESS)
2859 return rcStrict;
2860
2861 /* Check GPs first. */
2862 if (Desc.Legacy.Gen.u1DescType)
2863 {
2864 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2865 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2866 }
2867 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2868 {
2869 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2870 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2871 }
2872 uint64_t u64Base;
2873 if (!IEM_IS_LONG_MODE(pIemCpu))
2874 u64Base = X86DESC_BASE(&Desc.Legacy);
2875 else
2876 {
2877 if (Desc.Long.Gen.u5Zeros)
2878 {
2879 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2880 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2881 }
2882
2883 u64Base = X86DESC64_BASE(&Desc.Long);
2884 if (!IEM_IS_CANONICAL(u64Base))
2885 {
2886 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2887 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2888 }
2889 }
2890
2891 /* NP */
2892 if (!Desc.Legacy.Gen.u1Present)
2893 {
2894 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2895 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2896 }
2897
2898 /*
2899 * It checks out alright, update the registers.
2900 */
2901/** @todo check if the actual value is loaded or if the RPL is dropped */
2902 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2903 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
2904 else
2905 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
2906 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
2907 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2908 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2909 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
2910 pCtx->ldtr.u64Base = u64Base;
2911
2912 iemRegAddToRip(pIemCpu, cbInstr);
2913 return VINF_SUCCESS;
2914}
2915
2916
2917/**
2918 * Implements ltr.
2919 *
2920 * @param uNewTr The new task register (TR) selector value.
2921 */
2922IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2923{
2924 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2925
2926 /*
2927 * Check preconditions.
2928 */
2929 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2930 {
2931 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2932 return iemRaiseUndefinedOpcode(pIemCpu);
2933 }
2934 if (pIemCpu->uCpl != 0)
2935 {
2936 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2937 return iemRaiseGeneralProtectionFault0(pIemCpu);
2938 }
2939 if (uNewTr & X86_SEL_LDT)
2940 {
2941 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2942 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2943 }
2944 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
2945 {
2946 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2947 return iemRaiseGeneralProtectionFault0(pIemCpu);
2948 }
2949
2950 /*
2951 * Read the descriptor.
2952 */
2953 IEMSELDESC Desc;
2954 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2955 if (rcStrict != VINF_SUCCESS)
2956 return rcStrict;
2957
2958 /* Check GPs first. */
2959 if (Desc.Legacy.Gen.u1DescType)
2960 {
2961 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2962 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2963 }
2964 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2965 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2966 || IEM_IS_LONG_MODE(pIemCpu)) )
2967 {
2968 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2969 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2970 }
2971 uint64_t u64Base;
2972 if (!IEM_IS_LONG_MODE(pIemCpu))
2973 u64Base = X86DESC_BASE(&Desc.Legacy);
2974 else
2975 {
2976 if (Desc.Long.Gen.u5Zeros)
2977 {
2978 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2979 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2980 }
2981
2982 u64Base = X86DESC64_BASE(&Desc.Long);
2983 if (!IEM_IS_CANONICAL(u64Base))
2984 {
2985 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2986 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
2987 }
2988 }
2989
2990 /* NP */
2991 if (!Desc.Legacy.Gen.u1Present)
2992 {
2993 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2994 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2995 }
2996
2997 /*
2998 * Set it busy.
2999 * Note! Intel says this should lock down the whole descriptor, but we'll
3000 * restrict ourselves to 32-bit for now due to lack of inline
3001 * assembly and such.
3002 */
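    /* The busy flag is bit 1 of the type field, i.e. bit 41 of the 8-byte
       descriptor (hence 40 + 1); the switch below adjusts the base pointer so
       the atomic bit operation works on a dword-aligned address. */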
3003 void *pvDesc;
3004 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
3005 if (rcStrict != VINF_SUCCESS)
3006 return rcStrict;
3007 switch ((uintptr_t)pvDesc & 3)
3008 {
3009 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
3010 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
3011 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
3012 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
3013 }
3014 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
3015 if (rcStrict != VINF_SUCCESS)
3016 return rcStrict;
3017 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3018
3019 /*
3020 * It checks out alright, update the registers.
3021 */
3022/** @todo check if the actual value is loaded or if the RPL is dropped */
3023 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3024 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
3025 else
3026 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
3027 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
3028 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3029 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3030 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3031 pCtx->tr.u64Base = u64Base;
3032
3033 iemRegAddToRip(pIemCpu, cbInstr);
3034 return VINF_SUCCESS;
3035}
3036
3037
3038/**
3039 * Implements mov GReg,CRx.
3040 *
3041 * @param iGReg The general register to store the CRx value in.
3042 * @param iCrReg The CRx register to read (valid).
3043 */
3044IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
3045{
3046 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3047 if (pIemCpu->uCpl != 0)
3048 return iemRaiseGeneralProtectionFault0(pIemCpu);
3049 Assert(!pCtx->eflags.Bits.u1VM);
3050
3051 /* read it */
3052 uint64_t crX;
3053 switch (iCrReg)
3054 {
3055 case 0: crX = pCtx->cr0; break;
3056 case 2: crX = pCtx->cr2; break;
3057 case 3: crX = pCtx->cr3; break;
3058 case 4: crX = pCtx->cr4; break;
3059 case 8:
3060 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3061 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
3062 else
3063 crX = 0xff;
3064 break;
3065 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3066 }
3067
3068 /* store it */
3069 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3070 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
3071 else
3072 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
3073
3074 iemRegAddToRip(pIemCpu, cbInstr);
3075 return VINF_SUCCESS;
3076}
3077
3078
3079/**
3080 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
3081 *
3082 * @param iCrReg The CRx register to write (valid).
3083 * @param uNewCrX The new value.
3084 */
3085IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
3086{
3087 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3088 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3089 VBOXSTRICTRC rcStrict;
3090 int rc;
3091
3092 /*
3093 * Try to store it.
3094 * Unfortunately, CPUM only does a tiny bit of the work.
3095 */
3096 switch (iCrReg)
3097 {
3098 case 0:
3099 {
3100 /*
3101 * Perform checks.
3102 */
3103 uint64_t const uOldCrX = pCtx->cr0;
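            /* CR0.ET is hardwired to 1 on 486 and later CPUs; writes to it are ignored. */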
3104 uNewCrX |= X86_CR0_ET; /* hardcoded */
3105
3106 /* Check for reserved bits. */
3107 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
3108 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
3109 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
3110 if (uNewCrX & ~(uint64_t)fValid)
3111 {
3112 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3113 return iemRaiseGeneralProtectionFault0(pIemCpu);
3114 }
3115
3116 /* Check for invalid combinations. */
3117 if ( (uNewCrX & X86_CR0_PG)
3118 && !(uNewCrX & X86_CR0_PE) )
3119 {
3120 Log(("Trying to set CR0.PG without CR0.PE\n"));
3121 return iemRaiseGeneralProtectionFault0(pIemCpu);
3122 }
3123
3124 if ( !(uNewCrX & X86_CR0_CD)
3125 && (uNewCrX & X86_CR0_NW) )
3126 {
3127 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
3128 return iemRaiseGeneralProtectionFault0(pIemCpu);
3129 }
3130
3131 /* Long mode consistency checks. */
3132 if ( (uNewCrX & X86_CR0_PG)
3133 && !(uOldCrX & X86_CR0_PG)
3134 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3135 {
3136 if (!(pCtx->cr4 & X86_CR4_PAE))
3137 {
3138 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
3139 return iemRaiseGeneralProtectionFault0(pIemCpu);
3140 }
3141 if (pCtx->cs.Attr.n.u1Long)
3142 {
3143 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
3144 return iemRaiseGeneralProtectionFault0(pIemCpu);
3145 }
3146 }
3147
3148 /** @todo check reserved PDPTR bits as AMD states. */
3149
3150 /*
3151 * Change CR0.
3152 */
3153 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3154 CPUMSetGuestCR0(pVCpu, uNewCrX);
3155 else
3156 pCtx->cr0 = uNewCrX;
3157 Assert(pCtx->cr0 == uNewCrX);
3158
3159 /*
3160 * Change EFER.LMA if entering or leaving long mode.
3161 */
3162 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3163 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3164 {
3165 uint64_t NewEFER = pCtx->msrEFER;
3166 if (uNewCrX & X86_CR0_PG)
3167 NewEFER |= MSR_K6_EFER_LME;
3168 else
3169 NewEFER &= ~MSR_K6_EFER_LME;
3170
3171 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3172 CPUMSetGuestEFER(pVCpu, NewEFER);
3173 else
3174 pCtx->msrEFER = NewEFER;
3175 Assert(pCtx->msrEFER == NewEFER);
3176 }
3177
3178 /*
3179 * Inform PGM.
3180 */
3181 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3182 {
3183 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3184 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3185 {
3186 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3187 AssertRCReturn(rc, rc);
3188 /* ignore informational status codes */
3189 }
3190 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3191 }
3192 else
3193 rcStrict = VINF_SUCCESS;
3194 break;
3195 }
3196
3197 /*
3198 * CR2 can be changed without any restrictions.
3199 */
3200 case 2:
3201 pCtx->cr2 = uNewCrX;
3202 rcStrict = VINF_SUCCESS;
3203 break;
3204
3205 /*
3206 * CR3 is relatively simple, although AMD and Intel have different
3207 * accounts of how setting reserved bits is handled. We take Intel's
3208 * word for the lower bits and AMD's for the high bits (63:52).
3209 */
3210 /** @todo Testcase: Setting reserved bits in CR3, especially before
3211 * enabling paging. */
3212 case 3:
3213 {
3214 /* check / mask the value. */
3215 if (uNewCrX & UINT64_C(0xfff0000000000000))
3216 {
3217 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3218 return iemRaiseGeneralProtectionFault0(pIemCpu);
3219 }
3220
3221 uint64_t fValid;
3222 if ( (pCtx->cr4 & X86_CR4_PAE)
3223 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3224 fValid = UINT64_C(0x000ffffffffff014);
3225 else if (pCtx->cr4 & X86_CR4_PAE)
3226 fValid = UINT64_C(0xfffffff4);
3227 else
3228 fValid = UINT64_C(0xfffff014);
3229 if (uNewCrX & ~fValid)
3230 {
3231 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3232 uNewCrX, uNewCrX & ~fValid));
3233 uNewCrX &= fValid;
3234 }
3235
3236 /** @todo If we're in PAE mode we should check the PDPTRs for
3237 * invalid bits. */
3238
3239 /* Make the change. */
3240 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3241 {
3242 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3243 AssertRCSuccessReturn(rc, rc);
3244 }
3245 else
3246 pCtx->cr3 = uNewCrX;
3247
3248 /* Inform PGM. */
3249 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3250 {
3251 if (pCtx->cr0 & X86_CR0_PG)
3252 {
3253 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3254 AssertRCReturn(rc, rc);
3255 /* ignore informational status codes */
3256 }
3257 }
3258 rcStrict = VINF_SUCCESS;
3259 break;
3260 }
3261
3262 /*
3263 * CR4 is a bit more tedious as there are bits which cannot be cleared
3264 * under some circumstances and such.
3265 */
3266 case 4:
3267 {
3268 uint64_t const uOldCrX = pCtx->cr4;
3269
3270 /* reserved bits */
3271 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3272 | X86_CR4_TSD | X86_CR4_DE
3273 | X86_CR4_PSE | X86_CR4_PAE
3274 | X86_CR4_MCE | X86_CR4_PGE
3275 | X86_CR4_PCE | X86_CR4_OSFSXR
3276 | X86_CR4_OSXMMEEXCPT;
3277 //if (xxx)
3278 // fValid |= X86_CR4_VMXE;
3279 //if (xxx)
3280 // fValid |= X86_CR4_OSXSAVE;
3281 if (uNewCrX & ~(uint64_t)fValid)
3282 {
3283 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3284 return iemRaiseGeneralProtectionFault0(pIemCpu);
3285 }
3286
3287 /* long mode checks. */
3288 if ( (uOldCrX & X86_CR4_PAE)
3289 && !(uNewCrX & X86_CR4_PAE)
3290 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3291 {
3292 Log(("Trying to clear CR4.PAE while long mode is active\n"));
3293 return iemRaiseGeneralProtectionFault0(pIemCpu);
3294 }
3295
3296
3297 /*
3298 * Change it.
3299 */
3300 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3301 {
3302 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3303 AssertRCSuccessReturn(rc, rc);
3304 }
3305 else
3306 pCtx->cr4 = uNewCrX;
3307 Assert(pCtx->cr4 == uNewCrX);
3308
3309 /*
3310 * Notify SELM and PGM.
3311 */
3312 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3313 {
3314 /* SELM - VME may change things wrt the TSS shadowing. */
3315 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3316 {
3317 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3318 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3319 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3320 }
3321
3322 /* PGM - flushing and mode. */
3323 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3324 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3325 {
3326 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3327 AssertRCReturn(rc, rc);
3328 /* ignore informational status codes */
3329 }
3330 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3331 }
3332 else
3333 rcStrict = VINF_SUCCESS;
3334 break;
3335 }
3336
3337 /*
3338 * CR8 maps to the APIC TPR.
3339 */
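        /* (CR8 bits 3:0 hold the task-priority class, i.e. TPR bits 7:4.) */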
3340 case 8:
3341 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3342 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR write\n")); /** @todo implement CR8 reading and writing. */
3343 else
3344 rcStrict = VINF_SUCCESS;
3345 break;
3346
3347 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3348 }
3349
3350 /*
3351 * Advance the RIP on success.
3352 */
3353 if (RT_SUCCESS(rcStrict))
3354 {
3355 if (rcStrict != VINF_SUCCESS)
3356 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3357 iemRegAddToRip(pIemCpu, cbInstr);
3358 }
3359
3360 return rcStrict;
3361}
3362
3363
3364/**
3365 * Implements mov CRx,GReg.
3366 *
3367 * @param iCrReg The CRx register to write (valid).
3368 * @param iGReg The general register to load the CRx value from.
3369 */
3370IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3371{
3372 if (pIemCpu->uCpl != 0)
3373 return iemRaiseGeneralProtectionFault0(pIemCpu);
3374 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3375
3376 /*
3377 * Read the new value from the source register and call common worker.
3378 */
3379 uint64_t uNewCrX;
3380 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3381 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3382 else
3383 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3384 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3385}
3386
3387
3388/**
3389 * Implements 'LMSW r/m16'
3390 *
3391 * @param u16NewMsw The new value.
3392 */
3393IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3394{
3395 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3396
3397 if (pIemCpu->uCpl != 0)
3398 return iemRaiseGeneralProtectionFault0(pIemCpu);
3399 Assert(!pCtx->eflags.Bits.u1VM);
3400
3401 /*
3402 * Compose the new CR0 value and call common worker.
3403 */
3404 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3405 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3406 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3407}
3408
3409
3410/**
3411 * Implements 'CLTS'.
3412 */
3413IEM_CIMPL_DEF_0(iemCImpl_clts)
3414{
3415 if (pIemCpu->uCpl != 0)
3416 return iemRaiseGeneralProtectionFault0(pIemCpu);
3417
3418 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3419 uint64_t uNewCr0 = pCtx->cr0;
3420 uNewCr0 &= ~X86_CR0_TS;
3421 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3422}
3423
3424
3425/**
3426 * Implements mov GReg,DRx.
3427 *
3428 * @param iGReg The general register to store the DRx value in.
3429 * @param iDrReg The DRx register to read (0-7).
3430 */
3431IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3432{
3433 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3434
3435 /*
3436 * Check preconditions.
3437 */
3438
3439 /* Raise GPs. */
3440 if (pIemCpu->uCpl != 0)
3441 return iemRaiseGeneralProtectionFault0(pIemCpu);
3442 Assert(!pCtx->eflags.Bits.u1VM);
3443
3444 if ( (iDrReg == 4 || iDrReg == 5)
3445 && (pCtx->cr4 & X86_CR4_DE) )
3446 {
3447 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3448 return iemRaiseGeneralProtectionFault0(pIemCpu);
3449 }
3450
3451 /* Raise #DB if general access detect is enabled. */
3452 if (pCtx->dr[7] & X86_DR7_GD)
3453 {
3454 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3455 return iemRaiseDebugException(pIemCpu);
3456 }
3457
3458 /*
3459 * Read the debug register and store it in the specified general register.
3460 */
3461 uint64_t drX;
3462 switch (iDrReg)
3463 {
3464 case 0: drX = pCtx->dr[0]; break;
3465 case 1: drX = pCtx->dr[1]; break;
3466 case 2: drX = pCtx->dr[2]; break;
3467 case 3: drX = pCtx->dr[3]; break;
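        /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear (the CR4.DE=1
           case raised #GP above). The architecturally fixed bits of DR6/DR7
           are forced on read. */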
3468 case 6:
3469 case 4:
3470 drX = pCtx->dr[6];
3471 drX &= ~RT_BIT_32(12);
3472 drX |= UINT32_C(0xffff0ff0);
3473 break;
3474 case 7:
3475 case 5:
3476 drX = pCtx->dr[7];
3477 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3478 drX |= RT_BIT_32(10);
3479 break;
3480 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3481 }
3482
3483 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3484 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3485 else
3486 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3487
3488 iemRegAddToRip(pIemCpu, cbInstr);
3489 return VINF_SUCCESS;
3490}
3491
3492
3493/**
3494 * Implements mov DRx,GReg.
3495 *
3496 * @param iDrReg The DRx register to write (valid).
3497 * @param iGReg The general register to load the DRx value from.
3498 */
3499IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3500{
3501 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3502
3503 /*
3504 * Check preconditions.
3505 */
3506 if (pIemCpu->uCpl != 0)
3507 return iemRaiseGeneralProtectionFault0(pIemCpu);
3508 Assert(!pCtx->eflags.Bits.u1VM);
3509
3510 if ( (iDrReg == 4 || iDrReg == 5)
3511 && (pCtx->cr4 & X86_CR4_DE) )
3512 {
3513 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3514 return iemRaiseGeneralProtectionFault0(pIemCpu);
3515 }
3516
3517 /* Raise #DB if general access detect is enabled. */
3518 /** @todo is the \#DB for DR7.GD raised before the \#GP for reserved high
3519 * bits in DR7/DR6? */
3520 if (pCtx->dr[7] & X86_DR7_GD)
3521 {
3522 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3523 return iemRaiseDebugException(pIemCpu);
3524 }
3525
3526 /*
3527 * Read the new value from the source register.
3528 */
3529 uint64_t uNewDrX;
3530 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3531 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3532 else
3533 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3534
3535 /*
3536 * Adjust it.
3537 */
3538 switch (iDrReg)
3539 {
3540 case 0:
3541 case 1:
3542 case 2:
3543 case 3:
3544 /* nothing to adjust */
3545 break;
3546
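        /* DR4/DR5 alias DR6/DR7 here as well. The upper 32 bits must be zero
           and the fixed bits are forced before the value is committed. */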
3547 case 6:
3548 case 4:
3549 if (uNewDrX & UINT64_C(0xffffffff00000000))
3550 {
3551 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3552 return iemRaiseGeneralProtectionFault0(pIemCpu);
3553 }
3554 uNewDrX &= ~RT_BIT_32(12);
3555 uNewDrX |= UINT32_C(0xffff0ff0);
3556 break;
3557
3558 case 7:
3559 case 5:
3560 if (uNewDrX & UINT64_C(0xffffffff00000000))
3561 {
3562 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3563 return iemRaiseGeneralProtectionFault0(pIemCpu);
3564 }
3565 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3566 uNewDrX |= RT_BIT_32(10);
3567 break;
3568
3569 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3570 }
3571
3572 /*
3573 * Do the actual setting.
3574 */
3575 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3576 {
3577 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3578 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3579 }
3580 else
3581 pCtx->dr[iDrReg] = uNewDrX;
3582
3583 iemRegAddToRip(pIemCpu, cbInstr);
3584 return VINF_SUCCESS;
3585}
3586
3587
3588/**
3589 * Implements 'INVLPG m'.
3590 *
3591 * @param GCPtrPage The effective address of the page to invalidate.
3592 * @remarks Updates the RIP.
3593 */
3594IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3595{
3596 /* ring-0 only. */
3597 if (pIemCpu->uCpl != 0)
3598 return iemRaiseGeneralProtectionFault0(pIemCpu);
3599 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3600
3601 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3602 iemRegAddToRip(pIemCpu, cbInstr);
3603
3604 if (rc == VINF_SUCCESS)
3605 return VINF_SUCCESS;
3606 if (rc == VINF_PGM_SYNC_CR3)
3607 return iemSetPassUpStatus(pIemCpu, rc);
3608
3609 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3610 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
3611 return rc;
3612}
3613
3614
3615/**
3616 * Implements RDTSC.
3617 */
3618IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3619{
3620 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3621
3622 /*
3623 * Check preconditions.
3624 */
3625 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3626 return iemRaiseUndefinedOpcode(pIemCpu);
3627
3628 if ( (pCtx->cr4 & X86_CR4_TSD)
3629 && pIemCpu->uCpl != 0)
3630 {
3631 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3632 return iemRaiseGeneralProtectionFault0(pIemCpu);
3633 }
3634
3635 /*
3636 * Do the job.
3637 */
3638 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3639 pCtx->rax = (uint32_t)uTicks;
3640 pCtx->rdx = uTicks >> 32;
3641#ifdef IEM_VERIFICATION_MODE
3642 pIemCpu->fIgnoreRaxRdx = true;
3643#endif
3644
3645 iemRegAddToRip(pIemCpu, cbInstr);
3646 return VINF_SUCCESS;
3647}
3648
3649
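#if 0 /* Editor's illustrative sketch -- not used, helper name is hypothetical. RDTSC
       * returns the 64-bit time-stamp counter split across EDX:EAX; in 64-bit mode the
       * upper halves of RAX and RDX are cleared by the 32-bit writes, which is what the
       * (uint32_t) cast and the 32-bit shift result above achieve. */
static void iemSketchSplitTsc(uint64_t uTicks, uint64_t *puRax, uint64_t *puRdx)
{
    *puRax = (uint32_t)uTicks;          /* EAX = low 32 bits, upper RAX bits cleared */
    *puRdx = (uint32_t)(uTicks >> 32);  /* EDX = high 32 bits, upper RDX bits cleared */
}
#endif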
3650/**
3651 * Implements RDMSR.
3652 */
3653IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3654{
3655 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3656
3657 /*
3658 * Check preconditions.
3659 */
3660 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3661 return iemRaiseUndefinedOpcode(pIemCpu);
3662 if (pIemCpu->uCpl != 0)
3663 return iemRaiseGeneralProtectionFault0(pIemCpu);
3664
3665 /*
3666 * Do the job.
3667 */
3668 RTUINT64U uValue;
3669 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3670 if (rc != VINF_SUCCESS)
3671 {
3672 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3673 return iemRaiseGeneralProtectionFault0(pIemCpu);
3674 }
3675
3676 pCtx->rax = uValue.au32[0];
3677 pCtx->rdx = uValue.au32[1];
3678
3679 iemRegAddToRip(pIemCpu, cbInstr);
3680 return VINF_SUCCESS;
3681}
3682
3683
3684/**
3685 * Implements 'IN eAX, port'.
3686 *
3687 * @param u16Port The source port.
3688 * @param cbReg The register size.
3689 */
3690IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3691{
3692 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3693
3694 /*
3695 * CPL check
3696 */
3697 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3698 if (rcStrict != VINF_SUCCESS)
3699 return rcStrict;
3700
3701 /*
3702 * Perform the I/O.
3703 */
3704 uint32_t u32Value;
3705 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3706 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3707 else
3708 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3709 if (IOM_SUCCESS(rcStrict))
3710 {
3711 switch (cbReg)
3712 {
3713 case 1: pCtx->al = (uint8_t)u32Value; break;
3714 case 2: pCtx->ax = (uint16_t)u32Value; break;
3715 case 4: pCtx->rax = u32Value; break;
3716 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3717 }
3718 iemRegAddToRip(pIemCpu, cbInstr);
3719 pIemCpu->cPotentialExits++;
3720 if (rcStrict != VINF_SUCCESS)
3721 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3722 }
3723
3724 return rcStrict;
3725}
3726
3727
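#if 0 /* Editor's illustrative sketch -- not used, helper name is hypothetical. Shows why
       * the 4-byte case above writes pCtx->rax rather than pCtx->eax: 8- and 16-bit
       * results merge into AL/AX and leave the rest of RAX untouched, while a 32-bit
       * result zero-extends into the full 64-bit register. */
static uint64_t iemSketchMergeInResult(uint64_t uRax, uint32_t u32Value, uint8_t cbReg)
{
    switch (cbReg)
    {
        case 1: return (uRax & UINT64_C(0xffffffffffffff00)) | (uint8_t)u32Value;
        case 2: return (uRax & UINT64_C(0xffffffffffff0000)) | (uint16_t)u32Value;
        case 4: return u32Value;        /* upper 32 bits of RAX are cleared */
        default: return uRax;
    }
}
#endif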
3728/**
3729 * Implements 'IN eAX, DX'.
3730 *
3731 * @param cbReg The register size.
3732 */
3733IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3734{
3735 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3736}
3737
3738
3739/**
3740 * Implements 'OUT port, eAX'.
3741 *
3742 * @param u16Port The destination port.
3743 * @param cbReg The register size.
3744 */
3745IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3746{
3747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3748
3749 /*
3750 * CPL check
3751 */
3752 if ( (pCtx->cr0 & X86_CR0_PE)
3753 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3754 || pCtx->eflags.Bits.u1VM) )
3755 {
3756 /** @todo I/O port permission bitmap check */
3757 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap checks.\n"));
3758 }
3759
3760 /*
3761 * Perform the I/O.
3762 */
3763 uint32_t u32Value;
3764 switch (cbReg)
3765 {
3766 case 1: u32Value = pCtx->al; break;
3767 case 2: u32Value = pCtx->ax; break;
3768 case 4: u32Value = pCtx->eax; break;
3769 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3770 }
3771 VBOXSTRICTRC rcStrict;
3772 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3773 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3774 else
3775 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3776 if (IOM_SUCCESS(rcStrict))
3777 {
3778 iemRegAddToRip(pIemCpu, cbInstr);
3779 pIemCpu->cPotentialExits++;
3780 if (rcStrict != VINF_SUCCESS)
3781 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3782 }
3783 return rcStrict;
3784}
3785
3786
3787/**
3788 * Implements 'OUT DX, eAX'.
3789 *
3790 * @param cbReg The register size.
3791 */
3792IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3793{
3794 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3795}
3796
3797
3798/**
3799 * Implements 'CLI'.
3800 */
3801IEM_CIMPL_DEF_0(iemCImpl_cli)
3802{
3803 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3804
3805 if (pCtx->cr0 & X86_CR0_PE)
3806 {
3807 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3808 if (!pCtx->eflags.Bits.u1VM)
3809 {
3810 if (pIemCpu->uCpl <= uIopl)
3811 pCtx->eflags.Bits.u1IF = 0;
3812 else if ( pIemCpu->uCpl == 3
3813 && (pCtx->cr4 & X86_CR4_PVI) )
3814 pCtx->eflags.Bits.u1VIF = 0;
3815 else
3816 return iemRaiseGeneralProtectionFault0(pIemCpu);
3817 }
3818 /* V8086 */
3819 else if (uIopl == 3)
3820 pCtx->eflags.Bits.u1IF = 0;
3821 else if ( uIopl < 3
3822 && (pCtx->cr4 & X86_CR4_VME) )
3823 pCtx->eflags.Bits.u1VIF = 0;
3824 else
3825 return iemRaiseGeneralProtectionFault0(pIemCpu);
3826 }
3827 /* real mode */
3828 else
3829 pCtx->eflags.Bits.u1IF = 0;
3830 iemRegAddToRip(pIemCpu, cbInstr);
3831 return VINF_SUCCESS;
3832}
3833
3834
3835/**
3836 * Implements 'STI'.
3837 */
3838IEM_CIMPL_DEF_0(iemCImpl_sti)
3839{
3840 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3841
3842 if (pCtx->cr0 & X86_CR0_PE)
3843 {
3844 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3845 if (!pCtx->eflags.Bits.u1VM)
3846 {
3847 if (pIemCpu->uCpl <= uIopl)
3848 pCtx->eflags.Bits.u1IF = 1;
3849 else if ( pIemCpu->uCpl == 3
3850 && (pCtx->cr4 & X86_CR4_PVI)
3851 && !pCtx->eflags.Bits.u1VIP )
3852 pCtx->eflags.Bits.u1VIF = 1;
3853 else
3854 return iemRaiseGeneralProtectionFault0(pIemCpu);
3855 }
3856 /* V8086 */
3857 else if (uIopl == 3)
3858 pCtx->eflags.Bits.u1IF = 1;
3859 else if ( uIopl < 3
3860 && (pCtx->cr4 & X86_CR4_VME)
3861 && !pCtx->eflags.Bits.u1VIP )
3862 pCtx->eflags.Bits.u1VIF = 1;
3863 else
3864 return iemRaiseGeneralProtectionFault0(pIemCpu);
3865 }
3866 /* real mode */
3867 else
3868 pCtx->eflags.Bits.u1IF = 1;
3869
3870 iemRegAddToRip(pIemCpu, cbInstr);
3871 /** @todo don't do this unconditionally... */
3872 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3873 return VINF_SUCCESS;
3874}
3875
3876
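#if 0 /* Editor's illustrative sketch -- not used, names are hypothetical. Summarizes the
       * CLI/STI privilege rules implemented above: in protected mode CPL <= IOPL toggles
       * IF directly; CPL 3 with CR4.PVI set falls back to the virtual interrupt flag
       * (VIF); in V8086 mode IOPL 3 toggles IF and CR4.VME enables the VIF fallback;
       * anything else is #GP(0).  STI additionally requires VIP to be clear on the VIF
       * paths, and in real mode both instructions always toggle IF. */
typedef enum IEMSKETCHIFACTION { kIfAction_IF, kIfAction_VIF, kIfAction_GP } IEMSKETCHIFACTION;
static IEMSKETCHIFACTION iemSketchCliStiAction(bool fV86, uint8_t uCpl, uint8_t uIopl, bool fPvi, bool fVme)
{
    if (!fV86)
    {
        if (uCpl <= uIopl)
            return kIfAction_IF;
        if (uCpl == 3 && fPvi)
            return kIfAction_VIF;
        return kIfAction_GP;
    }
    if (uIopl == 3)
        return kIfAction_IF;
    if (fVme)
        return kIfAction_VIF;
    return kIfAction_GP;
}
#endif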
3877/**
3878 * Implements 'HLT'.
3879 */
3880IEM_CIMPL_DEF_0(iemCImpl_hlt)
3881{
3882 if (pIemCpu->uCpl != 0)
3883 return iemRaiseGeneralProtectionFault0(pIemCpu);
3884 iemRegAddToRip(pIemCpu, cbInstr);
3885 return VINF_EM_HALT;
3886}
3887
3888
3889/**
3890 * Implements 'CPUID'.
3891 */
3892IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3893{
3894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3895
3896 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3897 pCtx->rax &= UINT32_C(0xffffffff);
3898 pCtx->rbx &= UINT32_C(0xffffffff);
3899 pCtx->rcx &= UINT32_C(0xffffffff);
3900 pCtx->rdx &= UINT32_C(0xffffffff);
3901
3902 iemRegAddToRip(pIemCpu, cbInstr);
3903 return VINF_SUCCESS;
3904}
3905
3906
3907/**
3908 * Implements 'AAD'.
3909 *
3910 * @param bImm The immediate operand (the base; 10 for the plain AAD encoding).
3911 */
3912IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3913{
3914 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3915
3916 uint16_t const ax = pCtx->ax;
3917 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3918 pCtx->ax = al;
3919 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3920 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3921 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3922
3923 iemRegAddToRip(pIemCpu, cbInstr);
3924 return VINF_SUCCESS;
3925}
3926
3927
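#if 0 /* Editor's illustrative sketch -- not used, helper name is hypothetical. AAD folds
       * the unpacked BCD digit in AH into AL using the immediate base (10 for the plain
       * AAD encoding): AL = AL + AH*imm, AH = 0, as done above. */
static uint16_t iemSketchAad(uint16_t uAx, uint8_t bImm)
{
    uint8_t const al = (uint8_t)((uint8_t)uAx + (uint8_t)(uAx >> 8) * bImm);
    return al;                          /* AH ends up zero */
}
#endif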
3928/**
3929 * Implements 'AAM'.
3930 *
3931 * @param bImm The immediate operand. Cannot be 0.
3932 */
3933IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3934{
3935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3936 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3937
3938 uint16_t const ax = pCtx->ax;
3939 uint8_t const al = (uint8_t)ax % bImm;
3940 uint8_t const ah = (uint8_t)ax / bImm;
3941 pCtx->ax = (ah << 8) + al;
3942 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3943 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3944 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3945
3946 iemRegAddToRip(pIemCpu, cbInstr);
3947 return VINF_SUCCESS;
3948}
3949
3950
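#if 0 /* Editor's illustrative sketch -- not used, helper name is hypothetical. AAM splits
       * AL into unpacked BCD digits using the immediate base (10 for the plain AAM
       * encoding): AH = AL / imm, AL = AL % imm.  The imm=0 case (#DE) is rejected by
       * the decoder before iemCImpl_aam is reached. */
static uint16_t iemSketchAam(uint16_t uAx, uint8_t bImm)
{
    uint8_t const al = (uint8_t)uAx % bImm;
    uint8_t const ah = (uint8_t)uAx / bImm;
    return (uint16_t)((ah << 8) | al);
}
#endif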
3951
3952
3953/*
3954 * Instantiate the various string operation combinations.
3955 */
3956#define OP_SIZE 8
3957#define ADDR_SIZE 16
3958#include "IEMAllCImplStrInstr.cpp.h"
3959#define OP_SIZE 8
3960#define ADDR_SIZE 32
3961#include "IEMAllCImplStrInstr.cpp.h"
3962#define OP_SIZE 8
3963#define ADDR_SIZE 64
3964#include "IEMAllCImplStrInstr.cpp.h"
3965
3966#define OP_SIZE 16
3967#define ADDR_SIZE 16
3968#include "IEMAllCImplStrInstr.cpp.h"
3969#define OP_SIZE 16
3970#define ADDR_SIZE 32
3971#include "IEMAllCImplStrInstr.cpp.h"
3972#define OP_SIZE 16
3973#define ADDR_SIZE 64
3974#include "IEMAllCImplStrInstr.cpp.h"
3975
3976#define OP_SIZE 32
3977#define ADDR_SIZE 16
3978#include "IEMAllCImplStrInstr.cpp.h"
3979#define OP_SIZE 32
3980#define ADDR_SIZE 32
3981#include "IEMAllCImplStrInstr.cpp.h"
3982#define OP_SIZE 32
3983#define ADDR_SIZE 64
3984#include "IEMAllCImplStrInstr.cpp.h"
3985
3986#define OP_SIZE 64
3987#define ADDR_SIZE 32
3988#include "IEMAllCImplStrInstr.cpp.h"
3989#define OP_SIZE 64
3990#define ADDR_SIZE 64
3991#include "IEMAllCImplStrInstr.cpp.h"
3992
3993
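#if 0 /* Editor's illustrative sketch of the template-include pattern used above -- not
       * the actual IEMAllCImplStrInstr.cpp.h, whose contents are assumed here: the
       * template presumably pastes OP_SIZE and ADDR_SIZE into the generated function
       * names and #undefs both macros at its end, which is why they can be redefined
       * before every inclusion without an explicit #undef in this file. */
#define SKETCH_CAT4_IMPL(a, b, c, d)    a ## b ## c ## d
#define SKETCH_CAT4(a, b, c, d)         SKETCH_CAT4_IMPL(a, b, c, d) /* expand args before pasting */

/* With OP_SIZE=16 and ADDR_SIZE=32 this declares sketch_stos_op16_addr32(). */
static void SKETCH_CAT4(sketch_stos_op, OP_SIZE, _addr, ADDR_SIZE)(void)
{
    /* size-specific body generated from one shared template */
}

#undef OP_SIZE
#undef ADDR_SIZE
#endif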
3994/**
3995 * Implements 'FINIT' and 'FNINIT'.
3996 *
3997 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3998 * not.
3999 */
4000IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
4001{
4002 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4003
4004 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4005 return iemRaiseDeviceNotAvailable(pIemCpu);
4006
4007 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
4008 if (fCheckXcpts && TODO )
4009 return iemRaiseMathFault(pIemCpu);
4010 */
4011
4012 if (iemFRegIsFxSaveFormat(pIemCpu))
4013 {
4014 pCtx->fpu.FCW = 0x37f;
4015 pCtx->fpu.FSW = 0;
4016 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
4017 pCtx->fpu.FPUDP = 0;
4018 pCtx->fpu.DS = 0; //??
4019 pCtx->fpu.FPUIP = 0;
4020 pCtx->fpu.CS = 0; //??
4021 pCtx->fpu.FOP = 0;
4022 }
4023 else
4024 {
4025 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
4026 pFpu->FCW = 0x37f;
4027 pFpu->FSW = 0;
4028 pFpu->FTW = 0xffff; /* 11 - empty */
4029 pFpu->FPUOO = 0; //??
4030 pFpu->FPUOS = 0; //??
4031 pFpu->FPUIP = 0;
4032 pFpu->CS = 0; //??
4033 pFpu->FOP = 0;
4034 }
4035
4036 iemRegAddToRip(pIemCpu, cbInstr);
4037 return VINF_SUCCESS;
4038}
4039
4040
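#if 0 /* Editor's illustrative sketch -- not used, constant names are hypothetical. The
       * FNINIT reset values written above, spelled out: FCW 0x037F = all exceptions
       * masked, 64-bit precision, round to nearest; FSW 0 = TOP=0, no pending
       * exceptions; FTW = all registers tagged empty (0x00 in the abridged FXSAVE-style
       * tag word, 0xFFFF in the full two-bits-per-register format). */
#define SKETCH_FNINIT_FCW           UINT16_C(0x037f)
#define SKETCH_FNINIT_FSW           UINT16_C(0x0000)
#define SKETCH_FNINIT_FTW_ABRIDGED  UINT8_C(0x00)    /* 0  = empty, per register */
#define SKETCH_FNINIT_FTW_FULL      UINT16_C(0xffff) /* 11 = empty, per register */
#endif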
4041/**
4042 * Implements 'FXSAVE'.
4043 *
4044 * @param iEffSeg The effective segment.
4045 * @param GCPtrEff The address of the image.
4046 * @param enmEffOpSize The operand size (only REX.W really matters).
4047 */
4048IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4049{
4050 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4051
4052 /*
4053 * Raise exceptions.
4054 */
4055 if (pCtx->cr0 & X86_CR0_EM)
4056 return iemRaiseUndefinedOpcode(pIemCpu);
4057 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4058 return iemRaiseDeviceNotAvailable(pIemCpu);
4059 if (GCPtrEff & 15)
4060 {
4061 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4062 * all/any misalignment sizes; Intel says it's an implementation detail. */
4063 if ( (pCtx->cr0 & X86_CR0_AM)
4064 && pCtx->eflags.Bits.u1AC
4065 && pIemCpu->uCpl == 3)
4066 return iemRaiseAlignmentCheckException(pIemCpu);
4067 return iemRaiseGeneralProtectionFault0(pIemCpu);
4068 }
4069 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4070
4071 /*
4072 * Access the memory.
4073 */
4074 void *pvMem512;
4075 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4076 if (rcStrict != VINF_SUCCESS)
4077 return rcStrict;
4078 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
4079
4080 /*
4081 * Store the registers.
4082 */
4083 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
4084 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
4085
4086 /* common for all formats */
4087 pDst->FCW = pCtx->fpu.FCW;
4088 pDst->FSW = pCtx->fpu.FSW;
4089 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
4090 pDst->FOP = pCtx->fpu.FOP;
4091 pDst->MXCSR = pCtx->fpu.MXCSR;
4092 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
4093 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
4094 {
4095 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
4096 * them for now... */
4097 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4098 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4099 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
4100 pDst->aRegs[i].au32[3] = 0;
4101 }
4102
4103 /* FPU IP, CS, DP and DS. */
4104 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
4105 * state information. :-/
4106 * Storing zeros now to prevent any potential leakage of host info. */
4107 pDst->FPUIP = 0;
4108 pDst->CS = 0;
4109 pDst->Rsrvd1 = 0;
4110 pDst->FPUDP = 0;
4111 pDst->DS = 0;
4112 pDst->Rsrvd2 = 0;
4113
4114 /* XMM registers. */
4115 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4116 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4117 || pIemCpu->uCpl != 0)
4118 {
4119 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4120 for (uint32_t i = 0; i < cXmmRegs; i++)
4121 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
4122 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
4123 * right? */
4124 }
4125
4126 /*
4127 * Commit the memory.
4128 */
4129 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4130 if (rcStrict != VINF_SUCCESS)
4131 return rcStrict;
4132
4133 iemRegAddToRip(pIemCpu, cbInstr);
4134 return VINF_SUCCESS;
4135}
4136
4137
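#if 0 /* Editor's illustrative sketch -- not used, names are hypothetical. The 16-byte
       * alignment check shared by FXSAVE and FXRSTOR above: a misaligned operand
       * normally takes #GP(0), but with CR0.AM set, EFLAGS.AC set and CPL==3 an #AC is
       * raised instead (subject to the implementation caveat noted in the todo). */
typedef enum IEMSKETCHALIGNXCPT { kAlignXcpt_None, kAlignXcpt_AC, kAlignXcpt_GP } IEMSKETCHALIGNXCPT;
static IEMSKETCHALIGNXCPT iemSketchFxAlignCheck(uint64_t GCPtrEff, bool fAm, bool fAc, uint8_t uCpl)
{
    if (!(GCPtrEff & 15))
        return kAlignXcpt_None;
    if (fAm && fAc && uCpl == 3)
        return kAlignXcpt_AC;
    return kAlignXcpt_GP;
}
#endif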
4138/**
4139 * Implements 'FXRSTOR'.
4140 *
4141 * @param iEffSeg The effective segment.
 * @param GCPtrEff The address of the image.
4142 * @param enmEffOpSize The operand size (only REX.W really matters).
4143 */
4144IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4145{
4146 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4147
4148 /*
4149 * Raise exceptions.
4150 */
4151 if (pCtx->cr0 & X86_CR0_EM)
4152 return iemRaiseUndefinedOpcode(pIemCpu);
4153 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4154 return iemRaiseDeviceNotAvailable(pIemCpu);
4155 if (GCPtrEff & 15)
4156 {
4157 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4158 * all/any misalignment sizes; Intel says it's an implementation detail. */
4159 if ( (pCtx->cr0 & X86_CR0_AM)
4160 && pCtx->eflags.Bits.u1AC
4161 && pIemCpu->uCpl == 3)
4162 return iemRaiseAlignmentCheckException(pIemCpu);
4163 return iemRaiseGeneralProtectionFault0(pIemCpu);
4164 }
4165 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4166
4167 /*
4168 * Access the memory.
4169 */
4170 void *pvMem512;
4171 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4172 if (rcStrict != VINF_SUCCESS)
4173 return rcStrict;
4174 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4175
4176 /*
4177 * Check the state for stuff which will GP(0).
4178 */
4179 uint32_t const fMXCSR = pSrc->MXCSR;
4180 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4181 if (fMXCSR & ~fMXCSR_MASK)
4182 {
4183 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4184 return iemRaiseGeneralProtectionFault0(pIemCpu);
4185 }
4186
4187 /*
4188 * Load the registers.
4189 */
4190 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
4191 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
4192
4193 /* common for all formats */
4194 pCtx->fpu.FCW = pSrc->FCW;
4195 pCtx->fpu.FSW = pSrc->FSW;
4196 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4197 pCtx->fpu.FOP = pSrc->FOP;
4198 pCtx->fpu.MXCSR = fMXCSR;
4199 /* (MXCSR_MASK is read-only) */
4200 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4201 {
4202 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4203 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4204 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4205 pCtx->fpu.aRegs[i].au32[3] = 0;
4206 }
4207
4208 /* FPU IP, CS, DP and DS. */
4209 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4210 {
4211 pCtx->fpu.FPUIP = pSrc->FPUIP;
4212 pCtx->fpu.CS = pSrc->CS;
4213 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4214 pCtx->fpu.FPUDP = pSrc->FPUDP;
4215 pCtx->fpu.DS = pSrc->DS;
4216 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4217 }
4218 else
4219 {
4220 pCtx->fpu.FPUIP = pSrc->FPUIP;
4221 pCtx->fpu.CS = pSrc->CS;
4222 pCtx->fpu.Rsrvd1 = 0;
4223 pCtx->fpu.FPUDP = pSrc->FPUDP;
4224 pCtx->fpu.DS = pSrc->DS;
4225 pCtx->fpu.Rsrvd2 = 0;
4226 }
4227
4228 /* XMM registers. */
4229 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4230 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4231 || pIemCpu->uCpl != 0)
4232 {
4233 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4234 for (uint32_t i = 0; i < cXmmRegs; i++)
4235 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4236 }
4237
4238 /*
4239 * Commit the memory.
4240 */
4241 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4242 if (rcStrict != VINF_SUCCESS)
4243 return rcStrict;
4244
4245 iemRegAddToRip(pIemCpu, cbInstr);
4246 return VINF_SUCCESS;
4247}
4248
4249
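#if 0 /* Editor's illustrative sketch -- not used, helper name is hypothetical. The
       * FXRSTOR #GP(0) check on MXCSR performed above: bits outside MXCSR_MASK must be
       * zero, and when the saved mask is zero the architectural default 0xFFBF is
       * assumed (all bits except bit 6, DAZ, which older CPUs lack). */
static bool iemSketchIsMxcsrValid(uint32_t fMxcsr, uint32_t fMxcsrMask)
{
    uint32_t const fMask = fMxcsrMask ? fMxcsrMask : UINT32_C(0xffbf);
    return !(fMxcsr & ~fMask);
}
#endif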
4250/**
4251 * Common routine for fnstenv and fnsave.
4252 *
4253 * @param uPtr Where to store the state.
4254 * @param pCtx The CPU context.
4255 */
4256static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4257{
4258 if (enmEffOpSize == IEMMODE_16BIT)
4259 {
4260 uPtr.pu16[0] = pCtx->fpu.FCW;
4261 uPtr.pu16[1] = pCtx->fpu.FSW;
4262 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4263 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4264 {
4265 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4266 * protected mode or long mode and we save it in real mode? And vice
4267 * versa? And with 32-bit operand size? I think the CPU is storing the
4268 * effective address ((CS << 4) + IP) in the offset register and not
4269 * doing any address calculations here. */
4270 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4271 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4272 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4273 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4274 }
4275 else
4276 {
4277 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4278 uPtr.pu16[4] = pCtx->fpu.CS;
4279 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4280 uPtr.pu16[6] = pCtx->fpu.DS;
4281 }
4282 }
4283 else
4284 {
4285 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4286 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4287 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4288 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4289 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4290 {
4291 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4292 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4293 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4294 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4295 }
4296 else
4297 {
4298 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4299 uPtr.pu16[4*2] = pCtx->fpu.CS;
4300 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4301 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4302 uPtr.pu16[6*2] = pCtx->fpu.DS;
4303 }
4304 }
4305}
4306
4307
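#if 0 /* Editor's illustrative sketch -- not used, structure names are hypothetical. The
       * two protected-mode FSTENV/FLDENV image layouts the helpers above and below
       * serialize: 14 bytes with a 16-bit operand size, 28 bytes with a 32-bit one.
       * (The real/V86-mode variants instead pack the linear FPUIP/FPUDP and FOP into
       * the selector slots, as done in the IEM_IS_REAL_OR_V86_MODE branches.) */
typedef struct SKETCHFPUENV16
{
    uint16_t FCW, FSW, FTW;
    uint16_t FPUIP, CS;                 /* instruction pointer offset + selector */
    uint16_t FPUDP, DS;                 /* data pointer offset + selector */
} SKETCHFPUENV16;                       /* 14 bytes */

typedef struct SKETCHFPUENV32
{
    uint32_t FCW, FSW, FTW;             /* 16 valid bits each */
    uint32_t FPUIP;
    uint32_t CSAndFop;                  /* CS selector in bits 15:0, FOP in bits 26:16 */
    uint32_t FPUDP;
    uint32_t DS;
} SKETCHFPUENV32;                       /* 28 bytes */
#endif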
4308/**
4309 * Common routine for fldenv and frstor.
4310 *
4311 * @param uPtr Where to load the state from.
4312 * @param pCtx The CPU context.
4313 */
4314static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4315{
4316 if (enmEffOpSize == IEMMODE_16BIT)
4317 {
4318 pCtx->fpu.FCW = uPtr.pu16[0];
4319 pCtx->fpu.FSW = uPtr.pu16[1];
4320 pCtx->fpu.FTW = uPtr.pu16[2];
4321 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4322 {
4323 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4324 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4325 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4326 pCtx->fpu.CS = 0;
4327 pCtx->fpu.DS = 0;
4328 }
4329 else
4330 {
4331 pCtx->fpu.FPUIP = uPtr.pu16[3];
4332 pCtx->fpu.CS = uPtr.pu16[4];
4333 pCtx->fpu.FPUDP = uPtr.pu16[5];
4334 pCtx->fpu.DS = uPtr.pu16[6];
4335 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4336 }
4337 }
4338 else
4339 {
4340 pCtx->fpu.FCW = uPtr.pu16[0*2];
4341 pCtx->fpu.FSW = uPtr.pu16[1*2];
4342 pCtx->fpu.FTW = uPtr.pu16[2*2];
4343 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4344 {
4345 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4346 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4347 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4348 pCtx->fpu.CS = 0;
4349 pCtx->fpu.DS = 0;
4350 }
4351 else
4352 {
4353 pCtx->fpu.FPUIP = uPtr.pu32[3];
4354 pCtx->fpu.CS = uPtr.pu16[4*2];
4355 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4356 pCtx->fpu.FPUDP = uPtr.pu32[5];
4357 pCtx->fpu.DS = uPtr.pu16[6*2];
4358 }
4359 }
4360
4361 /* Make adjustments. */
4362 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4363 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4364 iemFpuRecalcExceptionStatus(pCtx);
4365 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4366 * exceptions are pending after loading the saved state? */
4367}
4368
4369
4370/**
4371 * Implements 'FNSTENV'.
4372 *
4373 * @param enmEffOpSize The operand size (selects the 14 or 28 byte image).
4374 * @param iEffSeg The effective segment register for @a GCPtrEff.
4375 * @param GCPtrEffDst The address of the image.
4376 */
4377IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4378{
4379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4380 RTPTRUNION uPtr;
4381 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4382 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4383 if (rcStrict != VINF_SUCCESS)
4384 return rcStrict;
4385
4386 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4387
4388 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4389 if (rcStrict != VINF_SUCCESS)
4390 return rcStrict;
4391
4392 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4393 iemRegAddToRip(pIemCpu, cbInstr);
4394 return VINF_SUCCESS;
4395}
4396
4397
4398/**
4399 * Implements 'FLDENV'.
4400 *
4401 * @param enmEffOpSize The operand size (selects the 14 or 28 byte image).
4402 * @param iEffSeg The effective segment register for @a GCPtrEff.
4403 * @param GCPtrEffSrc The address of the image.
4404 */
4405IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4406{
4407 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4408 RTCPTRUNION uPtr;
4409 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4410 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4411 if (rcStrict != VINF_SUCCESS)
4412 return rcStrict;
4413
4414 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4415
4416 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4417 if (rcStrict != VINF_SUCCESS)
4418 return rcStrict;
4419
4420 iemRegAddToRip(pIemCpu, cbInstr);
4421 return VINF_SUCCESS;
4422}
4423
4424
4425/**
4426 * Implements 'FLDCW'.
4427 *
4428 * @param u16Fcw The new FCW.
4429 */
4430IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4431{
4432 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4433
4434 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4435 /** @todo Testcase: Try to see what happens when trying to set undefined bits
4436 * (other than 6 and 7). Currently ignoring them. */
4437 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4438 * according to FSW. (This is what is currently implemented.) */
4439 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4440 iemFpuRecalcExceptionStatus(pCtx);
4441
4442 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4443 iemRegAddToRip(pIemCpu, cbInstr);
4444 return VINF_SUCCESS;
4445}
4446
4447
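#if 0 /* Editor's illustrative sketch -- not used, and an assumption about what
       * iemFpuRecalcExceptionStatus does (its definition is not in this file): the FSW
       * error-summary bit (and, presumably, the mirrored B bit) is set iff any of the
       * six exception flags (IE, DE, ZE, OE, UE, PE) is pending and not masked by the
       * corresponding FCW mask bit. */
static uint16_t iemSketchRecalcFswEs(uint16_t uFsw, uint16_t uFcw)
{
    if (uFsw & ~uFcw & UINT16_C(0x003f))    /* pending & unmasked exception bits */
        uFsw |= X86_FSW_ES | X86_FSW_B;
    else
        uFsw &= ~(uint16_t)(X86_FSW_ES | X86_FSW_B);
    return uFsw;
}
#endif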
4448
4449/**
4450 * Implements the underflow case of fxch.
4451 *
4452 * @param iStReg The other stack register.
4453 */
4454IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4455{
4456 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4457
4458 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4459 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4460 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4461
4462 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4463 * registers are read as QNaN and then exchanged. This could be
4464 * wrong... */
4465 if (pCtx->fpu.FCW & X86_FCW_IM)
4466 {
4467 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4468 {
4469 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4470 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4471 else
4472 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4473 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4474 }
4475 else
4476 {
4477 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4478 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4479 }
4480 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4481 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4482 }
4483 else
4484 {
4485 /* raise underflow exception, don't change anything. */
4486 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4487 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4488 }
4489 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4490
4491 iemRegAddToRip(pIemCpu, cbInstr);
4492 return VINF_SUCCESS;
4493}
4494
4495
4496/**
4497 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4498 *
4499 * @param iStReg The other stack register to compare ST(0) with.
 * @param pfnAImpl The assembly comparison worker to call.
 * @param fPop Whether to pop the register stack afterwards.
4500 */
4501IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4502{
4503 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4504 Assert(iStReg < 8);
4505
4506 /*
4507 * Raise exceptions.
4508 */
4509 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4510 return iemRaiseDeviceNotAvailable(pIemCpu);
4511 uint16_t u16Fsw = pCtx->fpu.FSW;
4512 if (u16Fsw & X86_FSW_ES)
4513 return iemRaiseMathFault(pIemCpu);
4514
4515 /*
4516 * Check if any of the register accesses cause #SF + #IA (stack fault + invalid arithmetic).
4517 */
4518 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4519 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4520 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4521 {
4522 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4523 pCtx->fpu.FSW &= ~X86_FSW_C1;
4524 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4525 if ( !(u16Fsw & X86_FSW_IE)
4526 || (pCtx->fpu.FCW & X86_FCW_IM) )
4527 {
4528 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4529 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4530 }
4531 }
4532 else if (pCtx->fpu.FCW & X86_FCW_IM)
4533 {
4534 /* Masked underflow. */
4535 pCtx->fpu.FSW &= ~X86_FSW_C1;
4536 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4537 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4538 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4539 }
4540 else
4541 {
4542 /* Raise underflow - don't touch EFLAGS or TOP. */
4543 pCtx->fpu.FSW &= ~X86_FSW_C1;
4544 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4545 fPop = false;
4546 }
4547
4548 /*
4549 * Pop if necessary.
4550 */
4551 if (fPop)
4552 {
4553 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4554 pCtx->fpu.FSW &= ~X86_FSW_TOP_MASK;
4555 pCtx->fpu.FSW |= ((iReg1 + 1) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4556 }
4557
4558 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4559 iemRegAddToRip(pIemCpu, cbInstr);
4560 return VINF_SUCCESS;
4561}
4562
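#if 0 /* Editor's illustrative sketch -- not used, helper name is hypothetical. The
       * EFLAGS convention the assembly comparison worker called above is expected to
       * follow for FCOMI/FUCOMI: ZF/PF/CF encode the comparison outcome the same way
       * integer CMP plus unsigned branches expect, with the unordered case setting all
       * three flags. */
static uint32_t iemSketchFcomiEflags(int iCmp /* <0, 0, >0 */, bool fUnordered)
{
    if (fUnordered)
        return X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
    if (iCmp < 0)                       /* ST(0) < ST(i) */
        return X86_EFL_CF;
    if (iCmp == 0)                      /* ST(0) == ST(i) */
        return X86_EFL_ZF;
    return 0;                           /* ST(0) > ST(i) */
}
#endif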
4563/** @} */
4564