VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@39127

Last change on this file since 39127 was 39125, checked in by vboxsync, 13 years ago

IEM: Fixed some details in the AAM, SAHF and LAHF instruction emulation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 96.8 KB
1/* $Id: IEMAllCImpl.cpp.h 39125 2011-10-27 10:40:17Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
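    /* Fast path: in real mode, and in protected mode when CPL <= IOPL outside
       V8086, the port is accessible without consulting the TSS I/O permission
       bitmap; the bitmap lookup itself is still the @todo below. */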
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
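     * Example: 0xA5 (1010'0101b) has four set bits, so PF would be set;
     * 0x07 has three set bits and would leave PF clear.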
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
80
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
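    /* Run an 8-bit TEST on the result to recompute the arithmetic flags, then
       copy back only the requested bits; flags listed as undefined simply take
       whatever TEST produced for them. */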
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
99
100
101/** @} */
102
103/** @name C Implementations
104 * @{
105 */
106
107/**
108 * Implements a 16-bit popa.
109 */
110IEM_CIMPL_DEF_0(iemCImpl_popa_16)
111{
112 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
113 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
114 RTGCPTR GCPtrLast = GCPtrStart + 15;
115 VBOXSTRICTRC rcStrict;
116
117 /*
118 * The docs are a bit hard to comprehend here, but it looks like we wrap
119 * around in real mode as long as none of the individual "popa" crosses the
120 * end of the stack segment. In protected mode we check the whole access
121 * in one go. For efficiency, only do the word-by-word thing if we're in
122 * danger of wrapping around.
123 */
124 /** @todo do popa boundary / wrap-around checks. */
125 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
126 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
127 {
128 /* word-by-word */
129 RTUINT64U TmpRsp;
130 TmpRsp.u = pCtx->rsp;
131 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
132 if (rcStrict == VINF_SUCCESS)
133 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
134 if (rcStrict == VINF_SUCCESS)
135 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
136 if (rcStrict == VINF_SUCCESS)
137 {
138 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
139 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
140 }
141 if (rcStrict == VINF_SUCCESS)
142 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
143 if (rcStrict == VINF_SUCCESS)
144 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
145 if (rcStrict == VINF_SUCCESS)
146 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
147 if (rcStrict == VINF_SUCCESS)
148 {
149 pCtx->rsp = TmpRsp.u;
150 iemRegAddToRip(pIemCpu, cbInstr);
151 }
152 }
153 else
154 {
155 uint16_t const *pa16Mem = NULL;
156 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
157 if (rcStrict == VINF_SUCCESS)
158 {
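        /* The eight words sit in memory in PUSHA order: DI at the lowest
           address (slot 0) up to AX in slot 7, hence the 7 - X86_GREG_xXX
           indexing. */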
159 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
160 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
161 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
162 /* skip sp */
163 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
164 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
165 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
166 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
167 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
168 if (rcStrict == VINF_SUCCESS)
169 {
170 iemRegAddToRsp(pCtx, 16);
171 iemRegAddToRip(pIemCpu, cbInstr);
172 }
173 }
174 }
175 return rcStrict;
176}
177
178
179/**
180 * Implements a 32-bit popa.
181 */
182IEM_CIMPL_DEF_0(iemCImpl_popa_32)
183{
184 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
185 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
186 RTGCPTR GCPtrLast = GCPtrStart + 31;
187 VBOXSTRICTRC rcStrict;
188
189 /*
190 * The docs are a bit hard to comprehend here, but it looks like we wrap
191 * around in real mode as long as none of the individual "popa" crosses the
192 * end of the stack segment. In protected mode we check the whole access
193 * in one go. For efficiency, only do the word-by-word thing if we're in
194 * danger of wrapping around.
195 */
196 /** @todo do popa boundary / wrap-around checks. */
197 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
198 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
199 {
200 /* word-by-word */
201 RTUINT64U TmpRsp;
202 TmpRsp.u = pCtx->rsp;
203 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
204 if (rcStrict == VINF_SUCCESS)
205 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
206 if (rcStrict == VINF_SUCCESS)
207 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
208 if (rcStrict == VINF_SUCCESS)
209 {
210 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
211 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
212 }
213 if (rcStrict == VINF_SUCCESS)
214 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
215 if (rcStrict == VINF_SUCCESS)
216 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
217 if (rcStrict == VINF_SUCCESS)
218 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
219 if (rcStrict == VINF_SUCCESS)
220 {
221#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
222 pCtx->rdi &= UINT32_MAX;
223 pCtx->rsi &= UINT32_MAX;
224 pCtx->rbp &= UINT32_MAX;
225 pCtx->rbx &= UINT32_MAX;
226 pCtx->rdx &= UINT32_MAX;
227 pCtx->rcx &= UINT32_MAX;
228 pCtx->rax &= UINT32_MAX;
229#endif
230 pCtx->rsp = TmpRsp.u;
231 iemRegAddToRip(pIemCpu, cbInstr);
232 }
233 }
234 else
235 {
236 uint32_t const *pa32Mem;
237 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
238 if (rcStrict == VINF_SUCCESS)
239 {
240 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
241 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
242 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
243 /* skip esp */
244 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
245 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
246 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
247 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
248 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
249 if (rcStrict == VINF_SUCCESS)
250 {
251 iemRegAddToRsp(pCtx, 32);
252 iemRegAddToRip(pIemCpu, cbInstr);
253 }
254 }
255 }
256 return rcStrict;
257}
258
259
260/**
261 * Implements a 16-bit pusha.
262 */
263IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
264{
265 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
266 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
267 RTGCPTR GCPtrBottom = GCPtrTop - 15;
268 VBOXSTRICTRC rcStrict;
269
270 /*
271 * The docs are a bit hard to comprehend here, but it looks like we wrap
272 * around in real mode as long as none of the individual "pusha" crosses the
273 * end of the stack segment. In protected mode we check the whole access
274 * in one go. For efficiency, only do the word-by-word thing if we're in
275 * danger of wrapping around.
276 */
277 /** @todo do pusha boundary / wrap-around checks. */
278 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
279 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
280 {
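        /* GCPtrBottom > GCPtrTop means the subtraction wrapped around zero, so
           the 16-byte frame would straddle the wrap point; fall back to pushing
           word by word. */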
281 /* word-by-word */
282 RTUINT64U TmpRsp;
283 TmpRsp.u = pCtx->rsp;
284 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
285 if (rcStrict == VINF_SUCCESS)
286 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
287 if (rcStrict == VINF_SUCCESS)
288 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
289 if (rcStrict == VINF_SUCCESS)
290 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
291 if (rcStrict == VINF_SUCCESS)
292 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
293 if (rcStrict == VINF_SUCCESS)
294 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
295 if (rcStrict == VINF_SUCCESS)
296 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
297 if (rcStrict == VINF_SUCCESS)
298 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
299 if (rcStrict == VINF_SUCCESS)
300 {
301 pCtx->rsp = TmpRsp.u;
302 iemRegAddToRip(pIemCpu, cbInstr);
303 }
304 }
305 else
306 {
307 GCPtrBottom--;
308 uint16_t *pa16Mem = NULL;
309 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
310 if (rcStrict == VINF_SUCCESS)
311 {
312 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
313 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
314 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
315 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
316 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
317 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
318 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
319 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
320 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
321 if (rcStrict == VINF_SUCCESS)
322 {
323 iemRegSubFromRsp(pCtx, 16);
324 iemRegAddToRip(pIemCpu, cbInstr);
325 }
326 }
327 }
328 return rcStrict;
329}
330
331
332/**
333 * Implements a 32-bit pusha.
334 */
335IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
336{
337 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
338 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
339 RTGCPTR GCPtrBottom = GCPtrTop - 31;
340 VBOXSTRICTRC rcStrict;
341
342 /*
343 * The docs are a bit hard to comprehend here, but it looks like we wrap
344 * around in real mode as long as none of the individual "pusha" crosses the
345 * end of the stack segment. In protected mode we check the whole access
346 * in one go. For efficiency, only do the word-by-word thing if we're in
347 * danger of wrapping around.
348 */
349 /** @todo do pusha boundary / wrap-around checks. */
350 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
351 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
352 {
353 /* word-by-word */
354 RTUINT64U TmpRsp;
355 TmpRsp.u = pCtx->rsp;
356 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
357 if (rcStrict == VINF_SUCCESS)
358 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
359 if (rcStrict == VINF_SUCCESS)
360 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
361 if (rcStrict == VINF_SUCCESS)
362 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
363 if (rcStrict == VINF_SUCCESS)
364 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
365 if (rcStrict == VINF_SUCCESS)
366 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
367 if (rcStrict == VINF_SUCCESS)
368 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
369 if (rcStrict == VINF_SUCCESS)
370 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
371 if (rcStrict == VINF_SUCCESS)
372 {
373 pCtx->rsp = TmpRsp.u;
374 iemRegAddToRip(pIemCpu, cbInstr);
375 }
376 }
377 else
378 {
379 GCPtrBottom--;
380 uint32_t *pa32Mem;
381 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
382 if (rcStrict == VINF_SUCCESS)
383 {
384 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
385 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
386 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
387 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
388 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
389 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
390 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
391 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
392 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
393 if (rcStrict == VINF_SUCCESS)
394 {
395 iemRegSubFromRsp(pCtx, 32);
396 iemRegAddToRip(pIemCpu, cbInstr);
397 }
398 }
399 }
400 return rcStrict;
401}
402
403
404/**
405 * Implements pushf.
406 *
407 *
408 * @param enmEffOpSize The effective operand size.
409 */
410IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
411{
412 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
413
414 /*
415 * If we're in V8086 mode some care is required (which is why we're
416 * doing this in a C implementation).
417 */
418 uint32_t fEfl = pCtx->eflags.u;
419 if ( (fEfl & X86_EFL_VM)
420 && X86_EFL_GET_IOPL(fEfl) != 3 )
421 {
422 Assert(pCtx->cr0 & X86_CR0_PE);
423 if ( enmEffOpSize != IEMMODE_16BIT
424 || !(pCtx->cr4 & X86_CR4_VME))
425 return iemRaiseGeneralProtectionFault0(pIemCpu);
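        /* With CR4.VME the pushed 16-bit image shows VIF (bit 19) in the IF
           position (bit 9) instead of the real IF. */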
426 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
427 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
428 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
429 }
430
431 /*
432 * Ok, clear RF and VM and push the flags.
433 */
434 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
435
436 VBOXSTRICTRC rcStrict;
437 switch (enmEffOpSize)
438 {
439 case IEMMODE_16BIT:
440 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
441 break;
442 case IEMMODE_32BIT:
443 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
444 break;
445 case IEMMODE_64BIT:
446 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
447 break;
448 IEM_NOT_REACHED_DEFAULT_CASE_RET();
449 }
450 if (rcStrict != VINF_SUCCESS)
451 return rcStrict;
452
453 iemRegAddToRip(pIemCpu, cbInstr);
454 return VINF_SUCCESS;
455}
456
457
458/**
459 * Implements popf.
460 *
461 * @param enmEffOpSize The effective operand size.
462 */
463IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
464{
465 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
466 uint32_t const fEflOld = pCtx->eflags.u;
467 VBOXSTRICTRC rcStrict;
468 uint32_t fEflNew;
469
470 /*
471 * V8086 is special as usual.
472 */
473 if (fEflOld & X86_EFL_VM)
474 {
475 /*
476 * Almost anything goes if IOPL is 3.
477 */
478 if (X86_EFL_GET_IOPL(fEflOld) == 3)
479 {
480 switch (enmEffOpSize)
481 {
482 case IEMMODE_16BIT:
483 {
484 uint16_t u16Value;
485 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
486 if (rcStrict != VINF_SUCCESS)
487 return rcStrict;
488 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
489 break;
490 }
491 case IEMMODE_32BIT:
492 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
493 if (rcStrict != VINF_SUCCESS)
494 return rcStrict;
495 break;
496 IEM_NOT_REACHED_DEFAULT_CASE_RET();
497 }
498
499 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
500 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
501 }
502 /*
503 * Interrupt flag virtualization with CR4.VME=1.
504 */
505 else if ( enmEffOpSize == IEMMODE_16BIT
506 && (pCtx->cr4 & X86_CR4_VME) )
507 {
508 uint16_t u16Value;
509 RTUINT64U TmpRsp;
510 TmpRsp.u = pCtx->rsp;
511 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
512 if (rcStrict != VINF_SUCCESS)
513 return rcStrict;
514
515 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
516 * or before? */
517 if ( ( (u16Value & X86_EFL_IF)
518 && (fEflOld & X86_EFL_VIP))
519 || (u16Value & X86_EFL_TF) )
520 return iemRaiseGeneralProtectionFault0(pIemCpu);
521
522 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
523 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
524 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
525 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
526
527 pCtx->rsp = TmpRsp.u;
528 }
529 else
530 return iemRaiseGeneralProtectionFault0(pIemCpu);
531
532 }
533 /*
534 * Not in V8086 mode.
535 */
536 else
537 {
538 /* Pop the flags. */
539 switch (enmEffOpSize)
540 {
541 case IEMMODE_16BIT:
542 {
543 uint16_t u16Value;
544 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
545 if (rcStrict != VINF_SUCCESS)
546 return rcStrict;
547 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
548 break;
549 }
550 case IEMMODE_32BIT:
551 case IEMMODE_64BIT:
552 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
553 if (rcStrict != VINF_SUCCESS)
554 return rcStrict;
555 break;
556 IEM_NOT_REACHED_DEFAULT_CASE_RET();
557 }
558
559 /* Merge them with the current flags. */
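        /* Privilege decides what the popped value may change: CPL 0 (or an
           image that leaves IF/IOPL untouched) may update everything in
           X86_EFL_POPF_BITS, CPL <= IOPL may change IF but never IOPL, and
           anything else keeps both IF and IOPL from the old value. */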
560 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
561 || pIemCpu->uCpl == 0)
562 {
563 fEflNew &= X86_EFL_POPF_BITS;
564 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
565 }
566 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
567 {
568 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
569 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
570 }
571 else
572 {
573 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
574 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
575 }
576 }
577
578 /*
579 * Commit the flags.
580 */
581 Assert(fEflNew & RT_BIT_32(1));
582 pCtx->eflags.u = fEflNew;
583 iemRegAddToRip(pIemCpu, cbInstr);
584
585 return VINF_SUCCESS;
586}
587
588
589/**
590 * Implements a 16-bit indirect call.
591 *
592 * @param uNewPC The new program counter (RIP) value (loaded from the
593 * operand).
595 */
596IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
597{
598 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
599 uint16_t uOldPC = pCtx->ip + cbInstr;
600 if (uNewPC > pCtx->csHid.u32Limit)
601 return iemRaiseGeneralProtectionFault0(pIemCpu);
602
603 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
604 if (rcStrict != VINF_SUCCESS)
605 return rcStrict;
606
607 pCtx->rip = uNewPC;
608 return VINF_SUCCESS;
609
610}
611
612
613/**
614 * Implements a 16-bit relative call.
615 *
616 * @param offDisp The displacement offset.
617 */
618IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
619{
620 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
621 uint16_t uOldPC = pCtx->ip + cbInstr;
622 uint16_t uNewPC = uOldPC + offDisp;
623 if (uNewPC > pCtx->csHid.u32Limit)
624 return iemRaiseGeneralProtectionFault0(pIemCpu);
625
626 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
627 if (rcStrict != VINF_SUCCESS)
628 return rcStrict;
629
630 pCtx->rip = uNewPC;
631 return VINF_SUCCESS;
632}
633
634
635/**
636 * Implements a 32-bit indirect call.
637 *
638 * @param uNewPC The new program counter (RIP) value (loaded from the
639 * operand).
641 */
642IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
643{
644 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
645 uint32_t uOldPC = pCtx->eip + cbInstr;
646 if (uNewPC > pCtx->csHid.u32Limit)
647 return iemRaiseGeneralProtectionFault0(pIemCpu);
648
649 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
650 if (rcStrict != VINF_SUCCESS)
651 return rcStrict;
652
653 pCtx->rip = uNewPC;
654 return VINF_SUCCESS;
655
656}
657
658
659/**
660 * Implements a 32-bit relative call.
661 *
662 * @param offDisp The displacement offset.
663 */
664IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
665{
666 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
667 uint32_t uOldPC = pCtx->eip + cbInstr;
668 uint32_t uNewPC = uOldPC + offDisp;
669 if (uNewPC > pCtx->csHid.u32Limit)
670 return iemRaiseGeneralProtectionFault0(pIemCpu);
671
672 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
673 if (rcStrict != VINF_SUCCESS)
674 return rcStrict;
675
676 pCtx->rip = uNewPC;
677 return VINF_SUCCESS;
678}
679
680
681/**
682 * Implements a 64-bit indirect call.
683 *
684 * @param uNewPC The new program counter (RIP) value (loaded from the
685 * operand).
687 */
688IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
689{
690 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
691 uint64_t uOldPC = pCtx->rip + cbInstr;
692 if (!IEM_IS_CANONICAL(uNewPC))
693 return iemRaiseGeneralProtectionFault0(pIemCpu);
694
695 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
696 if (rcStrict != VINF_SUCCESS)
697 return rcStrict;
698
699 pCtx->rip = uNewPC;
700 return VINF_SUCCESS;
701
702}
703
704
705/**
706 * Implements a 64-bit relative call.
707 *
708 * @param offDisp The displacement offset.
709 */
710IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
711{
712 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
713 uint64_t uOldPC = pCtx->rip + cbInstr;
714 uint64_t uNewPC = uOldPC + offDisp;
715 if (!IEM_IS_CANONICAL(uNewPC))
716 return iemRaiseNotCanonical(pIemCpu);
717
718 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
719 if (rcStrict != VINF_SUCCESS)
720 return rcStrict;
721
722 pCtx->rip = uNewPC;
723 return VINF_SUCCESS;
724}
725
726
727/**
728 * Implements far jumps.
729 *
730 * @param uSel The selector.
731 * @param offSeg The segment offset.
732 * @param enmEffOpSize The effective operand size.
733 */
734IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint32_t, offSeg, IEMMODE, enmEffOpSize)
735{
736 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
737 NOREF(cbInstr);
738
739 /*
740 * Real mode and V8086 mode are easy. The only snag seems to be that
741 * CS.limit doesn't change and the limit check is done against the current
742 * limit.
743 */
744 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
745 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
746 {
747 if (offSeg > pCtx->csHid.u32Limit)
748 return iemRaiseGeneralProtectionFault0(pIemCpu);
749
750 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
751 pCtx->rip = offSeg;
752 else
753 pCtx->rip = offSeg & UINT16_MAX;
754 pCtx->cs = uSel;
755 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
756 /** @todo REM reset the accessed bit (see on jmp far16 after disabling
757 * PE. Check with VT-x and AMD-V. */
758#ifdef IEM_VERIFICATION_MODE
759 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
760#endif
761 return VINF_SUCCESS;
762 }
763
764 /*
765 * Protected mode. Need to parse the specified descriptor...
766 */
767 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
768 {
769 Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
770 return iemRaiseGeneralProtectionFault0(pIemCpu);
771 }
772
773 /* Fetch the descriptor. */
774 IEMSELDESC Desc;
775 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
776 if (rcStrict != VINF_SUCCESS)
777 return rcStrict;
778
779 /* Is it there? */
780 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
781 {
782 Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
783 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
784 }
785
786 /*
787 * Deal with it according to its type.
788 */
789 if (Desc.Legacy.Gen.u1DescType)
790 {
791 /* Only code segments. */
792 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
793 {
794 Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
795 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
796 }
797
798 /* L vs D. */
799 if ( Desc.Legacy.Gen.u1Long
800 && Desc.Legacy.Gen.u1DefBig
801 && IEM_IS_LONG_MODE(pIemCpu))
802 {
803 Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
804 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
805 }
806
807 /* DPL/RPL/CPL check, where conforming segments make a difference. */
808 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
809 {
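            /* Conforming code segment: the far jump is allowed whenever
               DPL <= CPL, and CPL itself stays unchanged. */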
810 if (Desc.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
811 {
812 Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
813 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
814 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
815 }
816 }
817 else
818 {
819 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
820 {
821 Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
822 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
823 }
824 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
825 {
826 Log(("jmpf %04x:%08x -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
827 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
828 }
829 }
830
831 /* Limit check. (Should alternatively check for non-canonical addresses
832 here, but that is ruled out by offSeg being 32-bit, right?) */
833 uint64_t u64Base;
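        /* A granular limit is expressed in 4 KiB units: scale it up and fill
           in the low 12 bits so the bounds check below works on bytes. */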
834 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
835 if (Desc.Legacy.Gen.u1Granularity)
836 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
837 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
838 u64Base = 0;
839 else
840 {
841 if (offSeg > cbLimit)
842 {
843 Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
844 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
845 }
846 u64Base = X86DESC_BASE(Desc.Legacy);
847 }
848
849 /*
850 * Ok, everything checked out fine. Now set the accessed bit before
851 * committing the result into CS, CSHID and RIP.
852 */
853 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
854 {
855 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
856 if (rcStrict != VINF_SUCCESS)
857 return rcStrict;
858#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
859 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
860#endif
861 }
862
863 /* commit */
864 pCtx->rip = offSeg;
865 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
866 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
867 pCtx->csHid.Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
868 pCtx->csHid.u32Limit = cbLimit;
869 pCtx->csHid.u64Base = u64Base;
870 /** @todo check if the hidden bits are loaded correctly for 64-bit
871 * mode. */
872 return VINF_SUCCESS;
873 }
874
875 /*
876 * System selector.
877 */
878 if (IEM_IS_LONG_MODE(pIemCpu))
879 switch (Desc.Legacy.Gen.u4Type)
880 {
881 case AMD64_SEL_TYPE_SYS_LDT:
882 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
883 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
884 case AMD64_SEL_TYPE_SYS_CALL_GATE:
885 case AMD64_SEL_TYPE_SYS_INT_GATE:
886 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
887 /* Call various functions to do the work. */
888 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
889 default:
890 Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
891 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
892
893 }
894 switch (Desc.Legacy.Gen.u4Type)
895 {
896 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
897 case X86_SEL_TYPE_SYS_LDT:
898 case X86_SEL_TYPE_SYS_286_CALL_GATE:
899 case X86_SEL_TYPE_SYS_TASK_GATE:
900 case X86_SEL_TYPE_SYS_286_INT_GATE:
901 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
902 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
903 case X86_SEL_TYPE_SYS_386_CALL_GATE:
904 case X86_SEL_TYPE_SYS_386_INT_GATE:
905 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
906 /* Call various functions to do the work. */
907 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
908
909 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
910 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
911 /* Call various functions to do the work. */
912 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
913
914 default:
915 Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
916 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
917 }
918}
919
920
921/**
922 * Implements far calls.
923 *
924 * @param uSel The selector.
925 * @param offSeg The segment offset.
926 * @param enmOpSize The operand size (in case we need it).
927 */
928IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
929{
930 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
931 VBOXSTRICTRC rcStrict;
932 uint64_t uNewRsp;
933 void *pvRet;
934
935 /*
936 * Real mode and V8086 mode are easy. The only snag seems to be that
937 * CS.limit doesn't change and the limit check is done against the current
938 * limit.
939 */
940 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
941 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
942 {
943 Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);
944
945 /* Check stack first - may #SS(0). */
946 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 6 : 4,
947 &pvRet, &uNewRsp);
948 if (rcStrict != VINF_SUCCESS)
949 return rcStrict;
950
951 /* Check the target address range. */
952 if (offSeg > UINT32_MAX)
953 return iemRaiseGeneralProtectionFault0(pIemCpu);
954
955 /* Everything is fine, push the return address. */
956 if (enmOpSize == IEMMODE_16BIT)
957 {
958 ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
959 ((uint16_t *)pvRet)[1] = pCtx->cs;
960 }
961 else
962 {
963 ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
964 ((uint16_t *)pvRet)[3] = pCtx->cs;
965 }
966 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
967 if (rcStrict != VINF_SUCCESS)
968 return rcStrict;
969
970 /* Branch. */
971 pCtx->rip = offSeg;
972 pCtx->cs = uSel;
973 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
974 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
975 * after disabling PE.) Check with VT-x and AMD-V. */
976#ifdef IEM_VERIFICATION_MODE
977 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
978#endif
979 return VINF_SUCCESS;
980 }
981
982 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
983}
984
985
986/**
987 * Implements retf.
988 *
989 * @param enmEffOpSize The effective operand size.
990 * @param cbPop The amount of arguments to pop from the stack
991 * (bytes).
992 */
993IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
994{
995 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
996 VBOXSTRICTRC rcStrict;
997 uint64_t uNewRsp;
998 NOREF(cbInstr);
999
1000 /*
1001 * Real mode and V8086 mode are easy.
1002 */
1003 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1004 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1005 {
1006 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1007 uint16_t const *pu16Frame;
1008 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
1009 (void const **)&pu16Frame, &uNewRsp);
1010 if (rcStrict != VINF_SUCCESS)
1011 return rcStrict;
1012 uint32_t uNewEip;
1013 uint16_t uNewCS;
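        /* The frame holds IP/EIP at the lowest address with CS above it, so a
           32-bit retf reads EIP from the first two words and CS from the
           third, while the 16-bit form uses word 0 and word 1. */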
1014 if (enmEffOpSize == IEMMODE_32BIT)
1015 {
1016 uNewCS = pu16Frame[2];
1017 uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
1018 }
1019 else
1020 {
1021 uNewCS = pu16Frame[1];
1022 uNewEip = pu16Frame[0];
1023 }
1024 /** @todo check how this is supposed to work if sp=0xfffe. */
1025
1026 /* Check the limit of the new EIP. */
1027 /** @todo Intel pseudo code only does the limit check for 16-bit
1028 * operands, AMD does not make any distinction. What is right? */
1029 if (uNewEip > pCtx->csHid.u32Limit)
1030 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1031
1032 /* commit the operation. */
1033 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1034 if (rcStrict != VINF_SUCCESS)
1035 return rcStrict;
1036 pCtx->rip = uNewEip;
1037 pCtx->cs = uNewCS;
1038 pCtx->csHid.u64Base = (uint32_t)uNewCS << 4;
1039 /** @todo do we load attribs and limit as well? */
1040 if (cbPop)
1041 iemRegAddToRsp(pCtx, cbPop);
1042 return VINF_SUCCESS;
1043 }
1044
1045 AssertFailed();
1046 return VERR_NOT_IMPLEMENTED;
1047}
1048
1049
1050/**
1051 * Implements retn.
1052 *
1053 * We're doing this in C because of the \#GP that might be raised if the popped
1054 * program counter is out of bounds.
1055 *
1056 * @param enmEffOpSize The effective operand size.
1057 * @param cbPop The amount of arguments to pop from the stack
1058 * (bytes).
1059 */
1060IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1061{
1062 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1063 NOREF(cbInstr);
1064
1065 /* Fetch the RSP from the stack. */
1066 VBOXSTRICTRC rcStrict;
1067 RTUINT64U NewRip;
1068 RTUINT64U NewRsp;
1069 NewRsp.u = pCtx->rsp;
1070 switch (enmEffOpSize)
1071 {
1072 case IEMMODE_16BIT:
1073 NewRip.u = 0;
1074 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1075 break;
1076 case IEMMODE_32BIT:
1077 NewRip.u = 0;
1078 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1079 break;
1080 case IEMMODE_64BIT:
1081 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1082 break;
1083 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1084 }
1085 if (rcStrict != VINF_SUCCESS)
1086 return rcStrict;
1087
1088 /* Check the new RSP before loading it. */
1089 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1090 * of it. The canonical test is performed here and for call. */
1091 if (enmEffOpSize != IEMMODE_64BIT)
1092 {
1093 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
1094 {
1095 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
1096 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1097 }
1098 }
1099 else
1100 {
1101 if (!IEM_IS_CANONICAL(NewRip.u))
1102 {
1103 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1104 return iemRaiseNotCanonical(pIemCpu);
1105 }
1106 }
1107
1108 /* Commit it. */
1109 pCtx->rip = NewRip.u;
1110 pCtx->rsp = NewRsp.u;
1111 if (cbPop)
1112 iemRegAddToRsp(pCtx, cbPop);
1113
1114 return VINF_SUCCESS;
1115}
1116
1117
1118/**
1119 * Implements leave.
1120 *
1121 * We're doing this in C because messing with the stack registers is annoying
1122 * since they depend on SS attributes.
1123 *
1124 * @param enmEffOpSize The effective operand size.
1125 */
1126IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1127{
1128 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1129
1130 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1131 RTUINT64U NewRsp;
1132 if (pCtx->ssHid.Attr.n.u1Long)
1133 NewRsp.u = pCtx->rbp;
1134 else if (pCtx->ssHid.Attr.n.u1DefBig)
1135 NewRsp.u = pCtx->ebp;
1136 else
1137 {
1138 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1139 NewRsp.u = pCtx->rsp;
1140 NewRsp.Words.w0 = pCtx->bp;
1141 }
1142
1143 /* Pop RBP according to the operand size. */
1144 VBOXSTRICTRC rcStrict;
1145 RTUINT64U NewRbp;
1146 switch (enmEffOpSize)
1147 {
1148 case IEMMODE_16BIT:
1149 NewRbp.u = pCtx->rbp;
1150 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1151 break;
1152 case IEMMODE_32BIT:
1153 NewRbp.u = 0;
1154 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1155 break;
1156 case IEMMODE_64BIT:
1157 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1158 break;
1159 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1160 }
1161 if (rcStrict != VINF_SUCCESS)
1162 return rcStrict;
1163
1164
1165 /* Commit it. */
1166 pCtx->rbp = NewRbp.u;
1167 pCtx->rsp = NewRsp.u;
1168 iemRegAddToRip(pIemCpu, cbInstr);
1169
1170 return VINF_SUCCESS;
1171}
1172
1173
1174/**
1175 * Implements int3 and int XX.
1176 *
1177 * @param u8Int The interrupt vector number.
1178 * @param fIsBpInstr Is it the breakpoint instruction.
1179 */
1180IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1181{
1182 Assert(pIemCpu->cXcptRecursions == 0);
1183 return iemRaiseXcptOrInt(pIemCpu,
1184 cbInstr,
1185 u8Int,
1186 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1187 0,
1188 0);
1189}
1190
1191
1192/**
1193 * Implements iret for real mode and V8086 mode.
1194 *
1195 * @param enmEffOpSize The effective operand size.
1196 */
1197IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1198{
1199 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1200 NOREF(cbInstr);
1201
1202 /*
1203 * iret throws an exception if VME isn't enabled.
1204 */
1205 if ( pCtx->eflags.Bits.u1VM
1206 && !(pCtx->cr4 & X86_CR4_VME))
1207 return iemRaiseGeneralProtectionFault0(pIemCpu);
1208
1209 /*
1210 * Do the stack bits, but don't commit RSP before everything checks
1211 * out right.
1212 */
1213 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1214 VBOXSTRICTRC rcStrict;
1215 RTCPTRUNION uFrame;
1216 uint16_t uNewCS;
1217 uint32_t uNewEip;
1218 uint32_t uNewFlags;
1219 uint64_t uNewRsp;
1220 if (enmEffOpSize == IEMMODE_32BIT)
1221 {
1222 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1223 if (rcStrict != VINF_SUCCESS)
1224 return rcStrict;
1225 uNewEip = uFrame.pu32[0];
1226 uNewCS = (uint16_t)uFrame.pu32[1];
1227 uNewFlags = uFrame.pu32[2];
1228 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1229 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1230 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1231 | X86_EFL_ID;
1232 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1233 }
1234 else
1235 {
1236 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1237 if (rcStrict != VINF_SUCCESS)
1238 return rcStrict;
1239 uNewEip = uFrame.pu16[0];
1240 uNewCS = uFrame.pu16[1];
1241 uNewFlags = uFrame.pu16[2];
1242 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1243 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1244 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1245 /** @todo The intel pseudo code does not indicate what happens to
1246 * reserved flags. We just ignore them. */
1247 }
1248 /** @todo Check how this is supposed to work if sp=0xfffe. */
1249
1250 /*
1251 * Check the limit of the new EIP.
1252 */
1253 /** @todo Only the AMD pseudo code check the limit here, what's
1254 * right? */
1255 if (uNewEip > pCtx->csHid.u32Limit)
1256 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1257
1258 /*
1259 * V8086 checks and flag adjustments
1260 */
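    /* With IOPL 3 the popped image may change everything except IOPL itself
       (RF is cleared); with CR4.VME and a 16-bit frame the popped IF is
       virtualized through VIF; any other combination gets #GP(0). */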
1261 if (pCtx->eflags.Bits.u1VM)
1262 {
1263 if (pCtx->eflags.Bits.u2IOPL == 3)
1264 {
1265 /* Preserve IOPL and clear RF. */
1266 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1267 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1268 }
1269 else if ( enmEffOpSize == IEMMODE_16BIT
1270 && ( !(uNewFlags & X86_EFL_IF)
1271 || !pCtx->eflags.Bits.u1VIP )
1272 && !(uNewFlags & X86_EFL_TF) )
1273 {
1274 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1275 uNewFlags &= ~X86_EFL_VIF;
1276 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1277 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1278 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1279 }
1280 else
1281 return iemRaiseGeneralProtectionFault0(pIemCpu);
1282 }
1283
1284 /*
1285 * Commit the operation.
1286 */
1287 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1288 if (rcStrict != VINF_SUCCESS)
1289 return rcStrict;
1290 pCtx->rip = uNewEip;
1291 pCtx->cs = uNewCS;
1292 pCtx->csHid.u64Base = (uint32_t)uNewCS << 4;
1293 /** @todo do we load attribs and limit as well? */
1294 Assert(uNewFlags & X86_EFL_1);
1295 pCtx->eflags.u = uNewFlags;
1296
1297 return VINF_SUCCESS;
1298}
1299
1300
1301/**
1302 * Implements iret for protected mode
1303 *
1304 * @param enmEffOpSize The effective operand size.
1305 */
1306IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
1307{
1308 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1309 NOREF(cbInstr);
1310
1311 /*
1312 * Nested task return.
1313 */
1314 if (pCtx->eflags.Bits.u1NT)
1315 {
1316 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1317 }
1318 /*
1319 * Normal return.
1320 */
1321 else
1322 {
1323 /*
1324 * Do the stack bits, but don't commit RSP before everything checks
1325 * out right.
1326 */
1327 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1328 VBOXSTRICTRC rcStrict;
1329 RTCPTRUNION uFrame;
1330 uint16_t uNewCS;
1331 uint32_t uNewEip;
1332 uint32_t uNewFlags;
1333 uint64_t uNewRsp;
1334 if (enmEffOpSize == IEMMODE_32BIT)
1335 {
1336 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1337 if (rcStrict != VINF_SUCCESS)
1338 return rcStrict;
1339 uNewEip = uFrame.pu32[0];
1340 uNewCS = (uint16_t)uFrame.pu32[1];
1341 uNewFlags = uFrame.pu32[2];
1342 }
1343 else
1344 {
1345 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1346 if (rcStrict != VINF_SUCCESS)
1347 return rcStrict;
1348 uNewEip = uFrame.pu16[0];
1349 uNewCS = uFrame.pu16[1];
1350 uNewFlags = uFrame.pu16[2];
1351 }
1352 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
1353 if (rcStrict != VINF_SUCCESS)
1354 return rcStrict;
1355
1356 /*
1357 * What are we returning to?
1358 */
1359 if ( (uNewFlags & X86_EFL_VM)
1360 && pIemCpu->uCpl == 0)
1361 {
1362 /* V8086 mode! */
1363 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1364 }
1365 else
1366 {
1367 /*
1368 * Protected mode.
1369 */
1370 /* Read the CS descriptor. */
1371 if (!(uNewCS & (X86_SEL_MASK | X86_SEL_LDT)))
1372 {
1373 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCS, uNewEip));
1374 return iemRaiseGeneralProtectionFault0(pIemCpu);
1375 }
1376
1377 IEMSELDESC DescCS;
1378 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS);
1379 if (rcStrict != VINF_SUCCESS)
1380 return rcStrict;
1381
1382 /* Must be a code descriptor. */
1383 if (!DescCS.Legacy.Gen.u1DescType)
1384 {
1385 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
1386 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1387 }
1388 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1389 {
1390 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
1391 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1392 }
1393
1394 /* Privilege checks. */
1395 if ((uNewCS & X86_SEL_RPL) < pIemCpu->uCpl)
1396 {
1397 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCS, uNewEip, pIemCpu->uCpl));
1398 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1399 }
1400 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1401 && (uNewCS & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
1402 {
1403 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u2Dpl));
1404 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1405 }
1406
1407 /* Present? */
1408 if (!DescCS.Legacy.Gen.u1Present)
1409 {
1410 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCS, uNewEip));
1411 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
1412 }
1413
1414 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
1415 if (DescCS.Legacy.Gen.u1Granularity)
1416 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1417
1418 /*
1419 * Different level?
1420 */
1421 if ((uNewCS & X86_SEL_RPL) != pIemCpu->uCpl)
1422 {
1423 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1424 }
1425 /*
1426 * Same level.
1427 */
1428 else
1429 {
1430 /* Check EIP. */
1431 if (uNewEip > cbLimitCS)
1432 {
1433 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewEip, cbLimitCS));
1434 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCS);
1435 }
1436
1437 /*
1438 * Commit the changes, marking CS first since it may fail.
1439 */
1440 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1441 {
1442 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1443 if (rcStrict != VINF_SUCCESS)
1444 return rcStrict;
1445 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1446 }
1447
1448 pCtx->rip = uNewEip;
1449 pCtx->cs = uNewCS;
1450 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
1451 pCtx->csHid.u32Limit = cbLimitCS;
1452 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
1453 pCtx->rsp = uNewRsp;
1454
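                /* What the popped image may change depends on privilege:
                   everyone gets the arithmetic flags plus TF/DF/NT, 32-bit
                   frames add RF/AC/ID, CPL 0 may also touch IF, IOPL and the
                   virtual-interrupt bits, and CPL <= IOPL may touch IF only. */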
1455 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1456 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
1457 if (enmEffOpSize != IEMMODE_16BIT)
1458 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
1459 if (pIemCpu->uCpl == 0)
1460 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
1461 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
1462 fEFlagsMask |= X86_EFL_IF;
1463 pCtx->eflags.u &= ~fEFlagsMask;
1464 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
1465 /* Done! */
1466 }
1467 }
1468 }
1469
1470 return VINF_SUCCESS;
1471}
1472
1473
1474/**
1475 * Implements iret for long mode
1476 *
1477 * @param enmEffOpSize The effective operand size.
1478 */
1479IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
1480{
1481 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1482 //VBOXSTRICTRC rcStrict;
1483 //uint64_t uNewRsp;
1484
1485 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
1486 return VERR_NOT_IMPLEMENTED;
1487}
1488
1489
1490/**
1491 * Implements iret.
1492 *
1493 * @param enmEffOpSize The effective operand size.
1494 */
1495IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
1496{
1497 /*
1498 * Call a mode specific worker.
1499 */
1500 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1501 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1502 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
1503 if (IEM_IS_LONG_MODE(pIemCpu))
1504 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
1505
1506 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
1507}
1508
1509
1510/**
1511 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
1512 *
1513 * @param iSegReg The segment register number (valid).
1514 * @param uSel The new selector value.
1515 */
1516IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
1517{
1518 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
1519 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
1520 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
1521
1522 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
1523
1524 /*
1525 * Real mode and V8086 mode are easy.
1526 */
1527 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1528 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1529 {
1530 *pSel = uSel;
1531 pHid->u64Base = (uint32_t)uSel << 4;
1532 /** @todo Does the CPU actually load limits and attributes in the
1533 * real/V8086 mode segment load case? It doesn't for CS in far
1534 * jumps... Affects unreal mode. */
1535 pHid->u32Limit = 0xffff;
1536 pHid->Attr.u = 0;
1537 pHid->Attr.n.u1Present = 1;
1538 pHid->Attr.n.u1DescType = 1;
1539 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
1540 ? X86_SEL_TYPE_RW
1541 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
1542
1543 iemRegAddToRip(pIemCpu, cbInstr);
1544 return VINF_SUCCESS;
1545 }
1546
1547 /*
1548 * Protected mode.
1549 *
1550 * Check if it's a null segment selector value first, that's OK for DS, ES,
1551 * FS and GS. If not null, then we have to load and parse the descriptor.
1552 */
1553 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1554 {
1555 if (iSegReg == X86_SREG_SS)
1556 {
1557 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
1558 || pIemCpu->uCpl != 0
1559 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
1560 {
1561 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
1562 return iemRaiseGeneralProtectionFault0(pIemCpu);
1563 }
1564
1565 /* In 64-bit kernel mode, the stack can be 0 because of the way
1566 interrupts are dispatched when in kernel ctx. Just load the
1567 selector value into the register and leave the hidden bits
1568 as is. */
1569 *pSel = uSel;
1570 iemRegAddToRip(pIemCpu, cbInstr);
1571 return VINF_SUCCESS;
1572 }
1573
1574 *pSel = uSel; /* Not RPL, remember :-) */
1575 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
1576 && iSegReg != X86_SREG_FS
1577 && iSegReg != X86_SREG_GS)
1578 {
1579 /** @todo figure out what this actually does, it works. Needs
1580 * testcase! */
1581 pHid->Attr.u = 0;
1582 pHid->Attr.n.u1Present = 1;
1583 pHid->Attr.n.u1Long = 1;
1584 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
1585 pHid->Attr.n.u2Dpl = 3;
1586 pHid->u32Limit = 0;
1587 pHid->u64Base = 0;
1588 }
1589 else
1590 {
1591 pHid->Attr.u = 0;
1592 pHid->u32Limit = 0;
1593 pHid->u64Base = 0;
1594 }
1595 iemRegAddToRip(pIemCpu, cbInstr);
1596 return VINF_SUCCESS;
1597 }
1598
1599 /* Fetch the descriptor. */
1600 IEMSELDESC Desc;
1601 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1602 if (rcStrict != VINF_SUCCESS)
1603 return rcStrict;
1604
1605 /* Check GPs first. */
1606 if (!Desc.Legacy.Gen.u1DescType)
1607 {
1608 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
1609 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1610 }
1611 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
1612 {
1613 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1614 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1615 {
1616 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
1617 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1618 }
1625 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
1626 {
1627 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
1628 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1629 }
1630 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1631 {
1632 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1633 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1634 }
1635 }
1636 else
1637 {
1638 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
1639 {
1640 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
1641 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1642 }
1643 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1644 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1645 {
1646#if 0 /* this is what intel says. */
1647 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
1648 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1649 {
1650 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
1651 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1652 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1653 }
1654#else /* this is what makes more sense. */
1655 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
1656 {
1657 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
1658 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
1659 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1660 }
1661 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1662 {
1663 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
1664 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1665 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1666 }
1667#endif
1668 }
1669 }
1670
1671 /* Is it there? */
1672 if (!Desc.Legacy.Gen.u1Present)
1673 {
1674 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
1675 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1676 }
1677
1678 /* The base and limit. */
1679 uint64_t u64Base;
1680 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1681 if (Desc.Legacy.Gen.u1Granularity)
1682 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1683
1684 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
1685 && iSegReg < X86_SREG_FS)
1686 u64Base = 0;
1687 else
1688 u64Base = X86DESC_BASE(Desc.Legacy);
1689
1690 /*
1691 * Ok, everything checked out fine. Now set the accessed bit before
1692 * committing the result into the registers.
1693 */
1694 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1695 {
1696 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1697 if (rcStrict != VINF_SUCCESS)
1698 return rcStrict;
1699 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1700 }
1701
1702 /* commit */
1703 *pSel = uSel;
1704 pHid->Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
1705 pHid->u32Limit = cbLimit;
1706 pHid->u64Base = u64Base;
1707
1708 /** @todo check if the hidden bits are loaded correctly for 64-bit
1709 * mode. */
1710
1711 iemRegAddToRip(pIemCpu, cbInstr);
1712 return VINF_SUCCESS;
1713}
1714
1715
1716/**
1717 * Implements 'mov SReg, r/m'.
1718 *
1719 * @param iSegReg The segment register number (valid).
1720 * @param uSel The new selector value.
1721 */
1722IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
1723{
1724 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1725 if (rcStrict == VINF_SUCCESS)
1726 {
1727 if (iSegReg == X86_SREG_SS)
1728 {
1729 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1730 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
1731 }
1732 }
1733 return rcStrict;
1734}
1735
1736
1737/**
1738 * Implements 'pop SReg'.
1739 *
1740 * @param iSegReg The segment register number (valid).
1741 * @param enmEffOpSize The effective operand size (valid).
1742 */
1743IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
1744{
1745 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1746 VBOXSTRICTRC rcStrict;
1747
1748 /*
1749 * Read the selector off the stack and join paths with mov ss, reg.
1750 */
1751 RTUINT64U TmpRsp;
1752 TmpRsp.u = pCtx->rsp;
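    /* Work on a copy of RSP so a failed segment load leaves the guest stack
       pointer untouched; it is only committed below on success. */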
1753 switch (enmEffOpSize)
1754 {
1755 case IEMMODE_16BIT:
1756 {
1757 uint16_t uSel;
1758 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
1759 if (rcStrict == VINF_SUCCESS)
1760 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1761 break;
1762 }
1763
1764 case IEMMODE_32BIT:
1765 {
1766 uint32_t u32Value;
1767 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
1768 if (rcStrict == VINF_SUCCESS)
1769 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
1770 break;
1771 }
1772
1773 case IEMMODE_64BIT:
1774 {
1775 uint64_t u64Value;
1776 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
1777 if (rcStrict == VINF_SUCCESS)
1778 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
1779 break;
1780 }
1781 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1782 }
1783
1784 /*
1785 * Commit the stack on success.
1786 */
1787 if (rcStrict == VINF_SUCCESS)
1788 {
1789 pCtx->rsp = TmpRsp.u;
1790 if (iSegReg == X86_SREG_SS)
1791 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
1792 }
1793 return rcStrict;
1794}
1795
1796
1797/**
1798 * Implements lgs, lfs, les, lds & lss.
1799 */
1800IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
1801 uint16_t, uSel,
1802 uint64_t, offSeg,
1803 uint8_t, iSegReg,
1804 uint8_t, iGReg,
1805 IEMMODE, enmEffOpSize)
1806{
1807 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
1808 VBOXSTRICTRC rcStrict;
1809
1810 /*
1811 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
1812 */
1813 /** @todo verify and test that mov, pop and lXs works the segment
1814 * register loading in the exact same way. */
1815 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1816 if (rcStrict == VINF_SUCCESS)
1817 {
1818 switch (enmEffOpSize)
1819 {
1820 case IEMMODE_16BIT:
1821 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1822 break;
1823 case IEMMODE_32BIT:
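                /* A 32-bit register write zero-extends to 64 bits, so the
                   32-bit and 64-bit cases can share the full-width store. */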
1824 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1825 break;
1826 case IEMMODE_64BIT:
1827 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1828 break;
1829 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1830 }
1831 }
1832
1833 return rcStrict;
1834}
1835
1836
1837/**
1838 * Implements lgdt.
1839 *
1840 * @param iEffSeg The segment of the new gdtr contents.
1841 * @param GCPtrEffSrc The address of the new gdtr contents.
1842 * @param enmEffOpSize The effective operand size.
1843 */
1844IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
1845{
1846 if (pIemCpu->uCpl != 0)
1847 return iemRaiseGeneralProtectionFault0(pIemCpu);
1848 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
1849
1850 /*
1851 * Fetch the limit and base address.
1852 */
1853 uint16_t cbLimit;
1854 RTGCPTR GCPtrBase;
1855 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
1856 if (rcStrict == VINF_SUCCESS)
1857 {
1858 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1859 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
1860 else
1861 {
1862 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1863 pCtx->gdtr.cbGdt = cbLimit;
1864 pCtx->gdtr.pGdt = GCPtrBase;
1865 }
1866 if (rcStrict == VINF_SUCCESS)
1867 iemRegAddToRip(pIemCpu, cbInstr);
1868 }
1869 return rcStrict;
1870}
1871
1872
1873/**
1874 * Implements lidt.
1875 *
1876 * @param iEffSeg The segment of the new idtr contents.
1877 * @param GCPtrEffSrc The address of the new idtr contents.
1878 * @param enmEffOpSize The effective operand size.
1879 */
1880IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
1881{
1882 if (pIemCpu->uCpl != 0)
1883 return iemRaiseGeneralProtectionFault0(pIemCpu);
1884 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
1885
1886 /*
1887 * Fetch the limit and base address.
1888 */
1889 uint16_t cbLimit;
1890 RTGCPTR GCPtrBase;
1891 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
1892 if (rcStrict == VINF_SUCCESS)
1893 {
1894 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1895 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
1896 else
1897 {
1898 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1899 pCtx->idtr.cbIdt = cbLimit;
1900 pCtx->idtr.pIdt = GCPtrBase;
1901 }
1902 if (rcStrict == VINF_SUCCESS)
1903 iemRegAddToRip(pIemCpu, cbInstr);
1904 }
1905 return rcStrict;
1906}
1907
1908
1909/**
1910 * Implements lldt.
1911 *
1912 * @param uNewLdt The new LDT selector value.
1913 */
1914IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
1915{
1916 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1917
1918 /*
1919 * Check preconditions.
1920 */
1921 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1922 {
1923 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
1924 return iemRaiseUndefinedOpcode(pIemCpu);
1925 }
1926 if (pIemCpu->uCpl != 0)
1927 {
1928 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
1929 return iemRaiseGeneralProtectionFault0(pIemCpu);
1930 }
1931 if (uNewLdt & X86_SEL_LDT)
1932 {
1933 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
1934 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
1935 }
1936
1937 /*
1938 * Now, loading a NULL selector is easy.
1939 */
1940 if ((uNewLdt & X86_SEL_MASK) == 0)
1941 {
1942 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
1943 /** @todo check if the actual value is loaded or if it's always 0. */
1944 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1945 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
1946 else
1947 pCtx->ldtr = 0;
1948 pCtx->ldtrHid.Attr.u = 0;
1949 pCtx->ldtrHid.u64Base = 0;
1950 pCtx->ldtrHid.u32Limit = 0;
1951
1952 iemRegAddToRip(pIemCpu, cbInstr);
1953 return VINF_SUCCESS;
1954 }
1955
1956 /*
1957 * Read the descriptor.
1958 */
1959 IEMSELDESC Desc;
1960 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
1961 if (rcStrict != VINF_SUCCESS)
1962 return rcStrict;
1963
1964 /* Check GPs first. */
1965 if (Desc.Legacy.Gen.u1DescType)
1966 {
1967 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
1968 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
1969 }
1970 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1971 {
1972 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
1973 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
1974 }
1975 uint64_t u64Base;
1976 if (!IEM_IS_LONG_MODE(pIemCpu))
1977 u64Base = X86DESC_BASE(Desc.Legacy);
1978 else
1979 {
1980 if (Desc.Long.Gen.u5Zeros)
1981 {
1982 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
1983 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
1984 }
1985
1986 u64Base = X86DESC64_BASE(Desc.Long);
1987 if (!IEM_IS_CANONICAL(u64Base))
1988 {
1989 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
1990 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
1991 }
1992 }
1993
1994 /* NP */
1995 if (!Desc.Legacy.Gen.u1Present)
1996 {
1997 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
1998 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
1999 }
2000
2001 /*
2002 * It checks out alright, update the registers.
2003 */
2004/** @todo check if the actual value is loaded or if the RPL is dropped */
2005 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2006 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
2007 else
2008 pCtx->ldtr = uNewLdt & X86_SEL_MASK;
2009 pCtx->ldtrHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2010 pCtx->ldtrHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2011 pCtx->ldtrHid.u64Base = u64Base;
2012
2013 iemRegAddToRip(pIemCpu, cbInstr);
2014 return VINF_SUCCESS;
2015}
2016
2017
2018/**
2019 * Implements ltr.
2020 *
2021 * @param uNewTr The new task register (TR) selector value.
2022 */
2023IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2024{
2025 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2026
2027 /*
2028 * Check preconditions.
2029 */
2030 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2031 {
2032 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2033 return iemRaiseUndefinedOpcode(pIemCpu);
2034 }
2035 if (pIemCpu->uCpl != 0)
2036 {
2037 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2038 return iemRaiseGeneralProtectionFault0(pIemCpu);
2039 }
2040 if (uNewTr & X86_SEL_LDT)
2041 {
2042 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2043 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2044 }
2045 if ((uNewTr & X86_SEL_MASK) == 0)
2046 {
2047 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2048 return iemRaiseGeneralProtectionFault0(pIemCpu);
2049 }
2050
2051 /*
2052 * Read the descriptor.
2053 */
2054 IEMSELDESC Desc;
2055 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2056 if (rcStrict != VINF_SUCCESS)
2057 return rcStrict;
2058
2059 /* Check GPs first. */
2060 if (Desc.Legacy.Gen.u1DescType)
2061 {
2062 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2063 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2064 }
2065 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2066 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2067 || IEM_IS_LONG_MODE(pIemCpu)) )
2068 {
2069 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2070 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2071 }
2072 uint64_t u64Base;
2073 if (!IEM_IS_LONG_MODE(pIemCpu))
2074 u64Base = X86DESC_BASE(Desc.Legacy);
2075 else
2076 {
2077 if (Desc.Long.Gen.u5Zeros)
2078 {
2079 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2080 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2081 }
2082
2083 u64Base = X86DESC64_BASE(Desc.Long);
2084 if (!IEM_IS_CANONICAL(u64Base))
2085 {
2086 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2087 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2088 }
2089 }
2090
2091 /* NP */
2092 if (!Desc.Legacy.Gen.u1Present)
2093 {
2094 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2095 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2096 }
2097
2098 /*
2099 * Set it busy.
2100 * Note! Intel says this should lock down the whole descriptor, but we'll
2101 * restrict ourselves to 32-bit for now due to lack of inline
2102 * assembly and such.
2103 */
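    /* The busy flag is bit 1 of the descriptor type field, i.e. bit 41 of the
     * 8-byte descriptor.  ASMAtomicBitSet wants a 32-bit aligned pointer, so the
     * switch below rebases a misaligned mapping onto the next 32-bit boundary
     * and adjusts the bit offset to match. */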
2104 void *pvDesc;
2105 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
2106 if (rcStrict != VINF_SUCCESS)
2107 return rcStrict;
2108 switch ((uintptr_t)pvDesc & 3)
2109 {
2110 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2111 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2112 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2113 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2114 }
2115 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2116 if (rcStrict != VINF_SUCCESS)
2117 return rcStrict;
2118 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2119
2120 /*
2121 * It checks out alright, update the registers.
2122 */
2123/** @todo check if the actual value is loaded or if the RPL is dropped */
2124 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2125 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
2126 else
2127 pCtx->tr = uNewTr & X86_SEL_MASK;
2128 pCtx->trHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2129 pCtx->trHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2130 pCtx->trHid.u64Base = u64Base;
2131
2132 iemRegAddToRip(pIemCpu, cbInstr);
2133 return VINF_SUCCESS;
2134}
2135
2136
2137/**
2138 * Implements mov GReg,CRx.
2139 *
2140 * @param iGReg The general register to store the CRx value in.
2141 * @param iCrReg The CRx register to read (valid).
2142 */
2143IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2144{
2145 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2146 if (pIemCpu->uCpl != 0)
2147 return iemRaiseGeneralProtectionFault0(pIemCpu);
2148 Assert(!pCtx->eflags.Bits.u1VM);
2149
2150 /* read it */
2151 uint64_t crX;
2152 switch (iCrReg)
2153 {
2154 case 0: crX = pCtx->cr0; break;
2155 case 2: crX = pCtx->cr2; break;
2156 case 3: crX = pCtx->cr3; break;
2157 case 4: crX = pCtx->cr4; break;
2158 case 8:
2159 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2160 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2161 else
2162 crX = 0xff;
2163 break;
2164 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2165 }
2166
2167 /* store it */
2168 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2169 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2170 else
2171 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2172
2173 iemRegAddToRip(pIemCpu, cbInstr);
2174 return VINF_SUCCESS;
2175}
2176
2177
2178/**
2179 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2180 *
2181 * @param iCrReg The CRx register to write (valid).
2182 * @param uNewCrX The new value.
2183 */
2184IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2185{
2186 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2187 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2188 VBOXSTRICTRC rcStrict;
2189 int rc;
2190
2191 /*
2192 * Try store it.
2193 * Unfortunately, CPUM only does a tiny bit of the work.
2194 */
2195 switch (iCrReg)
2196 {
2197 case 0:
2198 {
2199 /*
2200 * Perform checks.
2201 */
2202 uint64_t const uOldCrX = pCtx->cr0;
2203 uNewCrX |= X86_CR0_ET; /* hardcoded */
2204
2205 /* Check for reserved bits. */
2206 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2207 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2208 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2209 if (uNewCrX & ~(uint64_t)fValid)
2210 {
2211 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2212 return iemRaiseGeneralProtectionFault0(pIemCpu);
2213 }
2214
2215 /* Check for invalid combinations. */
2216 if ( (uNewCrX & X86_CR0_PG)
2217 && !(uNewCrX & X86_CR0_PE) )
2218 {
2219 Log(("Trying to set CR0.PG without CR0.PE\n"));
2220 return iemRaiseGeneralProtectionFault0(pIemCpu);
2221 }
2222
2223 if ( !(uNewCrX & X86_CR0_CD)
2224 && (uNewCrX & X86_CR0_NW) )
2225 {
2226 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2227 return iemRaiseGeneralProtectionFault0(pIemCpu);
2228 }
2229
2230 /* Long mode consistency checks. */
2231 if ( (uNewCrX & X86_CR0_PG)
2232 && !(uOldCrX & X86_CR0_PG)
2233 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2234 {
2235 if (!(pCtx->cr4 & X86_CR4_PAE))
2236 {
2237 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
2238 return iemRaiseGeneralProtectionFault0(pIemCpu);
2239 }
2240 if (pCtx->csHid.Attr.n.u1Long)
2241 {
2242 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
2243 return iemRaiseGeneralProtectionFault0(pIemCpu);
2244 }
2245 }
2246
2247 /** @todo check reserved PDPTR bits as AMD states. */
2248
2249 /*
2250 * Change CR0.
2251 */
2252 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2253 {
2254 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
2255 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
2256 }
2257 else
2258 pCtx->cr0 = uNewCrX;
2259 Assert(pCtx->cr0 == uNewCrX);
2260
2261 /*
2262 * Change EFER.LMA if entering or leaving long mode.
2263 */
2264 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
2265 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2266 {
2267 uint64_t NewEFER = pCtx->msrEFER;
2268 if (uNewCrX & X86_CR0_PG)
2269 NewEFER |= MSR_K6_EFER_LME;
2270 else
2271 NewEFER &= ~MSR_K6_EFER_LME;
2272
2273 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2274 CPUMSetGuestEFER(pVCpu, NewEFER);
2275 else
2276 pCtx->msrEFER = NewEFER;
2277 Assert(pCtx->msrEFER == NewEFER);
2278 }
2279
2280 /*
2281 * Inform PGM.
2282 */
2283 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2284 {
2285 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
2286 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
2287 {
2288 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
2289 AssertRCReturn(rc, rc);
2290 /* ignore informational status codes */
2291 }
2292 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2293 /** @todo Status code management. */
2294 }
2295 else
2296 rcStrict = VINF_SUCCESS;
2297 break;
2298 }
2299
2300 /*
2301 * CR2 can be changed without any restrictions.
2302 */
2303 case 2:
2304 pCtx->cr2 = uNewCrX;
2305 rcStrict = VINF_SUCCESS;
2306 break;
2307
2308 /*
2309 * CR3 is relatively simple, although AMD and Intel have different
2310 * accounts of how setting reserved bits is handled. We take Intel's
2311 * word for the lower bits and AMD's for the high bits (63:52).
2312 */
2313 /** @todo Testcase: Setting reserved bits in CR3, especially before
2314 * enabling paging. */
2315 case 3:
2316 {
2317 /* check / mask the value. */
2318 if (uNewCrX & UINT64_C(0xfff0000000000000))
2319 {
2320 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
2321 return iemRaiseGeneralProtectionFault0(pIemCpu);
2322 }
2323
2324 uint64_t fValid;
2325 if ( (pCtx->cr4 & X86_CR4_PAE)
2326 && (pCtx->msrEFER & MSR_K6_EFER_LME))
2327 fValid = UINT64_C(0x000ffffffffff014);
2328 else if (pCtx->cr4 & X86_CR4_PAE)
2329 fValid = UINT64_C(0xfffffff4);
2330 else
2331 fValid = UINT64_C(0xfffff014);
2332 if (uNewCrX & ~fValid)
2333 {
2334 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
2335 uNewCrX, uNewCrX & ~fValid));
2336 uNewCrX &= fValid;
2337 }
2338
2339 /** @todo If we're in PAE mode we should check the PDPTRs for
2340 * invalid bits. */
2341
2342 /* Make the change. */
2343 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2344 {
2345 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
2346 AssertRCSuccessReturn(rc, rc);
2347 }
2348 else
2349 pCtx->cr3 = uNewCrX;
2350
2351 /* Inform PGM. */
2352 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2353 {
2354 if (pCtx->cr0 & X86_CR0_PG)
2355 {
2356 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2357 AssertRCReturn(rc, rc);
2358 /* ignore informational status codes */
2359 /** @todo status code management */
2360 }
2361 }
2362 rcStrict = VINF_SUCCESS;
2363 break;
2364 }
2365
2366 /*
2367 * CR4 is a bit more tedious as there are bits which cannot be cleared
2368 * under some circumstances and such.
2369 */
2370 case 4:
2371 {
2372 uint64_t const uOldCrX = pCtx->cr4;
2373
2374 /* reserved bits */
2375 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
2376 | X86_CR4_TSD | X86_CR4_DE
2377 | X86_CR4_PSE | X86_CR4_PAE
2378 | X86_CR4_MCE | X86_CR4_PGE
2379 | X86_CR4_PCE | X86_CR4_OSFSXR
2380 | X86_CR4_OSXMMEEXCPT;
2381 //if (xxx)
2382 // fValid |= X86_CR4_VMXE;
2383 //if (xxx)
2384 // fValid |= X86_CR4_OSXSAVE;
2385 if (uNewCrX & ~(uint64_t)fValid)
2386 {
2387 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2388 return iemRaiseGeneralProtectionFault0(pIemCpu);
2389 }
2390
2391 /* long mode checks. */
2392 if ( (uOldCrX & X86_CR4_PAE)
2393 && !(uNewCrX & X86_CR4_PAE)
2394 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
2395 {
2396 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
2397 return iemRaiseGeneralProtectionFault0(pIemCpu);
2398 }
2399
2400
2401 /*
2402 * Change it.
2403 */
2404 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2405 {
2406 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
2407 AssertRCSuccessReturn(rc, rc);
2408 }
2409 else
2410 pCtx->cr4 = uNewCrX;
2411 Assert(pCtx->cr4 == uNewCrX);
2412
2413 /*
2414 * Notify SELM and PGM.
2415 */
2416 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2417 {
2418 /* SELM - VME may change things wrt to the TSS shadowing. */
2419 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
2420 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2421
2422 /* PGM - flushing and mode. */
2423 if ( (uNewCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
2424 != (uOldCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE)) )
2425 {
2426 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
2427 AssertRCReturn(rc, rc);
2428 /* ignore informational status codes */
2429 }
2430 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2431 /** @todo Status code management. */
2432 }
2433 else
2434 rcStrict = VINF_SUCCESS;
2435 break;
2436 }
2437
2438 /*
2439 * CR8 maps to the APIC TPR.
2440 */
2441 case 8:
2442 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2443 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2444 else
2445 rcStrict = VINF_SUCCESS;
2446 break;
2447
2448 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2449 }
2450
2451 /*
2452 * Advance the RIP on success.
2453 */
2454 /** @todo Status code management. */
2455 if (rcStrict == VINF_SUCCESS)
2456 iemRegAddToRip(pIemCpu, cbInstr);
2457 return rcStrict;
2458
2459}
2460
2461
2462/**
2463 * Implements mov CRx,GReg.
2464 *
2465 * @param iCrReg The CRx register to write (valid).
2466 * @param iGReg The general register to load the CRx value from.
2467 */
2468IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
2469{
2470 if (pIemCpu->uCpl != 0)
2471 return iemRaiseGeneralProtectionFault0(pIemCpu);
2472 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2473
2474 /*
2475 * Read the new value from the source register and call common worker.
2476 */
2477 uint64_t uNewCrX;
2478 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2479 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
2480 else
2481 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
2482 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
2483}
2484
2485
2486/**
2487 * Implements 'LMSW r/m16'
2488 *
2489 * @param u16NewMsw The new value.
2490 */
2491IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
2492{
2493 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2494
2495 if (pIemCpu->uCpl != 0)
2496 return iemRaiseGeneralProtectionFault0(pIemCpu);
2497 Assert(!pCtx->eflags.Bits.u1VM);
2498
2499 /*
2500 * Compose the new CR0 value and call common worker.
2501 */
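    /* LMSW only touches the low four MSW bits (PE, MP, EM and TS).  Since PE is
     * carried over from the old CR0 below, the instruction can set PE but never
     * clear it, which is the architected behaviour. */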
2502 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2503 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2504 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
2505}
2506
2507
2508/**
2509 * Implements 'CLTS'.
2510 */
2511IEM_CIMPL_DEF_0(iemCImpl_clts)
2512{
2513 if (pIemCpu->uCpl != 0)
2514 return iemRaiseGeneralProtectionFault0(pIemCpu);
2515
2516 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2517 uint64_t uNewCr0 = pCtx->cr0;
2518 uNewCr0 &= ~X86_CR0_TS;
2519 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
2520}
2521
2522
2523/**
2524 * Implements mov GReg,DRx.
2525 *
2526 * @param iGReg The general register to store the DRx value in.
2527 * @param iDrReg The DRx register to read (0-7).
2528 */
2529IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
2530{
2531 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2532
2533 /*
2534 * Check preconditions.
2535 */
2536
2537 /* Raise GPs. */
2538 if (pIemCpu->uCpl != 0)
2539 return iemRaiseGeneralProtectionFault0(pIemCpu);
2540 Assert(!pCtx->eflags.Bits.u1VM);
2541
2542 if ( (iDrReg == 4 || iDrReg == 5)
2543 && (pCtx->cr4 & X86_CR4_DE) )
2544 {
2545 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
2546 return iemRaiseGeneralProtectionFault0(pIemCpu);
2547 }
2548
2549 /* Raise #DB if general access detect is enabled. */
2550 if (pCtx->dr[7] & X86_DR7_GD)
2551 {
2552 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
2553 return iemRaiseDebugException(pIemCpu);
2554 }
2555
2556 /*
2557 * Read the debug register and store it in the specified general register.
2558 */
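    /* DR4 and DR5 alias DR6 and DR7 here (the CR4.DE=1 case was rejected with
     * #GP(0) above).  The masking below forces the reserved bits to their fixed
     * read values: DR6 reads back as the 0xFFFF0FF0 pattern with bit 12 clear,
     * DR7 reads back with bit 10 set and bits 11, 12, 14 and 15 clear. */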
2559 uint64_t drX;
2560 switch (iDrReg)
2561 {
2562 case 0: drX = pCtx->dr[0]; break;
2563 case 1: drX = pCtx->dr[1]; break;
2564 case 2: drX = pCtx->dr[2]; break;
2565 case 3: drX = pCtx->dr[3]; break;
2566 case 6:
2567 case 4:
2568 drX = pCtx->dr[6];
2569 drX &= ~RT_BIT_32(12);
2570 drX |= UINT32_C(0xffff0ff0);
2571 break;
2572 case 7:
2573 case 5:
2574 drX = pCtx->dr[7];
2575 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
2576 drX |= RT_BIT_32(10);
2577 break;
2578 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2579 }
2580
2581 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2582 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
2583 else
2584 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
2585
2586 iemRegAddToRip(pIemCpu, cbInstr);
2587 return VINF_SUCCESS;
2588}
2589
2590
2591/**
2592 * Implements mov DRx,GReg.
2593 *
2594 * @param iDrReg The DRx register to write (valid).
2595 * @param iGReg The general register to load the DRx value from.
2596 */
2597IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
2598{
2599 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2600
2601 /*
2602 * Check preconditions.
2603 */
2604 if (pIemCpu->uCpl != 0)
2605 return iemRaiseGeneralProtectionFault0(pIemCpu);
2606 Assert(!pCtx->eflags.Bits.u1VM);
2607
2608 if ( (iDrReg == 4 || iDrReg == 5)
2609 && (pCtx->cr4 & X86_CR4_DE) )
2610 {
2611 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
2612 return iemRaiseGeneralProtectionFault0(pIemCpu);
2613 }
2614
2615 /* Raise #DB if general access detect is enabled. */
2616 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
2617 * \#GP? */
2618 if (pCtx->dr[7] & X86_DR7_GD)
2619 {
2620 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
2621 return iemRaiseDebugException(pIemCpu);
2622 }
2623
2624 /*
2625 * Read the new value from the source register.
2626 */
2627 uint64_t uNewDrX;
2628 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2629 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
2630 else
2631 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
2632
2633 /*
2634 * Adjust it.
2635 */
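    /* Same fixed-bit treatment as for reads: the upper 32 bits of DR6/DR7 must
     * be zero (otherwise #GP(0)), and the reserved low bits are forced to their
     * architectural values before the register is stored. */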
2636 switch (iDrReg)
2637 {
2638 case 0:
2639 case 1:
2640 case 2:
2641 case 3:
2642 /* nothing to adjust */
2643 break;
2644
2645 case 6:
2646 case 4:
2647 if (uNewDrX & UINT64_C(0xffffffff00000000))
2648 {
2649 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
2650 return iemRaiseGeneralProtectionFault0(pIemCpu);
2651 }
2652 uNewDrX &= ~RT_BIT_32(12);
2653 uNewDrX |= UINT32_C(0xffff0ff0);
2654 break;
2655
2656 case 7:
2657 case 5:
2658 if (uNewDrX & UINT64_C(0xffffffff00000000))
2659 {
2660 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
2661 return iemRaiseGeneralProtectionFault0(pIemCpu);
2662 }
2663 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
2664 uNewDrX |= RT_BIT_32(10);
2665 break;
2666
2667 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2668 }
2669
2670 /*
2671 * Do the actual setting.
2672 */
2673 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2674 {
2675 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
2676 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
2677 }
2678 else
2679 pCtx->dr[iDrReg] = uNewDrX;
2680
2681 iemRegAddToRip(pIemCpu, cbInstr);
2682 return VINF_SUCCESS;
2683}
2684
2685
2686/**
2687 * Implements RDTSC.
2688 */
2689IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
2690{
2691 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2692
2693 /*
2694 * Check preconditions.
2695 */
2696 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
2697 return iemRaiseUndefinedOpcode(pIemCpu);
2698
2699 if ( (pCtx->cr4 & X86_CR4_TSD)
2700 && pIemCpu->uCpl != 0)
2701 {
2702 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
2703 return iemRaiseGeneralProtectionFault0(pIemCpu);
2704 }
2705
2706 /*
2707 * Do the job.
2708 */
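    /* The TSC is returned in EDX:EAX; the assignments below also clear the upper
     * halves of RAX and RDX, as a 64-bit capable CPU does. */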
2709 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
2710 pCtx->rax = (uint32_t)uTicks;
2711 pCtx->rdx = uTicks >> 32;
2712#ifdef IEM_VERIFICATION_MODE
2713 pIemCpu->fIgnoreRaxRdx = true;
2714#endif
2715
2716 iemRegAddToRip(pIemCpu, cbInstr);
2717 return VINF_SUCCESS;
2718}
2719
2720
2721/**
2722 * Implements 'IN eAX, port'.
2723 *
2724 * @param u16Port The source port.
2725 * @param cbReg The register size.
2726 */
2727IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
2728{
2729 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2730
2731 /*
2732 * CPL check
2733 */
2734 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
2735 if (rcStrict != VINF_SUCCESS)
2736 return rcStrict;
2737
2738 /*
2739 * Perform the I/O.
2740 */
2741 uint32_t u32Value;
2742 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2743 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
2744 else
2745 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
2746 if (IOM_SUCCESS(rcStrict))
2747 {
2748 switch (cbReg)
2749 {
2750 case 1: pCtx->al = (uint8_t)u32Value; break;
2751 case 2: pCtx->ax = (uint16_t)u32Value; break;
2752 case 4: pCtx->rax = u32Value; break;
2753 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
2754 }
2755 iemRegAddToRip(pIemCpu, cbInstr);
2756 pIemCpu->cPotentialExits++;
2757 }
2758 /** @todo massage rcStrict. */
2759 return rcStrict;
2760}
2761
2762
2763/**
2764 * Implements 'IN eAX, DX'.
2765 *
2766 * @param cbReg The register size.
2767 */
2768IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
2769{
2770 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
2771}
2772
2773
2774/**
2775 * Implements 'OUT port, eAX'.
2776 *
2777 * @param u16Port The destination port.
2778 * @param cbReg The register size.
2779 */
2780IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
2781{
2782 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2783
2784 /*
2785 * CPL check
2786 */
2787 if ( (pCtx->cr0 & X86_CR0_PE)
2788 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
2789 || pCtx->eflags.Bits.u1VM) )
2790 {
2791 /** @todo I/O port permission bitmap check */
2792 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
2793 }
2794
2795 /*
2796 * Perform the I/O.
2797 */
2798 uint32_t u32Value;
2799 switch (cbReg)
2800 {
2801 case 1: u32Value = pCtx->al; break;
2802 case 2: u32Value = pCtx->ax; break;
2803 case 4: u32Value = pCtx->eax; break;
2804 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
2805 }
2806 VBOXSTRICTRC rc;
2807 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2808 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
2809 else
2810 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
2811 if (IOM_SUCCESS(rc))
2812 {
2813 iemRegAddToRip(pIemCpu, cbInstr);
2814 pIemCpu->cPotentialExits++;
2815 /** @todo massage rc. */
2816 }
2817 return rc;
2818}
2819
2820
2821/**
2822 * Implements 'OUT DX, eAX'.
2823 *
2824 * @param cbReg The register size.
2825 */
2826IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
2827{
2828 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
2829}
2830
2831
2832/**
2833 * Implements 'CLI'.
2834 */
2835IEM_CIMPL_DEF_0(iemCImpl_cli)
2836{
2837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2838
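    /* Summary of the rules below: in protected mode CPL <= IOPL may clear IF,
     * CPL 3 with CR4.PVI set clears VIF instead, anything else raises #GP(0).
     * In V8086 mode IOPL 3 clears IF, IOPL < 3 with CR4.VME clears VIF,
     * otherwise #GP(0).  In real mode IF is simply cleared. */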
2839 if (pCtx->cr0 & X86_CR0_PE)
2840 {
2841 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
2842 if (!pCtx->eflags.Bits.u1VM)
2843 {
2844 if (pIemCpu->uCpl <= uIopl)
2845 pCtx->eflags.Bits.u1IF = 0;
2846 else if ( pIemCpu->uCpl == 3
2847 && (pCtx->cr4 & X86_CR4_PVI) )
2848 pCtx->eflags.Bits.u1VIF = 0;
2849 else
2850 return iemRaiseGeneralProtectionFault0(pIemCpu);
2851 }
2852 /* V8086 */
2853 else if (uIopl == 3)
2854 pCtx->eflags.Bits.u1IF = 0;
2855 else if ( uIopl < 3
2856 && (pCtx->cr4 & X86_CR4_VME) )
2857 pCtx->eflags.Bits.u1VIF = 0;
2858 else
2859 return iemRaiseGeneralProtectionFault0(pIemCpu);
2860 }
2861 /* real mode */
2862 else
2863 pCtx->eflags.Bits.u1IF = 0;
2864 iemRegAddToRip(pIemCpu, cbInstr);
2865 return VINF_SUCCESS;
2866}
2867
2868
2869/**
2870 * Implements 'STI'.
2871 */
2872IEM_CIMPL_DEF_0(iemCImpl_sti)
2873{
2874 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2875
2876 if (pCtx->cr0 & X86_CR0_PE)
2877 {
2878 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
2879 if (!pCtx->eflags.Bits.u1VM)
2880 {
2881 if (pIemCpu->uCpl <= uIopl)
2882 pCtx->eflags.Bits.u1IF = 1;
2883 else if ( pIemCpu->uCpl == 3
2884 && (pCtx->cr4 & X86_CR4_PVI)
2885 && !pCtx->eflags.Bits.u1VIP )
2886 pCtx->eflags.Bits.u1VIF = 1;
2887 else
2888 return iemRaiseGeneralProtectionFault0(pIemCpu);
2889 }
2890 /* V8086 */
2891 else if (uIopl == 3)
2892 pCtx->eflags.Bits.u1IF = 1;
2893 else if ( uIopl < 3
2894 && (pCtx->cr4 & X86_CR4_VME)
2895 && !pCtx->eflags.Bits.u1VIP )
2896 pCtx->eflags.Bits.u1VIF = 1;
2897 else
2898 return iemRaiseGeneralProtectionFault0(pIemCpu);
2899 }
2900 /* real mode */
2901 else
2902 pCtx->eflags.Bits.u1IF = 1;
2903
2904 iemRegAddToRip(pIemCpu, cbInstr);
2905 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2906 return VINF_SUCCESS;
2907}
2908
2909
2910/**
2911 * Implements 'HLT'.
2912 */
2913IEM_CIMPL_DEF_0(iemCImpl_hlt)
2914{
2915 if (pIemCpu->uCpl != 0)
2916 return iemRaiseGeneralProtectionFault0(pIemCpu);
2917 iemRegAddToRip(pIemCpu, cbInstr);
2918 return VINF_EM_HALT;
2919}
2920
2921
2922/**
2923 * Implements 'CPUID'.
2924 */
2925IEM_CIMPL_DEF_0(iemCImpl_cpuid)
2926{
2927 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2928
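    /* CPUMGetGuestCpuId fills in the 32-bit leaf values; the masking below also
     * clears the upper halves of RAX..RDX, matching CPUID behaviour in 64-bit
     * mode. */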
2929 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
2930 pCtx->rax &= UINT32_C(0xffffffff);
2931 pCtx->rbx &= UINT32_C(0xffffffff);
2932 pCtx->rcx &= UINT32_C(0xffffffff);
2933 pCtx->rdx &= UINT32_C(0xffffffff);
2934
2935 iemRegAddToRip(pIemCpu, cbInstr);
2936 return VINF_SUCCESS;
2937}
2938
2939
2940/**
2941 * Implements 'AAD'.
2942 *
2943 * @param bImm The immediate operand.
2944 */
2945IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
2946{
2947 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2948
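    /* AAD computes AL = AL + AH * imm and clears AH.  E.g. with the usual
     * immediate 0x0A and AX=0x0307 (unpacked BCD 37) the result is AX=0x0025. */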
2949 uint16_t const ax = pCtx->ax;
2950 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
2951 pCtx->ax = al;
2952 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
2953 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
2954 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
2955
2956 iemRegAddToRip(pIemCpu, cbInstr);
2957 return VINF_SUCCESS;
2958}
2959
2960
2961/**
2962 * Implements 'AAM'.
2963 *
2964 * @param bImm The immediate operand. Cannot be 0.
2965 */
2966IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
2967{
2968 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2969 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
2970
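    /* AAM computes AH = AL / imm and AL = AL % imm.  E.g. with the usual
     * immediate 0x0A and AL=0x25 (37 decimal) the result is AX=0x0307. */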
2971 uint16_t const ax = pCtx->ax;
2972 uint8_t const al = (uint8_t)ax % bImm;
2973 uint8_t const ah = (uint8_t)ax / bImm;
2974 pCtx->ax = (ah << 8) + al;
2975 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
2976 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
2977 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
2978
2979 iemRegAddToRip(pIemCpu, cbInstr);
2980 return VINF_SUCCESS;
2981}
2982
2983
2984
2985
2986/*
2987 * Instantiate the various string operation combinations.
2988 */
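/* Each inclusion below expands the string instruction template for one
 * operand-size/address-size pair.  The 64-bit operand size is only paired with
 * 32-bit and 64-bit addressing, presumably because 16-bit addressing is not
 * available in long mode. */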
2989#define OP_SIZE 8
2990#define ADDR_SIZE 16
2991#include "IEMAllCImplStrInstr.cpp.h"
2992#define OP_SIZE 8
2993#define ADDR_SIZE 32
2994#include "IEMAllCImplStrInstr.cpp.h"
2995#define OP_SIZE 8
2996#define ADDR_SIZE 64
2997#include "IEMAllCImplStrInstr.cpp.h"
2998
2999#define OP_SIZE 16
3000#define ADDR_SIZE 16
3001#include "IEMAllCImplStrInstr.cpp.h"
3002#define OP_SIZE 16
3003#define ADDR_SIZE 32
3004#include "IEMAllCImplStrInstr.cpp.h"
3005#define OP_SIZE 16
3006#define ADDR_SIZE 64
3007#include "IEMAllCImplStrInstr.cpp.h"
3008
3009#define OP_SIZE 32
3010#define ADDR_SIZE 16
3011#include "IEMAllCImplStrInstr.cpp.h"
3012#define OP_SIZE 32
3013#define ADDR_SIZE 32
3014#include "IEMAllCImplStrInstr.cpp.h"
3015#define OP_SIZE 32
3016#define ADDR_SIZE 64
3017#include "IEMAllCImplStrInstr.cpp.h"
3018
3019#define OP_SIZE 64
3020#define ADDR_SIZE 32
3021#include "IEMAllCImplStrInstr.cpp.h"
3022#define OP_SIZE 64
3023#define ADDR_SIZE 64
3024#include "IEMAllCImplStrInstr.cpp.h"
3025
3026
3027/**
3028 * Implements 'FINIT' and 'FNINIT'.
3029 *
3030 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3031 * not.
3032 */
3033IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3034{
3035 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3036
3037 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3038 return iemRaiseDeviceNotAvailable(pIemCpu);
3039
3040 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
3041 if (fCheckXcpts && TODO )
3042 return iemRaiseMathFault(pIemCpu);
3043 */
3044
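    /* Architected FINIT state: FCW=0x037F (all exceptions masked, 64-bit
     * precision, round to nearest), FSW=0 and an all-empty tag word.  In the
     * FXSAVE layout FTW is the abridged 8-bit tag where 0 means all empty; in
     * the legacy layout the full 16-bit tag word 0xFFFF means all empty. */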
3045 if (iemFRegIsFxSaveFormat(pIemCpu))
3046 {
3047 pCtx->fpu.FCW = 0x37f;
3048 pCtx->fpu.FSW = 0;
3049 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
3050 pCtx->fpu.FPUDP = 0;
3051 pCtx->fpu.DS = 0; //??
3052 pCtx->fpu.FPUIP = 0;
3053 pCtx->fpu.CS = 0; //??
3054 pCtx->fpu.FOP = 0;
3055 }
3056 else
3057 {
3058 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3059 pFpu->FCW = 0x37f;
3060 pFpu->FSW = 0;
3061 pFpu->FTW = 0xffff; /* 11 - empty */
3062 pFpu->FPUOO = 0; //??
3063 pFpu->FPUOS = 0; //??
3064 pFpu->FPUIP = 0;
3065 pFpu->CS = 0; //??
3066 pFpu->FOP = 0;
3067 }
3068
3069 iemRegAddToRip(pIemCpu, cbInstr);
3070 return VINF_SUCCESS;
3071}
3072
3073
3074/** @} */
3075