VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 36891

Last change on this file since 36891 was 36860, checked in by vboxsync, 14 years ago

IEM: rdtsc, mov DRx, ltr, lldt. cmovnle fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 89.0 KB
1/* $Id: IEMAllCImpl.cpp.h 36860 2011-04-27 17:31:21Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 /** @todo I/O port permission bitmap check */
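 /* A rough, disabled sketch of the missing bitmap walk. It assumes a 32-bit TSS
 (I/O map base word at offset 0x66) and an iemMemFetchDataU16 helper, neither of
 which has been verified against the rest of IEM, so treat it purely as illustration. */
#if 0
 uint16_t offIoBitmap;
 VBOXSTRICTRC rcStrict2 = iemMemFetchDataU16(pIemCpu, &offIoBitmap, UINT8_MAX,
 pCtx->trHid.u64Base + 0x66);
 if (rcStrict2 != VINF_SUCCESS)
 return rcStrict2;
 /* Each port is one bit; read the two bytes covering the access and require all
 cbOperand bits to be clear. (TSS limit checks are omitted from this sketch.) */
 uint16_t bmBits;
 rcStrict2 = iemMemFetchDataU16(pIemCpu, &bmBits, UINT8_MAX,
 pCtx->trHid.u64Base + offIoBitmap + (u16Port >> 3));
 if (rcStrict2 != VINF_SUCCESS)
 return rcStrict2;
 if (bmBits & (((1 << cbOperand) - 1) << (u16Port & 7)))
 return iemRaiseGeneralProtectionFault0(pIemCpu);
 return VINF_SUCCESS;
#endif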
42 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
46
47/** @} */
48
49/** @name C Implementations
50 * @{
51 */
52
53/**
54 * Implements a 16-bit popa.
55 */
56IEM_CIMPL_DEF_0(iemCImpl_popa_16)
57{
58 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
59 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
60 RTGCPTR GCPtrLast = GCPtrStart + 15;
61 VBOXSTRICTRC rcStrict;
62
63 /*
64 * The docs are a bit hard to comprehend here, but it looks like we wrap
65 * around in real mode as long as none of the individual "popa" crosses the
66 * end of the stack segment. In protected mode we check the whole access
67 * in one go. For efficiency, only do the word-by-word thing if we're in
68 * danger of wrapping around.
69 */
70 /** @todo do popa boundary / wrap-around checks. */
71 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
72 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
73 {
74 /* word-by-word */
75 RTUINT64U TmpRsp;
76 TmpRsp.u = pCtx->rsp;
77 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
78 if (rcStrict == VINF_SUCCESS)
79 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
80 if (rcStrict == VINF_SUCCESS)
81 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
82 if (rcStrict == VINF_SUCCESS)
83 {
84 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
85 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
86 }
87 if (rcStrict == VINF_SUCCESS)
88 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
89 if (rcStrict == VINF_SUCCESS)
90 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
91 if (rcStrict == VINF_SUCCESS)
92 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
93 if (rcStrict == VINF_SUCCESS)
94 {
95 pCtx->rsp = TmpRsp.u;
96 iemRegAddToRip(pIemCpu, cbInstr);
97 }
98 }
99 else
100 {
101 uint16_t const *pa16Mem = NULL;
102 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
103 if (rcStrict == VINF_SUCCESS)
104 {
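 /* The frame is read in the layout pusha uses: AX,CX,DX,BX,SP,BP,SI,DI from high
 addresses down to low, so general register number N (X86_GREG_xAX .. X86_GREG_xDI)
 sits at word index 7 - N of the mapping. */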
105 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
106 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
107 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
108 /* skip sp */
109 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
110 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
111 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
112 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
113 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
114 if (rcStrict == VINF_SUCCESS)
115 {
116 iemRegAddToRsp(pCtx, 16);
117 iemRegAddToRip(pIemCpu, cbInstr);
118 }
119 }
120 }
121 return rcStrict;
122}
123
124
125/**
126 * Implements a 32-bit popa.
127 */
128IEM_CIMPL_DEF_0(iemCImpl_popa_32)
129{
130 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
131 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
132 RTGCPTR GCPtrLast = GCPtrStart + 31;
133 VBOXSTRICTRC rcStrict;
134
135 /*
136 * The docs are a bit hard to comprehend here, but it looks like we wrap
137 * around in real mode as long as none of the individual "popa" crosses the
138 * end of the stack segment. In protected mode we check the whole access
139 * in one go. For efficiency, only do the word-by-word thing if we're in
140 * danger of wrapping around.
141 */
142 /** @todo do popa boundary / wrap-around checks. */
143 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
144 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
145 {
146 /* word-by-word */
147 RTUINT64U TmpRsp;
148 TmpRsp.u = pCtx->rsp;
149 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
150 if (rcStrict == VINF_SUCCESS)
151 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
152 if (rcStrict == VINF_SUCCESS)
153 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
154 if (rcStrict == VINF_SUCCESS)
155 {
156 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
157 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
158 }
159 if (rcStrict == VINF_SUCCESS)
160 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
161 if (rcStrict == VINF_SUCCESS)
162 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
163 if (rcStrict == VINF_SUCCESS)
164 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
165 if (rcStrict == VINF_SUCCESS)
166 {
167#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
168 pCtx->rdi &= UINT32_MAX;
169 pCtx->rsi &= UINT32_MAX;
170 pCtx->rbp &= UINT32_MAX;
171 pCtx->rbx &= UINT32_MAX;
172 pCtx->rdx &= UINT32_MAX;
173 pCtx->rcx &= UINT32_MAX;
174 pCtx->rax &= UINT32_MAX;
175#endif
176 pCtx->rsp = TmpRsp.u;
177 iemRegAddToRip(pIemCpu, cbInstr);
178 }
179 }
180 else
181 {
182 uint32_t const *pa32Mem;
183 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
184 if (rcStrict == VINF_SUCCESS)
185 {
186 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
187 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
188 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
189 /* skip esp */
190 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
191 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
192 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
193 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
194 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
195 if (rcStrict == VINF_SUCCESS)
196 {
197 iemRegAddToRsp(pCtx, 32);
198 iemRegAddToRip(pIemCpu, cbInstr);
199 }
200 }
201 }
202 return rcStrict;
203}
204
205
206/**
207 * Implements a 16-bit pusha.
208 */
209IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
210{
211 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
212 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
213 RTGCPTR GCPtrBottom = GCPtrTop - 15;
214 VBOXSTRICTRC rcStrict;
215
216 /*
217 * The docs are a bit hard to comprehend here, but it looks like we wrap
218 * around in real mode as long as none of the individual "pusha" crosses the
219 * end of the stack segment. In protected mode we check the whole access
220 * in one go. For efficiency, only do the word-by-word thing if we're in
221 * danger of wrapping around.
222 */
223 /** @todo do pusha boundary / wrap-around checks. */
224 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
225 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
226 {
227 /* word-by-word */
228 RTUINT64U TmpRsp;
229 TmpRsp.u = pCtx->rsp;
230 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
231 if (rcStrict == VINF_SUCCESS)
232 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
233 if (rcStrict == VINF_SUCCESS)
234 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
235 if (rcStrict == VINF_SUCCESS)
236 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
237 if (rcStrict == VINF_SUCCESS)
238 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
239 if (rcStrict == VINF_SUCCESS)
240 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
241 if (rcStrict == VINF_SUCCESS)
242 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
243 if (rcStrict == VINF_SUCCESS)
244 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
245 if (rcStrict == VINF_SUCCESS)
246 {
247 pCtx->rsp = TmpRsp.u;
248 iemRegAddToRip(pIemCpu, cbInstr);
249 }
250 }
251 else
252 {
253 GCPtrBottom--;
254 uint16_t *pa16Mem = NULL;
255 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
256 if (rcStrict == VINF_SUCCESS)
257 {
258 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
259 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
260 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
261 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
262 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
263 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
264 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
265 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
266 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
267 if (rcStrict == VINF_SUCCESS)
268 {
269 iemRegSubFromRsp(pCtx, 16);
270 iemRegAddToRip(pIemCpu, cbInstr);
271 }
272 }
273 }
274 return rcStrict;
275}
276
277
278/**
279 * Implements a 32-bit pusha.
280 */
281IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
282{
283 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
284 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
285 RTGCPTR GCPtrBottom = GCPtrTop - 31;
286 VBOXSTRICTRC rcStrict;
287
288 /*
289 * The docs are a bit hard to comprehend here, but it looks like we wrap
290 * around in real mode as long as none of the individual "pusha" crosses the
291 * end of the stack segment. In protected mode we check the whole access
292 * in one go. For efficiency, only do the word-by-word thing if we're in
293 * danger of wrapping around.
294 */
295 /** @todo do pusha boundary / wrap-around checks. */
296 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
297 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
298 {
299 /* word-by-word */
300 RTUINT64U TmpRsp;
301 TmpRsp.u = pCtx->rsp;
302 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
303 if (rcStrict == VINF_SUCCESS)
304 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
305 if (rcStrict == VINF_SUCCESS)
306 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
307 if (rcStrict == VINF_SUCCESS)
308 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
309 if (rcStrict == VINF_SUCCESS)
310 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
311 if (rcStrict == VINF_SUCCESS)
312 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
313 if (rcStrict == VINF_SUCCESS)
314 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
315 if (rcStrict == VINF_SUCCESS)
316 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
317 if (rcStrict == VINF_SUCCESS)
318 {
319 pCtx->rsp = TmpRsp.u;
320 iemRegAddToRip(pIemCpu, cbInstr);
321 }
322 }
323 else
324 {
325 GCPtrBottom--;
326 uint32_t *pa32Mem;
327 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
328 if (rcStrict == VINF_SUCCESS)
329 {
330 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
331 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
332 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
333 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
334 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
335 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
336 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
337 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
338 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
339 if (rcStrict == VINF_SUCCESS)
340 {
341 iemRegSubFromRsp(pCtx, 32);
342 iemRegAddToRip(pIemCpu, cbInstr);
343 }
344 }
345 }
346 return rcStrict;
347}
348
349
350/**
351 * Implements pushf.
352 *
353 *
354 * @param enmEffOpSize The effective operand size.
355 */
356IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
357{
358 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
359
360 /*
361 * If we're in V8086 mode some care is required (which is why we're
362 * doing this in a C implementation).
363 */
364 uint32_t fEfl = pCtx->eflags.u;
365 if ( (fEfl & X86_EFL_VM)
366 && X86_EFL_GET_IOPL(fEfl) != 3 )
367 {
368 Assert(pCtx->cr0 & X86_CR0_PE);
369 if ( enmEffOpSize != IEMMODE_16BIT
370 || !(pCtx->cr4 & X86_CR4_VME))
371 return iemRaiseGeneralProtectionFault0(pIemCpu);
372 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
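 /* VIF is EFLAGS bit 19 and IF is bit 9, so the next line copies VIF down into the
 IF position of the 16-bit image pushed in the VME case. */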
373 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
374 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
375 }
376
377 /*
378 * Ok, clear RF and VM and push the flags.
379 */
380 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
381
382 VBOXSTRICTRC rcStrict;
383 switch (enmEffOpSize)
384 {
385 case IEMMODE_16BIT:
386 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
387 break;
388 case IEMMODE_32BIT:
389 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
390 break;
391 case IEMMODE_64BIT:
392 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
393 break;
394 IEM_NOT_REACHED_DEFAULT_CASE_RET();
395 }
396 if (rcStrict != VINF_SUCCESS)
397 return rcStrict;
398
399 iemRegAddToRip(pIemCpu, cbInstr);
400 return VINF_SUCCESS;
401}
402
403
404/**
405 * Implements popf.
406 *
407 * @param enmEffOpSize The effective operand size.
408 */
409IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
410{
411 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
412 uint32_t const fEflOld = pCtx->eflags.u;
413 VBOXSTRICTRC rcStrict;
414 uint32_t fEflNew;
415
416 /*
417 * V8086 is special as usual.
418 */
419 if (fEflOld & X86_EFL_VM)
420 {
421 /*
422 * Almost anything goes if IOPL is 3.
423 */
424 if (X86_EFL_GET_IOPL(fEflOld) == 3)
425 {
426 switch (enmEffOpSize)
427 {
428 case IEMMODE_16BIT:
429 {
430 uint16_t u16Value;
431 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
432 if (rcStrict != VINF_SUCCESS)
433 return rcStrict;
434 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
435 break;
436 }
437 case IEMMODE_32BIT:
438 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
439 if (rcStrict != VINF_SUCCESS)
440 return rcStrict;
441 break;
442 IEM_NOT_REACHED_DEFAULT_CASE_RET();
443 }
444
445 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
446 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
447 }
448 /*
449 * Interrupt flag virtualization with CR4.VME=1.
450 */
451 else if ( enmEffOpSize == IEMMODE_16BIT
452 && (pCtx->cr4 & X86_CR4_VME) )
453 {
454 uint16_t u16Value;
455 RTUINT64U TmpRsp;
456 TmpRsp.u = pCtx->rsp;
457 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
458 if (rcStrict != VINF_SUCCESS)
459 return rcStrict;
460
461 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
462 * or before? */
463 if ( ( (u16Value & X86_EFL_IF)
464 && (fEflOld & X86_EFL_VIP))
465 || (u16Value & X86_EFL_TF) )
466 return iemRaiseGeneralProtectionFault0(pIemCpu);
467
468 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
469 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
470 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
471 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
472
473 pCtx->rsp = TmpRsp.u;
474 }
475 else
476 return iemRaiseGeneralProtectionFault0(pIemCpu);
477
478 }
479 /*
480 * Not in V8086 mode.
481 */
482 else
483 {
484 /* Pop the flags. */
485 switch (enmEffOpSize)
486 {
487 case IEMMODE_16BIT:
488 {
489 uint16_t u16Value;
490 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
491 if (rcStrict != VINF_SUCCESS)
492 return rcStrict;
493 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
494 break;
495 }
496 case IEMMODE_32BIT:
497 case IEMMODE_64BIT:
498 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
499 if (rcStrict != VINF_SUCCESS)
500 return rcStrict;
501 break;
502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
503 }
504
505 /* Merge them with the current flags. */
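 /* Privilege rules: CPL 0 may change all POPF-visible flags including IOPL and IF,
 CPL <= IOPL may change IF but not IOPL, and anything less privileged may change
 neither (those bits are silently preserved rather than faulting). */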
506 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
507 || pIemCpu->uCpl == 0)
508 {
509 fEflNew &= X86_EFL_POPF_BITS;
510 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
511 }
512 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
513 {
514 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
515 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
516 }
517 else
518 {
519 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
520 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
521 }
522 }
523
524 /*
525 * Commit the flags.
526 */
527 Assert(fEflNew & RT_BIT_32(1));
528 pCtx->eflags.u = fEflNew;
529 iemRegAddToRip(pIemCpu, cbInstr);
530
531 return VINF_SUCCESS;
532}
533
534
535/**
536 * Implements a 16-bit indirect call.
537 *
538 * @param uNewPC The new program counter (RIP) value (loaded from the
539 * operand).
540 * @remarks The operand size is fixed at 16 bits for this variant.
541 */
542IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
543{
544 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
545 uint16_t uOldPC = pCtx->ip + cbInstr;
546 if (uNewPC > pCtx->csHid.u32Limit)
547 return iemRaiseGeneralProtectionFault0(pIemCpu);
548
549 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
550 if (rcStrict != VINF_SUCCESS)
551 return rcStrict;
552
553 pCtx->rip = uNewPC;
554 return VINF_SUCCESS;
555
556}
557
558
559/**
560 * Implements a 16-bit relative call.
561 *
562 * @param offDisp The displacement offset.
563 */
564IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
565{
566 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
567 uint16_t uOldPC = pCtx->ip + cbInstr;
568 uint16_t uNewPC = uOldPC + offDisp;
569 if (uNewPC > pCtx->csHid.u32Limit)
570 return iemRaiseGeneralProtectionFault0(pIemCpu);
571
572 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
573 if (rcStrict != VINF_SUCCESS)
574 return rcStrict;
575
576 pCtx->rip = uNewPC;
577 return VINF_SUCCESS;
578}
579
580
581/**
582 * Implements a 32-bit indirect call.
583 *
584 * @param uNewPC The new program counter (RIP) value (loaded from the
585 * operand).
586 * @remarks The operand size is fixed at 32 bits for this variant.
587 */
588IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
589{
590 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
591 uint32_t uOldPC = pCtx->eip + cbInstr;
592 if (uNewPC > pCtx->csHid.u32Limit)
593 return iemRaiseGeneralProtectionFault0(pIemCpu);
594
595 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
596 if (rcStrict != VINF_SUCCESS)
597 return rcStrict;
598
599 pCtx->rip = uNewPC;
600 return VINF_SUCCESS;
601
602}
603
604
605/**
606 * Implements a 32-bit relative call.
607 *
608 * @param offDisp The displacement offset.
609 */
610IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
611{
612 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
613 uint32_t uOldPC = pCtx->eip + cbInstr;
614 uint32_t uNewPC = uOldPC + offDisp;
615 if (uNewPC > pCtx->csHid.u32Limit)
616 return iemRaiseGeneralProtectionFault0(pIemCpu);
617
618 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
619 if (rcStrict != VINF_SUCCESS)
620 return rcStrict;
621
622 pCtx->rip = uNewPC;
623 return VINF_SUCCESS;
624}
625
626
627/**
628 * Implements a 64-bit indirect call.
629 *
630 * @param uNewPC The new program counter (RIP) value (loaded from the
631 * operand).
632 * @remarks The operand size is fixed at 64 bits for this variant.
633 */
634IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
635{
636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
637 uint64_t uOldPC = pCtx->rip + cbInstr;
638 if (!IEM_IS_CANONICAL(uNewPC))
639 return iemRaiseGeneralProtectionFault0(pIemCpu);
640
641 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644
645 pCtx->rip = uNewPC;
646 return VINF_SUCCESS;
647
648}
649
650
651/**
652 * Implements a 64-bit relative call.
653 *
654 * @param offDisp The displacement offset.
655 */
656IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
657{
658 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
659 uint64_t uOldPC = pCtx->rip + cbInstr;
660 uint64_t uNewPC = uOldPC + offDisp;
661 if (!IEM_IS_CANONICAL(uNewPC))
662 return iemRaiseNotCanonical(pIemCpu);
663
664 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
665 if (rcStrict != VINF_SUCCESS)
666 return rcStrict;
667
668 pCtx->rip = uNewPC;
669 return VINF_SUCCESS;
670}
671
672
673/**
674 * Implements far jumps.
675 *
676 * @param uSel The selector.
677 * @param offSeg The segment offset.
678 * @param enmEffOpSize The effective operand size.
679 */
680IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint32_t, offSeg, IEMMODE, enmEffOpSize)
681{
682 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
683
684 /*
685 * Real mode and V8086 mode are easy. The only snag seems to be that
686 * CS.limit doesn't change and the limit check is done against the current
687 * limit.
688 */
689 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
690 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
691 {
692 if (offSeg > pCtx->csHid.u32Limit)
693 return iemRaiseGeneralProtectionFault0(pIemCpu);
694
695 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
696 pCtx->rip = offSeg & UINT16_MAX;
697 else
698 pCtx->rip = offSeg;
699 pCtx->cs = uSel;
700 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
701 /** @todo REM resets the accessed bit (seen on jmp far16 after disabling
702 * PE). Check with VT-x and AMD-V. */
703#ifdef IEM_VERIFICATION_MODE
704 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
705#endif
706 return VINF_SUCCESS;
707 }
708
709 /*
710 * Protected mode. Need to parse the specified descriptor...
711 */
712 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
713 {
714 Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
715 return iemRaiseGeneralProtectionFault0(pIemCpu);
716 }
717
718 /* Fetch the descriptor. */
719 IEMSELDESC Desc;
720 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
721 if (rcStrict != VINF_SUCCESS)
722 return rcStrict;
723
724 /* Is it there? */
725 if (!Desc.Legacy.Gen.u1Present)
726 {
727 Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
728 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
729 }
730
731 /*
732 * Deal with it according to its type.
733 */
734 if (Desc.Legacy.Gen.u1DescType)
735 {
736 /* Only code segments. */
737 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
738 {
739 Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
740 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
741 }
742
743 /* L vs D. */
744 if ( Desc.Legacy.Gen.u1Long
745 && Desc.Legacy.Gen.u1DefBig
746 && IEM_IS_LONG_MODE(pIemCpu))
747 {
748 Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
749 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
750 }
751
752 /* DPL/RPL/CPL check, where conforming segments make a difference. */
753 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
754 {
755 if (Desc.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
756 {
757 Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
758 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
759 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
760 }
761 }
762 else
763 {
764 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
765 {
766 Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
767 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
768 }
769 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
770 {
771 Log(("jmpf %04x:%08x -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
772 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
773 }
774 }
775
776 /* Limit check. (Should alternatively check for non-canonical addresses
777 here, but that is ruled out by offSeg being 32-bit, right?) */
778 uint64_t u64Base;
779 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
780 if (Desc.Legacy.Gen.u1Granularity)
781 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
782 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
783 u64Base = 0;
784 else
785 {
786 if (offSeg > cbLimit)
787 {
788 Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
789 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
790 }
791 u64Base = X86DESC_BASE(Desc.Legacy);
792 }
793
794 /*
795 * Ok, everything checked out fine. Now set the accessed bit before
796 * committing the result into CS, CSHID and RIP.
797 */
798 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
799 {
800 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
801 if (rcStrict != VINF_SUCCESS)
802 return rcStrict;
803#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
804 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
805#endif
806 }
807
808 /* commit */
809 pCtx->rip = offSeg;
810 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
811 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
812 pCtx->csHid.Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
813 pCtx->csHid.u32Limit = cbLimit;
814 pCtx->csHid.u64Base = u64Base;
815 /** @todo check if the hidden bits are loaded correctly for 64-bit
816 * mode. */
817 return VINF_SUCCESS;
818 }
819
820 /*
821 * System selector.
822 */
823 if (IEM_IS_LONG_MODE(pIemCpu))
824 switch (Desc.Legacy.Gen.u4Type)
825 {
826 case AMD64_SEL_TYPE_SYS_LDT:
827 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
828 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
829 case AMD64_SEL_TYPE_SYS_CALL_GATE:
830 case AMD64_SEL_TYPE_SYS_INT_GATE:
831 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
832 /* Call various functions to do the work. */
833 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
834 default:
835 Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
836 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
837
838 }
839 switch (Desc.Legacy.Gen.u4Type)
840 {
841 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
842 case X86_SEL_TYPE_SYS_LDT:
843 case X86_SEL_TYPE_SYS_286_CALL_GATE:
844 case X86_SEL_TYPE_SYS_TASK_GATE:
845 case X86_SEL_TYPE_SYS_286_INT_GATE:
846 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
847 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
848 case X86_SEL_TYPE_SYS_386_CALL_GATE:
849 case X86_SEL_TYPE_SYS_386_INT_GATE:
850 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
851 /* Call various functions to do the work. */
852 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
853
854 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
855 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
856 /* Call various functions to do the work. */
857 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
858
859 default:
860 Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
861 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
862 }
863}
864
865
866/**
867 * Implements far calls.
868 *
869 * @param uSel The selector.
870 * @param offSeg The segment offset.
871 * @param enmOpSize The operand size (in case we need it).
872 */
873IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876 VBOXSTRICTRC rcStrict;
877 uint64_t uNewRsp;
878 void *pvRet;
879
880 /*
881 * Real mode and V8086 mode are easy. The only snag seems to be that
882 * CS.limit doesn't change and the limit check is done against the current
883 * limit.
884 */
885 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
886 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
887 {
888 Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);
889
890 /* Check stack first - may #SS(0). */
891 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 6 : 4,
892 &pvRet, &uNewRsp);
893 if (rcStrict != VINF_SUCCESS)
894 return rcStrict;
895
896 /* Check the target address range. */
897 if (offSeg > UINT32_MAX)
898 return iemRaiseGeneralProtectionFault0(pIemCpu);
899
900 /* Everything is fine, push the return address. */
901 if (enmOpSize == IEMMODE_16BIT)
902 {
903 ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
904 ((uint16_t *)pvRet)[1] = pCtx->cs;
905 }
906 else
907 {
908 ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
909 ((uint16_t *)pvRet)[2] = pCtx->cs;
910 }
911 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
912 if (rcStrict != VINF_SUCCESS)
913 return rcStrict;
914
915 /* Branch. */
916 pCtx->rip = offSeg;
917 pCtx->cs = uSel;
918 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
919 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
920 * after disabling PE.) Check with VT-x and AMD-V. */
921#ifdef IEM_VERIFICATION_MODE
922 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
923#endif
924 return VINF_SUCCESS;
925 }
926
927 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
928}
929
930
931/**
932 * Implements retf.
933 *
934 * @param enmEffOpSize The effective operand size.
935 * @param cbPop The number of argument bytes to pop from the
936 * stack.
937 */
938IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
939{
940 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
941 VBOXSTRICTRC rcStrict;
942 uint64_t uNewRsp;
943
944 /*
945 * Real mode and V8086 mode are easy.
946 */
947 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
948 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
949 {
950 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
951 uint16_t const *pu16Frame;
952 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
953 (void const **)&pu16Frame, &uNewRsp);
954 if (rcStrict != VINF_SUCCESS)
955 return rcStrict;
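 /* A 32-bit far return frame is 8 bytes: EIP in the low dword, CS in the word at
 offset 4 and a padding word on top; the 16-bit frame is just IP followed by CS. */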
956 uint32_t uNewEip;
957 uint16_t uNewCs;
958 if (enmEffOpSize == IEMMODE_32BIT)
959 {
960 uNewCs = pu16Frame[2];
961 uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
962 }
963 else
964 {
965 uNewCs = pu16Frame[1];
966 uNewEip = pu16Frame[0];
967 }
968 /** @todo check how this is supposed to work if sp=0xfffe. */
969
970 /* Check the limit of the new EIP. */
971 /** @todo Intel pseudo code only does the limit check for 16-bit
972 * operands, AMD does not make any distinction. What is right? */
973 if (uNewEip > pCtx->csHid.u32Limit)
974 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
975
976 /* commit the operation. */
977 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
978 if (rcStrict != VINF_SUCCESS)
979 return rcStrict;
980 pCtx->rip = uNewEip;
981 pCtx->cs = uNewCs;
982 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
983 /** @todo do we load attribs and limit as well? */
984 if (cbPop)
985 iemRegAddToRsp(pCtx, cbPop);
986 return VINF_SUCCESS;
987 }
988
989 AssertFailed();
990 return VERR_NOT_IMPLEMENTED;
991}
992
993
994/**
995 * Implements retn.
996 *
997 * We're doing this in C because of the \#GP that might be raised if the popped
998 * program counter is out of bounds.
999 *
1000 * @param enmEffOpSize The effective operand size.
1001 * @param cbPop The number of argument bytes to pop from the
1002 * stack.
1003 */
1004IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1005{
1006 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1007
1008 /* Fetch the RSP from the stack. */
1009 VBOXSTRICTRC rcStrict;
1010 RTUINT64U NewRip;
1011 RTUINT64U NewRsp;
1012 NewRsp.u = pCtx->rsp;
1013 switch (enmEffOpSize)
1014 {
1015 case IEMMODE_16BIT:
1016 NewRip.u = 0;
1017 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1018 break;
1019 case IEMMODE_32BIT:
1020 NewRip.u = 0;
1021 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1022 break;
1023 case IEMMODE_64BIT:
1024 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1025 break;
1026 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1027 }
1028 if (rcStrict != VINF_SUCCESS)
1029 return rcStrict;
1030
1031 /* Check the new RSP before loading it. */
1032 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1033 * of it. The canonical test is performed here and for call. */
1034 if (enmEffOpSize != IEMMODE_64BIT)
1035 {
1036 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
1037 {
1038 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
1039 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1040 }
1041 }
1042 else
1043 {
1044 if (!IEM_IS_CANONICAL(NewRip.u))
1045 {
1046 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1047 return iemRaiseNotCanonical(pIemCpu);
1048 }
1049 }
1050
1051 /* Commit it. */
1052 pCtx->rip = NewRip.u;
1053 pCtx->rsp = NewRsp.u;
1054 if (cbPop)
1055 iemRegAddToRsp(pCtx, cbPop);
1056
1057 return VINF_SUCCESS;
1058}
1059
1060
1061/**
1062 * Implements leave.
1063 *
1064 * We're doing this in C because messing with the stack registers is annoying
1065 * since they depend on SS attributes.
1066 *
1067 * @param enmEffOpSize The effective operand size.
1068 */
1069IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1070{
1071 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1072
1073 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1074 RTUINT64U NewRsp;
1075 if (pCtx->ssHid.Attr.n.u1Long)
1076 {
1077 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1078 NewRsp.u = pCtx->rsp;
1079 NewRsp.Words.w0 = pCtx->bp;
1080 }
1081 else if (pCtx->ssHid.Attr.n.u1DefBig)
1082 NewRsp.u = pCtx->ebp;
1083 else
1084 NewRsp.u = pCtx->rbp;
1085
1086 /* Pop RBP according to the operand size. */
1087 VBOXSTRICTRC rcStrict;
1088 RTUINT64U NewRbp;
1089 switch (enmEffOpSize)
1090 {
1091 case IEMMODE_16BIT:
1092 NewRbp.u = pCtx->rbp;
1093 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1094 break;
1095 case IEMMODE_32BIT:
1096 NewRbp.u = 0;
1097 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1098 break;
1099 case IEMMODE_64BIT:
1100 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1101 break;
1102 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1103 }
1104 if (rcStrict != VINF_SUCCESS)
1105 return rcStrict;
1106
1107
1108 /* Commit it. */
1109 pCtx->rbp = NewRbp.u;
1110 pCtx->rsp = NewRsp.u;
1111 iemRegAddToRip(pIemCpu, cbInstr);
1112
1113 return VINF_SUCCESS;
1114}
1115
1116
1117/**
1118 * Implements int3 and int XX.
1119 *
1120 * @param u8Int The interrupt vector number.
1121 * @param fIsBpInstr Is it the breakpoint instruction.
1122 */
1123IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1124{
1125 /** @todo we should call TRPM to do this job. */
1126 VBOXSTRICTRC rcStrict;
1127 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1128
1129 /*
1130 * Real mode is easy.
1131 */
1132 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1133 && IEM_IS_REAL_MODE(pIemCpu))
1134 {
1135 /* read the IDT entry. */
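 /* Real-mode IDT entries are 4 bytes (IP:CS far pointer), so vector N occupies bytes
 4*N .. 4*N + 3 and must fit entirely below the IDT limit. */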
1136 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Int + 3)
1137 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Int << X86_TRAP_ERR_SEL_SHIFT));
1138 RTFAR16 Idte;
1139 rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Int);
1140 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1141 return rcStrict;
1142
1143 /* push the stack frame. */
1144 uint16_t *pu16Frame;
1145 uint64_t uNewRsp;
1146 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1147 if (rcStrict != VINF_SUCCESS)
1148 return rcStrict;
1149
1150 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1151 pu16Frame[1] = (uint16_t)pCtx->cs;
1152 pu16Frame[0] = pCtx->ip + cbInstr;
1153 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1154 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1155 return rcStrict;
1156
1157 /* load the vector address into cs:ip. */
1158 pCtx->cs = Idte.sel;
1159 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1160 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1161 pCtx->rip = Idte.off;
1162 pCtx->eflags.Bits.u1IF = 0; pCtx->eflags.Bits.u1TF = 0; /* Real-mode INT clears IF and TF. */
1163 return VINF_SUCCESS;
1164 }
1165
1166 AssertFailed();
1167 return VERR_NOT_IMPLEMENTED;
1168}
1169
1170
1171/**
1172 * Implements iret.
1173 *
1174 * @param enmEffOpSize The effective operand size.
1175 */
1176IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
1177{
1178 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1179 VBOXSTRICTRC rcStrict;
1180 uint64_t uNewRsp;
1181
1182 /*
1183 * Real mode is easy, V8086 mode is relatively similar.
1184 */
1185 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1186 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1187 {
1188 /* iret throws an exception if VME isn't enabled and IOPL is below 3. */
1189 if ( pCtx->eflags.Bits.u1VM
1190 && pCtx->eflags.Bits.u2IOPL != 3 && !(pCtx->cr4 & X86_CR4_VME))
1191 return iemRaiseGeneralProtectionFault0(pIemCpu);
1192
1193 /* Do the stack bits, but don't commit RSP before everything checks
1194 out right. */
1195 union
1196 {
1197 uint32_t const *pu32;
1198 uint16_t const *pu16;
1199 void const *pv;
1200 } uFrame;
1201 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1202 uint16_t uNewCs;
1203 uint32_t uNewEip;
1204 uint32_t uNewFlags;
1205 if (enmEffOpSize == IEMMODE_32BIT)
1206 {
1207 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1208 if (rcStrict != VINF_SUCCESS)
1209 return rcStrict;
1210 uNewEip = uFrame.pu32[0];
1211 uNewCs = (uint16_t)uFrame.pu32[1];
1212 uNewFlags = uFrame.pu32[2];
1213 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1214 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1215 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1216 | X86_EFL_ID;
1217 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1218 }
1219 else
1220 {
1221 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1222 if (rcStrict != VINF_SUCCESS)
1223 return rcStrict;
1224 uNewEip = uFrame.pu16[0];
1225 uNewCs = uFrame.pu16[1];
1226 uNewFlags = uFrame.pu16[2];
1227 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1228 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1229 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1230 /** @todo The intel pseudo code does not indicate what happens to
1231 * reserved flags. We just ignore them. */
1232 }
1233 /** @todo Check how this is supposed to work if sp=0xfffe. */
1234
1235 /* Check the limit of the new EIP. */
1236 /** @todo Only the AMD pseudo code checks the limit here, what's
1237 * right? */
1238 if (uNewEip > pCtx->csHid.u32Limit)
1239 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1240
1241 /* V8086 checks and flag adjustments */
1242 if (pCtx->eflags.Bits.u1VM)
1243 {
1244 if (pCtx->eflags.Bits.u2IOPL == 3)
1245 {
1246 /* Preserve IOPL and clear RF. */
1247 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1248 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1249 }
1250 else if ( enmEffOpSize == IEMMODE_16BIT
1251 && ( !(uNewFlags & X86_EFL_IF)
1252 || !pCtx->eflags.Bits.u1VIP )
1253 && !(uNewFlags & X86_EFL_TF) )
1254 {
1255 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1256 uNewFlags &= ~X86_EFL_VIF;
1257 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1258 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1259 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1260 }
1261 else
1262 return iemRaiseGeneralProtectionFault0(pIemCpu);
1263 }
1264
1265 /* commit the operation. */
1266 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1267 if (rcStrict != VINF_SUCCESS)
1268 return rcStrict;
1269 pCtx->rip = uNewEip;
1270 pCtx->cs = uNewCs;
1271 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1272 /** @todo do we load attribs and limit as well? */
1273 Assert(uNewFlags & X86_EFL_1);
1274 pCtx->eflags.u = uNewFlags;
1275
1276 return VINF_SUCCESS;
1277 }
1278
1279
1280 AssertFailed();
1281 return VERR_NOT_IMPLEMENTED;
1282}
1283
1284
1285/**
1286 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
1287 *
1288 * @param iSegReg The segment register number (valid).
1289 * @param uSel The new selector value.
1290 */
1291IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
1292{
1293 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1294 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
1295 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
1296
1297 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
1298
1299 /*
1300 * Real mode and V8086 mode are easy.
1301 */
1302 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1303 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1304 {
1305 *pSel = uSel;
1306 pHid->u64Base = (uint32_t)uSel << 4;
1307 /** @todo Does the CPU actually load limits and attributes in the
1308 * real/V8086 mode segment load case? It doesn't for CS in far
1309 * jumps... Affects unreal mode. */
1310 pHid->u32Limit = 0xffff;
1311 pHid->Attr.u = 0;
1312 pHid->Attr.n.u1Present = 1;
1313 pHid->Attr.n.u1DescType = 1;
1314 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
1315 ? X86_SEL_TYPE_RW
1316 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
1317
1318 iemRegAddToRip(pIemCpu, cbInstr);
1319 return VINF_SUCCESS;
1320 }
1321
1322 /*
1323 * Protected mode.
1324 *
1325 * Check if it's a null segment selector value first, that's OK for DS, ES,
1326 * FS and GS. If not null, then we have to load and parse the descriptor.
1327 */
1328 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1329 {
1330 if (iSegReg == X86_SREG_SS)
1331 {
1332 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
1333 || pIemCpu->uCpl != 0
1334 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
1335 {
1336 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
1337 return iemRaiseGeneralProtectionFault0(pIemCpu);
1338 }
1339
1340 /* In 64-bit kernel mode, the stack can be 0 because of the way
1341 interrupts are dispatched when in kernel ctx. Just load the
1342 selector value into the register and leave the hidden bits
1343 as is. */
1344 *pSel = uSel;
1345 iemRegAddToRip(pIemCpu, cbInstr);
1346 return VINF_SUCCESS;
1347 }
1348
1349 *pSel = uSel; /* Not RPL, remember :-) */
1350 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
1351 && iSegReg != X86_SREG_FS
1352 && iSegReg != X86_SREG_GS)
1353 {
1354 /** @todo figure out what this actually does, it works. Needs
1355 * testcase! */
1356 pHid->Attr.u = 0;
1357 pHid->Attr.n.u1Present = 1;
1358 pHid->Attr.n.u1Long = 1;
1359 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
1360 pHid->Attr.n.u2Dpl = 3;
1361 pHid->u32Limit = 0;
1362 pHid->u64Base = 0;
1363 }
1364 else
1365 {
1366 pHid->Attr.u = 0;
1367 pHid->u32Limit = 0;
1368 pHid->u64Base = 0;
1369 }
1370 iemRegAddToRip(pIemCpu, cbInstr);
1371 return VINF_SUCCESS;
1372 }
1373
1374 /* Fetch the descriptor. */
1375 IEMSELDESC Desc;
1376 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1377 if (rcStrict != VINF_SUCCESS)
1378 return rcStrict;
1379
1380 /* Check GPs first. */
1381 if (!Desc.Legacy.Gen.u1DescType)
1382 {
1383 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
1384 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1385 }
1386 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
1387 {
1388 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1389 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1390 {
1391 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
1392 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1393 }
1394 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1395 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1396 {
1397 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
1398 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1399 }
1400 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
1401 {
1402 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
1403 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1404 }
1405 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1406 {
1407 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1408 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1409 }
1410 }
1411 else
1412 {
1413 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
1414 {
1415 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
1416 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1417 }
1418 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1419 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1420 {
1421#if 0 /* this is what intel says. */
1422 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
1423 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1424 {
1425 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
1426 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1427 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1428 }
1429#else /* this is what makes more sense. */
1430 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
1431 {
1432 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
1433 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
1434 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1435 }
1436 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1437 {
1438 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
1439 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1440 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1441 }
1442#endif
1443 }
1444 }
1445
1446 /* Is it there? */
1447 if (!Desc.Legacy.Gen.u1Present)
1448 {
1449 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
1450 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1451 }
1452
1453 /* Get the base and limit. */
1454 uint64_t u64Base;
1455 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1456 if (Desc.Legacy.Gen.u1Granularity)
1457 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1458
1459 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
1460 && iSegReg < X86_SREG_FS)
1461 u64Base = 0;
1462 else
1463 u64Base = X86DESC_BASE(Desc.Legacy);
1464
1465 /*
1466 * Ok, everything checked out fine. Now set the accessed bit before
1467 * committing the result into the registers.
1468 */
1469 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1470 {
1471 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1472 if (rcStrict != VINF_SUCCESS)
1473 return rcStrict;
1474 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1475 }
1476
1477 /* commit */
1478 *pSel = uSel;
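 /* The shift below brings descriptor bits 40+ down to bit 0; the 0xf0ff mask keeps the
 access byte (type, S, DPL, P - bits 40-47) and the flags nibble (AVL, L, D/B, G -
 bits 52-55) while dropping the limit 19:16 nibble that sits in between. */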
1479 pHid->Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
1480 pHid->u32Limit = cbLimit;
1481 pHid->u64Base = u64Base;
1482
1483 /** @todo check if the hidden bits are loaded correctly for 64-bit
1484 * mode. */
1485
1486 iemRegAddToRip(pIemCpu, cbInstr);
1487 return VINF_SUCCESS;
1488}
1489
1490
1491/**
1492 * Implements 'mov SReg, r/m'.
1493 *
1494 * @param iSegReg The segment register number (valid).
1495 * @param uSel The new selector value.
1496 */
1497IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
1498{
1499 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1500 if (rcStrict == VINF_SUCCESS)
1501 {
1502 if (iSegReg == X86_SREG_SS)
1503 {
1504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1505 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
1506 }
1507 }
1508 return rcStrict;
1509}
1510
1511
1512/**
1513 * Implements 'pop SReg'.
1514 *
1515 * @param iSegReg The segment register number (valid).
1516 * @param enmEffOpSize The effective operand size (valid).
1517 */
1518IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
1519{
1520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1521 VBOXSTRICTRC rcStrict;
1522
1523 /*
1524 * Read the selector off the stack and join paths with mov ss, reg.
1525 */
1526 RTUINT64U TmpRsp;
1527 TmpRsp.u = pCtx->rsp;
1528 switch (enmEffOpSize)
1529 {
1530 case IEMMODE_16BIT:
1531 {
1532 uint16_t uSel;
1533 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
1534 if (rcStrict == VINF_SUCCESS)
1535 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1536 break;
1537 }
1538
1539 case IEMMODE_32BIT:
1540 {
1541 uint32_t u32Value;
1542 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
1543 if (rcStrict == VINF_SUCCESS)
1544 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
1545 break;
1546 }
1547
1548 case IEMMODE_64BIT:
1549 {
1550 uint64_t u64Value;
1551 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
1552 if (rcStrict == VINF_SUCCESS)
1553 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
1554 break;
1555 }
1556 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1557 }
1558
1559 /*
1560 * Commit the stack on success.
1561 */
1562 if (rcStrict == VINF_SUCCESS)
1563 {
1564 pCtx->rsp = TmpRsp.u;
1565 if (iSegReg == X86_SREG_SS)
1566 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
1567 }
1568 return rcStrict;
1569}
1570
1571
1572/**
1573 * Implements lgs, lfs, les, lds & lss.
1574 */
1575IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
1576 uint16_t, uSel,
1577 uint64_t, offSeg,
1578 uint8_t, iSegReg,
1579 uint8_t, iGReg,
1580 IEMMODE, enmEffOpSize)
1581{
1582 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1583 VBOXSTRICTRC rcStrict;
1584
1585 /*
1586 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
1587 */
1588 /** @todo verify and test that mov, pop and lXs perform the segment
1589 * register loading in exactly the same way. */
1590 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1591 if (rcStrict == VINF_SUCCESS)
1592 {
1593 switch (enmEffOpSize)
1594 {
1595 case IEMMODE_16BIT:
1596 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1597 break;
1598 case IEMMODE_32BIT:
1599 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1600 break;
1601 case IEMMODE_64BIT:
1602 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1603 break;
1604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1605 }
1606 }
1607
1608 return rcStrict;
1609}
1610
1611
1612/**
1613 * Implements lgdt.
1614 *
1615 * @param iEffSeg The segment of the new gdtr contents.
1616 * @param GCPtrEffSrc The address of the new gdtr contents.
1617 * @param enmEffOpSize The effective operand size.
1618 */
1619IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
1620{
1621 if (pIemCpu->uCpl != 0)
1622 return iemRaiseGeneralProtectionFault0(pIemCpu);
1623 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
1624
1625 /*
1626 * Fetch the limit and base address.
1627 */
1628 uint16_t cbLimit;
1629 RTGCPTR GCPtrBase;
1630 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
1631 if (rcStrict == VINF_SUCCESS)
1632 {
1633 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1634 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
1635 else
1636 {
1637 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1638 pCtx->gdtr.cbGdt = cbLimit;
1639 pCtx->gdtr.pGdt = GCPtrBase;
1640 }
1641 if (rcStrict == VINF_SUCCESS)
1642 iemRegAddToRip(pIemCpu, cbInstr);
1643 }
1644 return rcStrict;
1645}
1646
1647
1648/**
1649 * Implements lidt.
1650 *
1651 * @param iEffSeg The segment of the new idtr contents.
1652 * @param GCPtrEffSrc The address of the new idtr contents.
1653 * @param enmEffOpSize The effective operand size.
1654 */
1655IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
1656{
1657 if (pIemCpu->uCpl != 0)
1658 return iemRaiseGeneralProtectionFault0(pIemCpu);
1659 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
1660
1661 /*
1662 * Fetch the limit and base address.
1663 */
1664 uint16_t cbLimit;
1665 RTGCPTR GCPtrBase;
1666 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
1667 if (rcStrict == VINF_SUCCESS)
1668 {
1669 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1670 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
1671 else
1672 {
1673 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1674 pCtx->idtr.cbIdt = cbLimit;
1675 pCtx->idtr.pIdt = GCPtrBase;
1676 }
1677 if (rcStrict == VINF_SUCCESS)
1678 iemRegAddToRip(pIemCpu, cbInstr);
1679 }
1680 return rcStrict;
1681}
1682
1683
1684/**
1685 * Implements lldt.
1686 *
1687 * @param uNewLdt The new LDT selector value.
1688 */
1689IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
1690{
1691 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1692
1693 /*
1694 * Check preconditions.
1695 */
1696 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1697 {
1698 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
1699 return iemRaiseUndefinedOpcode(pIemCpu);
1700 }
1701 if (pIemCpu->uCpl != 0)
1702 {
1703 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
1704 return iemRaiseGeneralProtectionFault0(pIemCpu);
1705 }
1706 if (uNewLdt & X86_SEL_LDT)
1707 {
1708 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
1709 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & (X86_SEL_MASK | X86_SEL_LDT));
1710 }
1711
1712 /*
1713 * Now, loading a NULL selector is easy.
1714 */
1715 if ((uNewLdt & X86_SEL_MASK) == 0)
1716 {
1717 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
1718 /** @todo check if the actual value is loaded or if it's always 0. */
1719 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1720 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
1721 else
1722 pCtx->ldtr = 0;
1723 pCtx->ldtrHid.Attr.u = 0;
1724 pCtx->ldtrHid.u64Base = 0;
1725 pCtx->ldtrHid.u32Limit = 0;
1726
1727 iemRegAddToRip(pIemCpu, cbInstr);
1728 return VINF_SUCCESS;
1729 }
1730
1731 /*
1732 * Read the descriptor.
1733 */
1734 IEMSELDESC Desc;
1735 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
1736 if (rcStrict != VINF_SUCCESS)
1737 return rcStrict;
1738
1739 /* Check GPs first. */
1740 if (Desc.Legacy.Gen.u1DescType)
1741 {
1742 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
1743 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
1744 }
1745 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1746 {
1747 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
1748 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
1749 }
1750 uint64_t u64Base;
1751 if (!IEM_IS_LONG_MODE(pIemCpu))
1752 u64Base = X86DESC_BASE(Desc.Legacy);
1753 else
1754 {
1755 if (Desc.Long.Gen.u5Zeros)
1756 {
1757 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
1758 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
1759 }
1760
1761 u64Base = X86DESC64_BASE(Desc.Long);
1762 if (!IEM_IS_CANONICAL(u64Base))
1763 {
1764 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
1765 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
1766 }
1767 }
1768
1769 /* NP */
1770 if (!Desc.Legacy.Gen.u1Present)
1771 {
1772 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
1773 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
1774 }
1775
1776 /*
1777 * It checks out alright, update the registers.
1778 */
1779/** @todo check if the actual value is loaded or if the RPL is dropped */
1780 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1781 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
1782 else
1783 pCtx->ldtr = uNewLdt & X86_SEL_MASK;
1784 pCtx->ldtrHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1785 pCtx->ldtrHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
1786 pCtx->ldtrHid.u64Base = u64Base;
1787
1788 iemRegAddToRip(pIemCpu, cbInstr);
1789 return VINF_SUCCESS;
1790}
1791
1792
1793/**
1794 * Implements ltr.
1795 *
1796 * @param uNewTr The new task register (TSS) selector value.
1797 */
1798IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
1799{
1800 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1801
1802 /*
1803 * Check preconditions.
1804 */
1805 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1806 {
1807 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
1808 return iemRaiseUndefinedOpcode(pIemCpu);
1809 }
1810 if (pIemCpu->uCpl != 0)
1811 {
1812 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
1813 return iemRaiseGeneralProtectionFault0(pIemCpu);
1814 }
1815 if (uNewTr & X86_SEL_LDT)
1816 {
1817 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
1818 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & (X86_SEL_MASK | X86_SEL_LDT));
1819 }
1820 if ((uNewTr & X86_SEL_MASK) == 0)
1821 {
1822 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
1823 return iemRaiseGeneralProtectionFault0(pIemCpu);
1824 }
1825
1826 /*
1827 * Read the descriptor.
1828 */
1829 IEMSELDESC Desc;
1830 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
1831 if (rcStrict != VINF_SUCCESS)
1832 return rcStrict;
1833
1834 /* Check GPs first. */
1835 if (Desc.Legacy.Gen.u1DescType)
1836 {
1837 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
1838 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
1839 }
1840 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
1841 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
1842 || IEM_IS_LONG_MODE(pIemCpu)) )
1843 {
1844 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
1845 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
1846 }
1847 uint64_t u64Base;
1848 if (!IEM_IS_LONG_MODE(pIemCpu))
1849 u64Base = X86DESC_BASE(Desc.Legacy);
1850 else
1851 {
1852 if (Desc.Long.Gen.u5Zeros)
1853 {
1854 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
1855 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
1856 }
1857
1858 u64Base = X86DESC64_BASE(Desc.Long);
1859 if (!IEM_IS_CANONICAL(u64Base))
1860 {
1861 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
1862 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
1863 }
1864 }
1865
1866 /* NP */
1867 if (!Desc.Legacy.Gen.u1Present)
1868 {
1869 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
1870 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
1871 }
1872
1873 /*
1874 * Set it busy.
1875 * Note! Intel says this should lock down the whole descriptor, but we'll
1876 * restrict ourselves to 32-bit for now due to lack of inline
1877 * assembly and such.
1878 */
1879 void *pvDesc;
1880 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
1881 if (rcStrict != VINF_SUCCESS)
1882 return rcStrict;
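 /* The TSS busy bit is bit 1 of the descriptor type field in byte 5, i.e.
    absolute bit 41 (40 + 1) of the 8-byte descriptor.  The switch below biases
    the pointer and the bit index so that ASMAtomicBitSet always operates on a
    32-bit aligned address, whatever the descriptor's alignment. */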
1883 switch ((uintptr_t)pvDesc & 3)
1884 {
1885 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
1886 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
1887 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 16); break;
1888 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 8); break;
1889 }
1890 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
1891 if (rcStrict != VINF_SUCCESS)
1892 return rcStrict;
1893 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
1894
1895 /*
1896 * It checks out alright, update the registers.
1897 */
1898/** @todo check if the actual value is loaded or if the RPL is dropped */
1899 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1900 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
1901 else
1902 pCtx->tr = uNewTr & X86_SEL_MASK;
1903 pCtx->trHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
1904 pCtx->trHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
1905 pCtx->trHid.u64Base = u64Base;
1906
1907 iemRegAddToRip(pIemCpu, cbInstr);
1908 return VINF_SUCCESS;
1909}
1910
1911
1912/**
1913 * Implements mov GReg,CRx.
1914 *
1915 * @param iGReg The general register to store the CRx value in.
1916 * @param iCrReg The CRx register to read (valid).
1917 */
1918IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
1919{
1920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1921 if (pIemCpu->uCpl != 0)
1922 return iemRaiseGeneralProtectionFault0(pIemCpu);
1923 Assert(!pCtx->eflags.Bits.u1VM);
1924
1925 /* read it */
1926 uint64_t crX;
1927 switch (iCrReg)
1928 {
1929 case 0: crX = pCtx->cr0; break;
1930 case 2: crX = pCtx->cr2; break;
1931 case 3: crX = pCtx->cr3; break;
1932 case 4: crX = pCtx->cr4; break;
1933 case 8:
1934 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1935 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
1936 else
1937 crX = 0xff;
1938 break;
1939 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
1940 }
1941
1942 /* store it */
1943 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1944 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
1945 else
1946 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
1947
1948 iemRegAddToRip(pIemCpu, cbInstr);
1949 return VINF_SUCCESS;
1950}
1951
1952
1953/**
1954 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
1955 *
1956 * @param iCrReg The CRx register to write (valid).
1957 * @param uNewCrX The new value.
1958 */
1959IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
1960{
1961 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1962 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1963 VBOXSTRICTRC rcStrict;
1964 int rc;
1965
1966 /*
1967 * Try to store it.
1968 * Unfortunately, CPUM only does a tiny bit of the work.
1969 */
1970 switch (iCrReg)
1971 {
1972 case 0:
1973 {
1974 /*
1975 * Perform checks.
1976 */
1977 uint64_t const uOldCrX = pCtx->cr0;
1978 uNewCrX |= X86_CR0_ET; /* hardcoded */
1979
1980 /* Check for reserved bits. */
1981 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1982 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1983 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
1984 if (uNewCrX & ~(uint64_t)fValid)
1985 {
1986 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
1987 return iemRaiseGeneralProtectionFault0(pIemCpu);
1988 }
1989
1990 /* Check for invalid combinations. */
1991 if ( (uNewCrX & X86_CR0_PG)
1992 && !(uNewCrX & X86_CR0_PE) )
1993 {
1994 Log(("Trying to set CR0.PG without CR0.PE\n"));
1995 return iemRaiseGeneralProtectionFault0(pIemCpu);
1996 }
1997
1998 if ( !(uNewCrX & X86_CR0_CD)
1999 && (uNewCrX & X86_CR0_NW) )
2000 {
2001 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2002 return iemRaiseGeneralProtectionFault0(pIemCpu);
2003 }
2004
2005 /* Long mode consistency checks. */
2006 if ( (uNewCrX & X86_CR0_PG)
2007 && !(uOldCrX & X86_CR0_PG)
2008 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2009 {
2010 if (!(pCtx->cr4 & X86_CR4_PAE))
2011 {
2012 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
2013 return iemRaiseGeneralProtectionFault0(pIemCpu);
2014 }
2015 if (pCtx->csHid.Attr.n.u1Long)
2016 {
2017 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
2018 return iemRaiseGeneralProtectionFault0(pIemCpu);
2019 }
2020 }
2021
2022 /** @todo check reserved PDPTR bits as AMD states. */
2023
2024 /*
2025 * Change CR0.
2026 */
2027 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2028 {
2029 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
2030 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
2031 }
2032 else
2033 pCtx->cr0 = uNewCrX;
2034 Assert(pCtx->cr0 == uNewCrX);
2035
2036 /*
2037 * Change EFER.LMA if entering or leaving long mode.
2038 */
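 /* EFER.LMA is the "long mode active" status bit: architecturally it reflects
    EFER.LME && CR0.PG, so it has to be toggled whenever paging is switched on
    or off while LME is set. */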
2039 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
2040 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2041 {
2042 uint64_t NewEFER = pCtx->msrEFER;
2043 if (uNewCrX & X86_CR0_PG)
2044 NewEFER |= MSR_K6_EFER_LMA;
2045 else
2046 NewEFER &= ~MSR_K6_EFER_LMA;
2047
2048 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2049 CPUMSetGuestEFER(pVCpu, NewEFER);
2050 else
2051 pCtx->msrEFER = NewEFER;
2052 Assert(pCtx->msrEFER == NewEFER);
2053 }
2054
2055 /*
2056 * Inform PGM.
2057 */
2058 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2059 {
2060 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
2061 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
2062 {
2063 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
2064 AssertRCReturn(rc, rc);
2065 /* ignore informational status codes */
2066 }
2067 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2068 /** @todo Status code management. */
2069 }
2070 else
2071 rcStrict = VINF_SUCCESS;
2072 break;
2073 }
2074
2075 /*
2076 * CR2 can be changed without any restrictions.
2077 */
2078 case 2:
2079 pCtx->cr2 = uNewCrX;
2080 rcStrict = VINF_SUCCESS;
2081 break;
2082
2083 /*
2084 * CR3 is relatively simple, although AMD and Intel have different
2085 * accounts of how setting reserved bits is handled. We take Intel's
2086 * word for the lower bits and AMD's for the high bits (63:52).
2087 */
2088 /** @todo Testcase: Setting reserved bits in CR3, especially before
2089 * enabling paging. */
2090 case 3:
2091 {
2092 /* check / mask the value. */
2093 if (uNewCrX & UINT64_C(0xfff0000000000000))
2094 {
2095 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
2096 return iemRaiseGeneralProtectionFault0(pIemCpu);
2097 }
2098
2099 uint64_t fValid;
2100 if ( (pCtx->cr4 & X86_CR4_PAE)
2101 && (pCtx->msrEFER & MSR_K6_EFER_LME))
2102 fValid = UINT64_C(0x000ffffffffff014);
2103 else if (pCtx->cr4 & X86_CR4_PAE)
2104 fValid = UINT64_C(0xfffffff4);
2105 else
2106 fValid = UINT64_C(0xfffff014);
2107 if (uNewCrX & ~fValid)
2108 {
2109 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
2110 uNewCrX, uNewCrX & ~fValid));
2111 uNewCrX &= fValid;
2112 }
2113
2114 /** @todo If we're in PAE mode we should check the PDPTRs for
2115 * invalid bits. */
2116
2117 /* Make the change. */
2118 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2119 {
2120 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
2121 AssertRCSuccessReturn(rc, rc);
2122 }
2123 else
2124 pCtx->cr3 = uNewCrX;
2125
2126 /* Inform PGM. */
2127 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2128 {
2129 if (pCtx->cr0 & X86_CR0_PG)
2130 {
2131 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2132 AssertRCReturn(rc, rc);
2133 /* ignore informational status codes */
2134 /** @todo status code management */
2135 }
2136 }
2137 rcStrict = VINF_SUCCESS;
2138 break;
2139 }
2140
2141 /*
2142 * CR4 is a bit more tedious as there are bits which cannot be cleared
2143 * under some circumstances and such.
2144 */
2145 case 4:
2146 {
2147 uint64_t const uOldCrX = pCtx->cr4;
2148
2149 /* reserved bits */
2150 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
2151 | X86_CR4_TSD | X86_CR4_DE
2152 | X86_CR4_PSE | X86_CR4_PAE
2153 | X86_CR4_MCE | X86_CR4_PGE
2154 | X86_CR4_PCE | X86_CR4_OSFSXR
2155 | X86_CR4_OSXMMEEXCPT;
2156 //if (xxx)
2157 // fValid |= X86_CR4_VMXE;
2158 //if (xxx)
2159 // fValid |= X86_CR4_OSXSAVE;
2160 if (uNewCrX & ~(uint64_t)fValid)
2161 {
2162 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2163 return iemRaiseGeneralProtectionFault0(pIemCpu);
2164 }
2165
2166 /* long mode checks. */
2167 if ( (uOldCrX & X86_CR4_PAE)
2168 && !(uNewCrX & X86_CR4_PAE)
2169 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
2170 {
2171 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
2172 return iemRaiseGeneralProtectionFault0(pIemCpu);
2173 }
2174
2175
2176 /*
2177 * Change it.
2178 */
2179 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2180 {
2181 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
2182 AssertRCSuccessReturn(rc, rc);
2183 }
2184 else
2185 pCtx->cr4 = uNewCrX;
2186 Assert(pCtx->cr4 == uNewCrX);
2187
2188 /*
2189 * Notify SELM and PGM.
2190 */
2191 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2192 {
2193 /* SELM - VME may change things wrt the TSS shadowing. */
2194 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
2195 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2196
2197 /* PGM - flushing and mode. */
2198 if ( (uNewCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
2199 != (uOldCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)) )
2200 {
2201 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
2202 AssertRCReturn(rc, rc);
2203 /* ignore informational status codes */
2204 }
2205 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2206 /** @todo Status code management. */
2207 }
2208 else
2209 rcStrict = VINF_SUCCESS;
2210 break;
2211 }
2212
2213 /*
2214 * CR8 maps to the APIC TPR.
2215 */
2216 case 8:
2217 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2218 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2219 else
2220 rcStrict = VINF_SUCCESS;
2221 break;
2222
2223 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2224 }
2225
2226 /*
2227 * Advance the RIP on success.
2228 */
2229 /** @todo Status code management. */
2230 if (rcStrict == VINF_SUCCESS)
2231 iemRegAddToRip(pIemCpu, cbInstr);
2232 return rcStrict;
2233
2234}
2235
2236
2237/**
2238 * Implements mov CRx,GReg.
2239 *
2240 * @param iCrReg The CRx register to write (valid).
2241 * @param iGReg The general register to load the DRx value from.
2242 */
2243IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
2244{
2245 if (pIemCpu->uCpl != 0)
2246 return iemRaiseGeneralProtectionFault0(pIemCpu);
2247 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2248
2249 /*
2250 * Read the new value from the source register and call common worker.
2251 */
2252 uint64_t uNewCrX;
2253 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2254 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
2255 else
2256 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
2257 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
2258}
2259
2260
2261/**
2262 * Implements 'LMSW r/m16'
2263 *
2264 * @param u16NewMsw The new value.
2265 */
2266IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
2267{
2268 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2269
2270 if (pIemCpu->uCpl != 0)
2271 return iemRaiseGeneralProtectionFault0(pIemCpu);
2272 Assert(!pCtx->eflags.Bits.u1VM);
2273
2274 /*
2275 * Compose the new CR0 value and call common worker.
2276 */
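 /* Note: LMSW only touches PE, MP, EM and TS.  PE is OR'ed into the old value
    and never masked out, so LMSW can set but not clear CR0.PE (per the SDM). */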
2277 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2278 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2279 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
2280}
2281
2282
2283/**
2284 * Implements 'CLTS'.
2285 */
2286IEM_CIMPL_DEF_0(iemCImpl_clts)
2287{
2288 if (pIemCpu->uCpl != 0)
2289 return iemRaiseGeneralProtectionFault0(pIemCpu);
2290
2291 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2292 uint64_t uNewCr0 = pCtx->cr0;
2293 uNewCr0 &= ~X86_CR0_TS;
2294 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
2295}
2296
2297
2298/**
2299 * Implements mov GReg,DRx.
2300 *
2301 * @param iGReg The general register to store the DRx value in.
2302 * @param iDrReg The DRx register to read (0-7).
2303 */
2304IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
2305{
2306 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2307
2308 /*
2309 * Check preconditions.
2310 */
2311
2312 /* Raise GPs. */
2313 if (pIemCpu->uCpl != 0)
2314 return iemRaiseGeneralProtectionFault0(pIemCpu);
2315 Assert(!pCtx->eflags.Bits.u1VM);
2316
2317 if ( (iDrReg == 4 || iDrReg == 5)
2318 && (pCtx->cr4 & X86_CR4_DE) )
2319 {
2320 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
2321 return iemRaiseGeneralProtectionFault0(pIemCpu);
2322 }
2323
2324 /* Raise #DB if general access detect is enabled. */
2325 if (pCtx->dr[7] & X86_DR7_GD)
2326 {
2327 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
2328 return iemRaiseDebugException(pIemCpu);
2329 }
2330
2331 /*
2332 * Read the debug register and store it in the specified general register.
2333 */
2334 uint64_t drX;
2335 switch (iDrReg)
2336 {
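 /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear (the CR4.DE=1 case was
    rejected with #GP(0) above), and the reserved bits of DR6/DR7 are forced
    to their fixed read values. */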
2337 case 0: drX = pCtx->dr[0]; break;
2338 case 1: drX = pCtx->dr[1]; break;
2339 case 2: drX = pCtx->dr[2]; break;
2340 case 3: drX = pCtx->dr[3]; break;
2341 case 6:
2342 case 4:
2343 drX = pCtx->dr[6];
2344 drX &= ~RT_BIT_32(12);
2345 drX |= UINT32_C(0xffff0ff0);
2346 break;
2347 case 7:
2348 case 5:
2349 drX = pCtx->dr[7];
2350 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
2351 drX |= RT_BIT_32(10);
2352 break;
2353 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2354 }
2355
2356 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2357 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
2358 else
2359 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
2360
2361 iemRegAddToRip(pIemCpu, cbInstr);
2362 return VINF_SUCCESS;
2363}
2364
2365
2366/**
2367 * Implements mov DRx,GReg.
2368 *
2369 * @param iDrReg The DRx register to write (valid).
2370 * @param iGReg The general register to load the DRx value from.
2371 */
2372IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
2373{
2374 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2375
2376 /*
2377 * Check preconditions.
2378 */
2379 if (pIemCpu->uCpl != 0)
2380 return iemRaiseGeneralProtectionFault0(pIemCpu);
2381 Assert(!pCtx->eflags.Bits.u1VM);
2382
2383 if ( (iDrReg == 4 || iDrReg == 5)
2384 && (pCtx->cr4 & X86_CR4_DE) )
2385 {
2386 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
2387 return iemRaiseGeneralProtectionFault0(pIemCpu);
2388 }
2389
2390 /* Raise #DB if general access detect is enabled. */
2391 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
2392 * \#GP? */
2393 if (pCtx->dr[7] & X86_DR7_GD)
2394 {
2395 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
2396 return iemRaiseDebugException(pIemCpu);
2397 }
2398
2399 /*
2400 * Read the new value from the source register.
2401 */
2402 uint64_t uNewDrX;
2403 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2404 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
2405 else
2406 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
2407
2408 /*
2409 * Adjust it.
2410 */
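 /* As on the read side, DR4/DR5 alias DR6/DR7 here since CR4.DE=1 already
    raised #GP(0); non-zero upper halves are rejected and the fixed DR6/DR7
    bits are forced to their architectural values. */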
2411 switch (iDrReg)
2412 {
2413 case 0:
2414 case 1:
2415 case 2:
2416 case 3:
2417 /* nothing to adjust */
2418 break;
2419
2420 case 6:
2421 case 4:
2422 if (uNewDrX & UINT64_C(0xffffffff00000000))
2423 {
2424 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
2425 return iemRaiseGeneralProtectionFault0(pIemCpu);
2426 }
2427 uNewDrX &= ~RT_BIT_32(12);
2428 uNewDrX |= UINT32_C(0xffff0ff0);
2429 break;
2430
2431 case 7:
2432 case 5:
2433 if (uNewDrX & UINT64_C(0xffffffff00000000))
2434 {
2435 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
2436 return iemRaiseGeneralProtectionFault0(pIemCpu);
2437 }
2438 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
2439 uNewDrX |= RT_BIT_32(10);
2440 break;
2441
2442 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2443 }
2444
2445 /*
2446 * Do the actual setting.
2447 */
2448 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2449 {
2450 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
2451 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
2452 }
2453 else
2454 pCtx->dr[iDrReg] = uNewDrX;
2455
2456 iemRegAddToRip(pIemCpu, cbInstr);
2457 return VINF_SUCCESS;
2458}
2459
2460
2461/**
2462 * Implements RDTSC.
2463 */
2464IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
2465{
2466 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2467
2468 /*
2469 * Check preconditions.
2470 */
2471 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
2472 return iemRaiseUndefinedOpcode(pIemCpu);
2473
2474 if ( (pCtx->cr4 & X86_CR4_TSD)
2475 && pIemCpu->uCpl != 0)
2476 {
2477 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
2478 return iemRaiseGeneralProtectionFault0(pIemCpu);
2479 }
2480
2481 /*
2482 * Do the job.
2483 */
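 /* RDTSC returns the 64-bit TSC in EDX:EAX; in 64-bit mode the upper halves
    of RAX and RDX are cleared. */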
2484 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
2485 pCtx->rax = (uint32_t)uTicks;
2486 pCtx->rdx = uTicks >> 32;
2487
2488 iemRegAddToRip(pIemCpu, cbInstr);
2489 return VINF_SUCCESS;
2490}
2491
2492
2493/**
2494 * Implements 'IN eAX, port'.
2495 *
2496 * @param u16Port The source port.
2497 * @param cbReg The register size.
2498 */
2499IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
2500{
2501 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2502
2503 /*
2504 * CPL check
2505 */
2506 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
2507 if (rcStrict != VINF_SUCCESS)
2508 return rcStrict;
2509
2510 /*
2511 * Perform the I/O.
2512 */
2513 uint32_t u32Value;
2514 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2515 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
2516 else
2517 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
2518 if (IOM_SUCCESS(rcStrict))
2519 {
2520 switch (cbReg)
2521 {
2522 case 1: pCtx->al = (uint8_t)u32Value; break;
2523 case 2: pCtx->ax = (uint16_t)u32Value; break;
2524 case 4: pCtx->rax = u32Value; break;
2525 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
2526 }
2527 iemRegAddToRip(pIemCpu, cbInstr);
2528 pIemCpu->cPotentialExits++;
2529 }
2530 /** @todo massage rcStrict. */
2531 return rcStrict;
2532}
2533
2534
2535/**
2536 * Implements 'IN eAX, DX'.
2537 *
2538 * @param cbReg The register size.
2539 */
2540IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
2541{
2542 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
2543}
2544
2545
2546/**
2547 * Implements 'OUT port, eAX'.
2548 *
2549 * @param u16Port The destination port.
2550 * @param cbReg The register size.
2551 */
2552IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
2553{
2554 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2555
2556 /*
2557 * CPL check
2558 */
2559 if ( (pCtx->cr0 & X86_CR0_PE)
2560 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
2561 || pCtx->eflags.Bits.u1VM) )
2562 {
2563 /** @todo I/O port permission bitmap check */
2564 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
2565 }
2566
2567 /*
2568 * Perform the I/O.
2569 */
2570 uint32_t u32Value;
2571 switch (cbReg)
2572 {
2573 case 1: u32Value = pCtx->al; break;
2574 case 2: u32Value = pCtx->ax; break;
2575 case 4: u32Value = pCtx->eax; break;
2576 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
2577 }
2578 VBOXSTRICTRC rc;
2579 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2580 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
2581 else
2582 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
2583 if (IOM_SUCCESS(rc))
2584 {
2585 iemRegAddToRip(pIemCpu, cbInstr);
2586 pIemCpu->cPotentialExits++;
2587 /** @todo massage rc. */
2588 }
2589 return rc;
2590}
2591
2592
2593/**
2594 * Implements 'OUT DX, eAX'.
2595 *
2596 * @param cbReg The register size.
2597 */
2598IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
2599{
2600 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
2601}
2602
2603
2604/**
2605 * Implements 'CLI'.
2606 */
2607IEM_CIMPL_DEF_0(iemCImpl_cli)
2608{
2609 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2610
2611 if (pCtx->cr0 & X86_CR0_PE)
2612 {
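 /* Protected mode: CPL <= IOPL may clear IF directly; CPL 3 with CR4.PVI set
    clears VIF instead; anything else gets #GP(0).  V8086 mode: IOPL 3 clears
    IF, IOPL < 3 with CR4.VME clears VIF, otherwise #GP(0). */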
2613 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
2614 if (!pCtx->eflags.Bits.u1VM)
2615 {
2616 if (pIemCpu->uCpl <= uIopl)
2617 pCtx->eflags.Bits.u1IF = 0;
2618 else if ( pIemCpu->uCpl == 3
2619 && (pCtx->cr4 & X86_CR4_PVI) )
2620 pCtx->eflags.Bits.u1VIF = 0;
2621 else
2622 return iemRaiseGeneralProtectionFault0(pIemCpu);
2623 }
2624 /* V8086 */
2625 else if (uIopl == 3)
2626 pCtx->eflags.Bits.u1IF = 0;
2627 else if ( uIopl < 3
2628 && (pCtx->cr4 & X86_CR4_VME) )
2629 pCtx->eflags.Bits.u1VIF = 0;
2630 else
2631 return iemRaiseGeneralProtectionFault0(pIemCpu);
2632 }
2633 /* real mode */
2634 else
2635 pCtx->eflags.Bits.u1IF = 0;
2636 iemRegAddToRip(pIemCpu, cbInstr);
2637 return VINF_SUCCESS;
2638}
2639
2640
2641/**
2642 * Implements 'STI'.
2643 */
2644IEM_CIMPL_DEF_0(iemCImpl_sti)
2645{
2646 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2647
2648 if (pCtx->cr0 & X86_CR0_PE)
2649 {
2650 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
2651 if (!pCtx->eflags.Bits.u1VM)
2652 {
2653 if (pIemCpu->uCpl <= uIopl)
2654 pCtx->eflags.Bits.u1IF = 1;
2655 else if ( pIemCpu->uCpl == 3
2656 && (pCtx->cr4 & X86_CR4_PVI)
2657 && !pCtx->eflags.Bits.u1VIP )
2658 pCtx->eflags.Bits.u1VIF = 1;
2659 else
2660 return iemRaiseGeneralProtectionFault0(pIemCpu);
2661 }
2662 /* V8086 */
2663 else if (uIopl == 3)
2664 pCtx->eflags.Bits.u1IF = 1;
2665 else if ( uIopl < 3
2666 && (pCtx->cr4 & X86_CR4_VME)
2667 && !pCtx->eflags.Bits.u1VIP )
2668 pCtx->eflags.Bits.u1VIF = 1;
2669 else
2670 return iemRaiseGeneralProtectionFault0(pIemCpu);
2671 }
2672 /* real mode */
2673 else
2674 pCtx->eflags.Bits.u1IF = 1;
2675
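 /* STI sets up an interrupt shadow: the instruction following it completes
    before any maskable interrupt can be delivered. */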
2676 iemRegAddToRip(pIemCpu, cbInstr);
2677 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2678 return VINF_SUCCESS;
2679}
2680
2681
2682/**
2683 * Implements 'HLT'.
2684 */
2685IEM_CIMPL_DEF_0(iemCImpl_hlt)
2686{
2687 if (pIemCpu->uCpl != 0)
2688 return iemRaiseGeneralProtectionFault0(pIemCpu);
2689 iemRegAddToRip(pIemCpu, cbInstr);
2690 return VINF_EM_HALT;
2691}
2692
2693
2694/**
2695 * Implements 'CPUID'.
2696 */
2697IEM_CIMPL_DEF_0(iemCImpl_cpuid)
2698{
2699 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2700
2701 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
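 /* CPUID returns 32-bit values; in 64-bit mode the upper halves of RAX, RBX,
    RCX and RDX are cleared, hence the masking below. */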
2702 pCtx->rax &= UINT32_C(0xffffffff);
2703 pCtx->rbx &= UINT32_C(0xffffffff);
2704 pCtx->rcx &= UINT32_C(0xffffffff);
2705 pCtx->rdx &= UINT32_C(0xffffffff);
2706
2707 iemRegAddToRip(pIemCpu, cbInstr);
2708 return VINF_SUCCESS;
2709}
2710
2711
2712/*
2713 * Instantiate the various string operation combinations.
2714 */
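/* Each inclusion of IEMAllCImplStrInstr.cpp.h expands the repeated string
   instruction workers for one operand size / address size combination; the
   included header is expected to #undef OP_SIZE and ADDR_SIZE so they can be
   redefined for the next combination. */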
2715#define OP_SIZE 8
2716#define ADDR_SIZE 16
2717#include "IEMAllCImplStrInstr.cpp.h"
2718#define OP_SIZE 8
2719#define ADDR_SIZE 32
2720#include "IEMAllCImplStrInstr.cpp.h"
2721#define OP_SIZE 8
2722#define ADDR_SIZE 64
2723#include "IEMAllCImplStrInstr.cpp.h"
2724
2725#define OP_SIZE 16
2726#define ADDR_SIZE 16
2727#include "IEMAllCImplStrInstr.cpp.h"
2728#define OP_SIZE 16
2729#define ADDR_SIZE 32
2730#include "IEMAllCImplStrInstr.cpp.h"
2731#define OP_SIZE 16
2732#define ADDR_SIZE 64
2733#include "IEMAllCImplStrInstr.cpp.h"
2734
2735#define OP_SIZE 32
2736#define ADDR_SIZE 16
2737#include "IEMAllCImplStrInstr.cpp.h"
2738#define OP_SIZE 32
2739#define ADDR_SIZE 32
2740#include "IEMAllCImplStrInstr.cpp.h"
2741#define OP_SIZE 32
2742#define ADDR_SIZE 64
2743#include "IEMAllCImplStrInstr.cpp.h"
2744
2745#define OP_SIZE 64
2746#define ADDR_SIZE 32
2747#include "IEMAllCImplStrInstr.cpp.h"
2748#define OP_SIZE 64
2749#define ADDR_SIZE 64
2750#include "IEMAllCImplStrInstr.cpp.h"
2751
2752
2753/**
2754 * Implements 'FINIT' and 'FNINIT'.
2755 *
2756 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
2757 * not.
2758 */
2759IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
2760{
2761 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2762
2763 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
2764 return iemRaiseDeviceNotAvailable(pIemCpu);
2765 /** @todo trigger pending exceptions:
2766 if (fCheckXcpts && TODO )
2767 return iemRaiseMathFault(pIemCpu);
2768 */
2769
2770 if (iemFRegIsFxSaveFormat(pIemCpu))
2771 {
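 /* FNINIT defaults: FCW=0x37F (all exceptions masked, 64-bit precision,
    round to nearest), FSW=0, all register tags empty, and the instruction /
    data pointers and last opcode cleared. */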
2772 pCtx->fpu.FCW = 0x37f;
2773 pCtx->fpu.FSW = 0;
2774 pCtx->fpu.FTW = 0x00; /* abridged tag word: all registers empty */
2775 pCtx->fpu.FPUDP = 0;
2776 pCtx->fpu.DS = 0; //??
2777 pCtx->fpu.FPUIP = 0;
2778 pCtx->fpu.CS = 0; //??
2779 pCtx->fpu.FOP = 0;
2780 }
2781 else
2782 {
2783 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
2784 pFpu->FCW = 0x37f;
2785 pFpu->FSW = 0;
2786 pFpu->FTW = 0xffff;
2787 pFpu->FPUOO = 0; //??
2788 pFpu->FPUOS = 0; //??
2789 pFpu->FPUIP = 0;
2790 pFpu->CS = 0; //??
2791 pFpu->FOP = 0;
2792 }
2793
2794 iemRegAddToRip(pIemCpu, cbInstr);
2795 return VINF_SUCCESS;
2796}
2797
2798
2799/** @} */
2800