VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 36841

Last change on this file since 36841 was 36841, checked in by vboxsync, 14 years ago

IEM: CMOVcc, JMPF Ep.

1/* $Id: IEMAllCImpl.cpp.h 36841 2011-04-26 00:09:06Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 /** @todo I/O port permission bitmap check */
42 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
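/*
 * Note on the @todo above: when CPL > IOPL (or in V8086 mode) the CPU has to
 * consult the I/O permission bitmap in the TSS before letting the access
 * through.  The following is a minimal, self-contained sketch of that walk,
 * written against a raw TSS image rather than IEM's memory helpers; the
 * function name and parameters are hypothetical and it is not used anywhere.
 */
static bool iemSketchIoBitmapPermitsAccess(const uint8_t *pabTss, uint32_t cbTssLimit,
                                           uint16_t u16Port, uint8_t cbOperand)
{
    /* The 16-bit I/O map base lives at TSS offset 0x66. */
    if (cbTssLimit < 0x67)
        return false;
    uint32_t offIoBitmap = pabTss[0x66] | ((uint32_t)pabTss[0x67] << 8);

    /* Every bit covering the accessed port bytes must be clear and must lie
       within the TSS limit, otherwise the access raises #GP(0). */
    for (uint32_t iBit = u16Port; iBit < (uint32_t)u16Port + cbOperand; iBit++)
    {
        uint32_t offByte = offIoBitmap + iBit / 8;
        if (offByte > cbTssLimit)
            return false;
        if (pabTss[offByte] & (1 << (iBit & 7)))
            return false;
    }
    return true;
}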
46
47/** @} */
48
49/** @name C Implementations
50 * @{
51 */
52
53/**
54 * Implements a 16-bit popa.
55 */
56IEM_CIMPL_DEF_0(iemCImpl_popa_16)
57{
58 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
59 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
60 RTGCPTR GCPtrLast = GCPtrStart + 15;
61 VBOXSTRICTRC rcStrict;
62
63 /*
64 * The docs are a bit hard to comprehend here, but it looks like we wrap
65 * around in real mode as long as none of the individual "popa" crosses the
66 * end of the stack segment. In protected mode we check the whole access
67 * in one go. For efficiency, only do the word-by-word thing if we're in
68 * danger of wrapping around.
69 */
70 /** @todo do popa boundary / wrap-around checks. */
71 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
72 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
73 {
74 /* word-by-word */
75 RTUINT64U TmpRsp;
76 TmpRsp.u = pCtx->rsp;
77 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
78 if (rcStrict == VINF_SUCCESS)
79 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
80 if (rcStrict == VINF_SUCCESS)
81 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
82 if (rcStrict == VINF_SUCCESS)
83 {
84 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
85 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
86 }
87 if (rcStrict == VINF_SUCCESS)
88 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
89 if (rcStrict == VINF_SUCCESS)
90 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
91 if (rcStrict == VINF_SUCCESS)
92 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
93 if (rcStrict == VINF_SUCCESS)
94 {
95 pCtx->rsp = TmpRsp.u;
96 iemRegAddToRip(pIemCpu, cbInstr);
97 }
98 }
99 else
100 {
101 uint16_t const *pa16Mem = NULL;
102 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
103 if (rcStrict == VINF_SUCCESS)
104 {
105 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
106 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
107 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
108 /* skip sp */
109 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
110 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
111 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
112 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
113 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
114 if (rcStrict == VINF_SUCCESS)
115 {
116 iemRegAddToRsp(pCtx, 16);
117 iemRegAddToRip(pIemCpu, cbInstr);
118 }
119 }
120 }
121 return rcStrict;
122}
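/*
 * Why the popa/pusha code indexes the mapped frame with 7 - X86_GREG_xXX:
 * PUSHA stores AX, CX, DX, BX, SP, BP, SI, DI in that order, so DI ends up
 * at the lowest stack address.  With the frame mapped lowest-address-first
 * and the X86_GREG_xXX constants following the usual register encoding
 * (xAX=0 ... xDI=7), slot 7 - iReg therefore holds register iReg.  Purely
 * illustrative layout, not used by the code:
 *
 *   pa16Mem[0] = DI   pa16Mem[1] = SI   pa16Mem[2] = BP   pa16Mem[3] = SP
 *   pa16Mem[4] = BX   pa16Mem[5] = DX   pa16Mem[6] = CX   pa16Mem[7] = AX
 *
 * e.g. pa16Mem[7 - X86_GREG_xCX] == pa16Mem[6], the saved CX.
 */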
123
124
125/**
126 * Implements a 32-bit popa.
127 */
128IEM_CIMPL_DEF_0(iemCImpl_popa_32)
129{
130 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
131 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
132 RTGCPTR GCPtrLast = GCPtrStart + 31;
133 VBOXSTRICTRC rcStrict;
134
135 /*
136 * The docs are a bit hard to comprehend here, but it looks like we wrap
137 * around in real mode as long as none of the individual "popa" crosses the
138 * end of the stack segment. In protected mode we check the whole access
139 * in one go. For efficiency, only do the word-by-word thing if we're in
140 * danger of wrapping around.
141 */
142 /** @todo do popa boundary / wrap-around checks. */
143 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
144 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
145 {
146 /* word-by-word */
147 RTUINT64U TmpRsp;
148 TmpRsp.u = pCtx->rsp;
149 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
150 if (rcStrict == VINF_SUCCESS)
151 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
152 if (rcStrict == VINF_SUCCESS)
153 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
154 if (rcStrict == VINF_SUCCESS)
155 {
156 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
157 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
158 }
159 if (rcStrict == VINF_SUCCESS)
160 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
161 if (rcStrict == VINF_SUCCESS)
162 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
163 if (rcStrict == VINF_SUCCESS)
164 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
165 if (rcStrict == VINF_SUCCESS)
166 {
167#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
168 pCtx->rdi &= UINT32_MAX;
169 pCtx->rsi &= UINT32_MAX;
170 pCtx->rbp &= UINT32_MAX;
171 pCtx->rbx &= UINT32_MAX;
172 pCtx->rdx &= UINT32_MAX;
173 pCtx->rcx &= UINT32_MAX;
174 pCtx->rax &= UINT32_MAX;
175#endif
176 pCtx->rsp = TmpRsp.u;
177 iemRegAddToRip(pIemCpu, cbInstr);
178 }
179 }
180 else
181 {
182 uint32_t const *pa32Mem;
183 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
184 if (rcStrict == VINF_SUCCESS)
185 {
186 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
187 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
188 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
189 /* skip esp */
190 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
191 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
192 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
193 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
194 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
195 if (rcStrict == VINF_SUCCESS)
196 {
197 iemRegAddToRsp(pCtx, 32);
198 iemRegAddToRip(pIemCpu, cbInstr);
199 }
200 }
201 }
202 return rcStrict;
203}
204
205
206/**
207 * Implements a 16-bit pusha.
208 */
209IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
210{
211 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
212 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
213 RTGCPTR GCPtrBottom = GCPtrTop - 15;
214 VBOXSTRICTRC rcStrict;
215
216 /*
217 * The docs are a bit hard to comprehend here, but it looks like we wrap
218 * around in real mode as long as none of the individual "pushd" crosses the
219 * end of the stack segment. In protected mode we check the whole access
220 * in one go. For efficiency, only do the word-by-word thing if we're in
221 * danger of wrapping around.
222 */
223 /** @todo do pusha boundary / wrap-around checks. */
224 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
225 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
226 {
227 /* word-by-word */
228 RTUINT64U TmpRsp;
229 TmpRsp.u = pCtx->rsp;
230 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
231 if (rcStrict == VINF_SUCCESS)
232 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
233 if (rcStrict == VINF_SUCCESS)
234 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
235 if (rcStrict == VINF_SUCCESS)
236 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
237 if (rcStrict == VINF_SUCCESS)
238 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
239 if (rcStrict == VINF_SUCCESS)
240 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
241 if (rcStrict == VINF_SUCCESS)
242 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
243 if (rcStrict == VINF_SUCCESS)
244 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
245 if (rcStrict == VINF_SUCCESS)
246 {
247 pCtx->rsp = TmpRsp.u;
248 iemRegAddToRip(pIemCpu, cbInstr);
249 }
250 }
251 else
252 {
253 GCPtrBottom--;
254 uint16_t *pa16Mem = NULL;
255 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
256 if (rcStrict == VINF_SUCCESS)
257 {
258 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
259 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
260 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
261 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
262 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
263 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
264 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
265 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
266 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
267 if (rcStrict == VINF_SUCCESS)
268 {
269 iemRegSubFromRsp(pCtx, 16);
270 iemRegAddToRip(pIemCpu, cbInstr);
271 }
272 }
273 }
274 return rcStrict;
275}
276
277
278/**
279 * Implements a 32-bit pusha.
280 */
281IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
282{
283 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
284 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
285 RTGCPTR GCPtrBottom = GCPtrTop - 31;
286 VBOXSTRICTRC rcStrict;
287
288 /*
289 * The docs are a bit hard to comprehend here, but it looks like we wrap
290 * around in real mode as long as none of the individual "pusha" crosses the
291 * end of the stack segment. In protected mode we check the whole access
292 * in one go. For efficiency, only do the word-by-word thing if we're in
293 * danger of wrapping around.
294 */
295 /** @todo do pusha boundary / wrap-around checks. */
296 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
297 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
298 {
299 /* word-by-word */
300 RTUINT64U TmpRsp;
301 TmpRsp.u = pCtx->rsp;
302 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
303 if (rcStrict == VINF_SUCCESS)
304 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
305 if (rcStrict == VINF_SUCCESS)
306 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
307 if (rcStrict == VINF_SUCCESS)
308 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
309 if (rcStrict == VINF_SUCCESS)
310 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
311 if (rcStrict == VINF_SUCCESS)
312 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
313 if (rcStrict == VINF_SUCCESS)
314 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
315 if (rcStrict == VINF_SUCCESS)
316 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
317 if (rcStrict == VINF_SUCCESS)
318 {
319 pCtx->rsp = TmpRsp.u;
320 iemRegAddToRip(pIemCpu, cbInstr);
321 }
322 }
323 else
324 {
325 GCPtrBottom--;
326 uint32_t *pa32Mem;
327 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
328 if (rcStrict == VINF_SUCCESS)
329 {
330 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
331 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
332 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
333 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
334 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
335 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
336 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
337 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
338 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
339 if (rcStrict == VINF_SUCCESS)
340 {
341 iemRegSubFromRsp(pCtx, 32);
342 iemRegAddToRip(pIemCpu, cbInstr);
343 }
344 }
345 }
346 return rcStrict;
347}
348
349
350/**
351 * Implements pushf.
352 *
353 *
354 * @param enmEffOpSize The effective operand size.
355 */
356IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
357{
358 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
359
360 /*
361 * If we're in V8086 mode some care is required (which is why we're
362 * doing this in a C implementation).
363 */
364 uint32_t fEfl = pCtx->eflags.u;
365 if ( (fEfl & X86_EFL_VM)
366 && X86_EFL_GET_IOPL(fEfl) != 3 )
367 {
368 Assert(pCtx->cr0 & X86_CR0_PE);
369 if ( enmEffOpSize != IEMMODE_16BIT
370 || !(pCtx->cr4 & X86_CR4_VME))
371 return iemRaiseGeneralProtectionFault0(pIemCpu);
372 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
373 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
374 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
375 }
376
377 /*
378 * Ok, clear RF and VM and push the flags.
379 */
380 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
381
382 VBOXSTRICTRC rcStrict;
383 switch (enmEffOpSize)
384 {
385 case IEMMODE_16BIT:
386 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
387 break;
388 case IEMMODE_32BIT:
389 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
390 break;
391 case IEMMODE_64BIT:
392 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
393 break;
394 IEM_NOT_REACHED_DEFAULT_CASE_RET();
395 }
396 if (rcStrict != VINF_SUCCESS)
397 return rcStrict;
398
399 iemRegAddToRip(pIemCpu, cbInstr);
400 return VINF_SUCCESS;
401}
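/*
 * A quick worked example of the VIF-to-IF shift in the V8086/VME path above.
 * VIF is EFLAGS bit 19 and IF is bit 9, so the distance is 19 - 9 = 10 bits:
 *
 *   fEfl              = 0x00080000   (X86_EFL_VIF set)
 *   fEfl >> (19 - 9)  = 0x00000200   (the X86_EFL_IF position)
 *
 * i.e. the 16-bit image that gets pushed reports the virtualized interrupt
 * flag in the IF bit, which is what VME guests expect to see.
 */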
402
403
404/**
405 * Implements popf.
406 *
407 * @param enmEffOpSize The effective operand size.
408 */
409IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
410{
411 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
412 uint32_t const fEflOld = pCtx->eflags.u;
413 VBOXSTRICTRC rcStrict;
414 uint32_t fEflNew;
415
416 /*
417 * V8086 is special as usual.
418 */
419 if (fEflOld & X86_EFL_VM)
420 {
421 /*
422 * Almost anything goes if IOPL is 3.
423 */
424 if (X86_EFL_GET_IOPL(fEflOld) == 3)
425 {
426 switch (enmEffOpSize)
427 {
428 case IEMMODE_16BIT:
429 {
430 uint16_t u16Value;
431 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
432 if (rcStrict != VINF_SUCCESS)
433 return rcStrict;
434 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
435 break;
436 }
437 case IEMMODE_32BIT:
438 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
439 if (rcStrict != VINF_SUCCESS)
440 return rcStrict;
441 break;
442 IEM_NOT_REACHED_DEFAULT_CASE_RET();
443 }
444
445 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
446 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
447 }
448 /*
449 * Interrupt flag virtualization with CR4.VME=1.
450 */
451 else if ( enmEffOpSize == IEMMODE_16BIT
452 && (pCtx->cr4 & X86_CR4_VME) )
453 {
454 uint16_t u16Value;
455 RTUINT64U TmpRsp;
456 TmpRsp.u = pCtx->rsp;
457 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
458 if (rcStrict != VINF_SUCCESS)
459 return rcStrict;
460
461 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
462 * or before? */
463 if ( ( (u16Value & X86_EFL_IF)
464 && (fEflOld & X86_EFL_VIP))
465 || (u16Value & X86_EFL_TF) )
466 return iemRaiseGeneralProtectionFault0(pIemCpu);
467
468 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
469 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
470 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
471 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
472
473 pCtx->rsp = TmpRsp.u;
474 }
475 else
476 return iemRaiseGeneralProtectionFault0(pIemCpu);
477
478 }
479 /*
480 * Not in V8086 mode.
481 */
482 else
483 {
484 /* Pop the flags. */
485 switch (enmEffOpSize)
486 {
487 case IEMMODE_16BIT:
488 {
489 uint16_t u16Value;
490 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
491 if (rcStrict != VINF_SUCCESS)
492 return rcStrict;
493 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
494 break;
495 }
496 case IEMMODE_32BIT:
497 case IEMMODE_64BIT:
498 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
499 if (rcStrict != VINF_SUCCESS)
500 return rcStrict;
501 break;
502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
503 }
504
505 /* Merge them with the current flags. */
506 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
507 || pIemCpu->uCpl == 0)
508 {
509 fEflNew &= X86_EFL_POPF_BITS;
510 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
511 }
512 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
513 {
514 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
515 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
516 }
517 else
518 {
519 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
520 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
521 }
522 }
523
524 /*
525 * Commit the flags.
526 */
527 Assert(fEflNew & RT_BIT_32(1));
528 pCtx->eflags.u = fEflNew;
529 iemRegAddToRip(pIemCpu, cbInstr);
530
531 return VINF_SUCCESS;
532}
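/*
 * The three POPF merge cases above are all instances of the same
 * "merge under mask" pattern.  A small helper expressing it (hypothetical,
 * not used by the code) together with the masks each case uses:
 */
DECLINLINE(uint32_t) iemSketchEflMerge(uint32_t fOld, uint32_t fPopped, uint32_t fWritable)
{
    return (fPopped & fWritable) | (fOld & ~fWritable);
}
/*
 *   CPL == 0:      fWritable = X86_EFL_POPF_BITS
 *   CPL <= IOPL:   fWritable = X86_EFL_POPF_BITS & ~X86_EFL_IOPL
 *   CPL >  IOPL:   fWritable = X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)
 */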
533
534
535/**
536 * Implements an indirect call.
537 *
538 * @param uNewPC The new program counter (RIP) value (loaded from the
539 * operand).
540 * @param enmEffOpSize The effective operand size.
541 */
542IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
543{
544 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
545 uint16_t uOldPC = pCtx->ip + cbInstr;
546 if (uNewPC > pCtx->csHid.u32Limit)
547 return iemRaiseGeneralProtectionFault0(pIemCpu);
548
549 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
550 if (rcStrict != VINF_SUCCESS)
551 return rcStrict;
552
553 pCtx->rip = uNewPC;
554 return VINF_SUCCESS;
555
556}
557
558
559/**
560 * Implements a 16-bit relative call.
561 *
562 * @param offDisp The displacement offset.
563 */
564IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
565{
566 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
567 uint16_t uOldPC = pCtx->ip + cbInstr;
568 uint16_t uNewPC = uOldPC + offDisp;
569 if (uNewPC > pCtx->csHid.u32Limit)
570 return iemRaiseGeneralProtectionFault0(pIemCpu);
571
572 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
573 if (rcStrict != VINF_SUCCESS)
574 return rcStrict;
575
576 pCtx->rip = uNewPC;
577 return VINF_SUCCESS;
578}
579
580
581/**
582 * Implements a 32-bit indirect call.
583 *
584 * @param uNewPC The new program counter (RIP) value (loaded from the
585 * operand).
586 * @param enmEffOpSize The effective operand size.
587 */
588IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
589{
590 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
591 uint32_t uOldPC = pCtx->eip + cbInstr;
592 if (uNewPC > pCtx->csHid.u32Limit)
593 return iemRaiseGeneralProtectionFault0(pIemCpu);
594
595 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
596 if (rcStrict != VINF_SUCCESS)
597 return rcStrict;
598
599 pCtx->rip = uNewPC;
600 return VINF_SUCCESS;
601
602}
603
604
605/**
606 * Implements a 32-bit relative call.
607 *
608 * @param offDisp The displacement offset.
609 */
610IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
611{
612 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
613 uint32_t uOldPC = pCtx->eip + cbInstr;
614 uint32_t uNewPC = uOldPC + offDisp;
615 if (uNewPC > pCtx->csHid.u32Limit)
616 return iemRaiseGeneralProtectionFault0(pIemCpu);
617
618 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
619 if (rcStrict != VINF_SUCCESS)
620 return rcStrict;
621
622 pCtx->rip = uNewPC;
623 return VINF_SUCCESS;
624}
625
626
627/**
628 * Implements a 64-bit indirect call.
629 *
630 * @param uNewPC The new program counter (RIP) value (loaded from the
631 * operand).
632 * @param enmEffOpSize The effective operand size.
633 */
634IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
635{
636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
637 uint64_t uOldPC = pCtx->rip + cbInstr;
638 if (!IEM_IS_CANONICAL(uNewPC))
639 return iemRaiseGeneralProtectionFault0(pIemCpu);
640
641 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644
645 pCtx->rip = uNewPC;
646 return VINF_SUCCESS;
647
648}
649
650
651/**
652 * Implements a 64-bit relative call.
653 *
654 * @param offDisp The displacement offset.
655 */
656IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
657{
658 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
659 uint64_t uOldPC = pCtx->rip + cbInstr;
660 uint64_t uNewPC = uOldPC + offDisp;
661 if (!IEM_IS_CANONICAL(uNewPC))
662 return iemRaiseNotCanonical(pIemCpu);
663
664 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
665 if (rcStrict != VINF_SUCCESS)
666 return rcStrict;
667
668 pCtx->rip = uNewPC;
669 return VINF_SUCCESS;
670}
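/*
 * For reference, the canonical-address requirement enforced by the 64-bit
 * call variants above boils down to bits 63:47 being all zero or all one
 * (on CPUs with 48-bit virtual addresses).  A self-contained sketch with a
 * hypothetical name, not used by the code:
 */
DECLINLINE(bool) iemSketchIsCanonical(uint64_t uAddr)
{
    uint64_t uHighBits = uAddr >> 47;   /* bits 63:47 */
    return uHighBits == 0 || uHighBits == UINT64_C(0x1ffff);
}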
671
672
673/**
674 * Implements far jumps.
675 *
676 * @param uSel The selector.
677 * @param offSeg The segment offset.
678 * @param enmEffOpSize The effective operand size.
679 */
680IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint32_t, offSeg, IEMMODE, enmEffOpSize)
681{
682 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
683
684 /*
685 * Real mode and V8086 mode are easy. The only snag seems to be that
686 * CS.limit doesn't change and the limit check is done against the current
687 * limit.
688 */
689 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
690 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
691 {
692 if (offSeg > pCtx->csHid.u32Limit)
693 return iemRaiseGeneralProtectionFault0(pIemCpu);
694
695 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
696 pCtx->rip = offSeg;
697 else
698 pCtx->rip = offSeg & UINT16_MAX;
699 pCtx->cs = uSel;
700 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
701 /** @todo REM resets the accessed bit here (seen on jmp far16 after
702 * disabling PE). Check with VT-x and AMD-V. */
703#ifdef IEM_VERIFICATION_MODE
704 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
705#endif
706 return VINF_SUCCESS;
707 }
708
709 /*
710 * Protected mode. Need to parse the specified descriptor...
711 */
712 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
713 {
714 Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
715 return iemRaiseGeneralProtectionFault0(pIemCpu);
716 }
717
718 /* Fetch the descriptor. */
719 IEMSELDESC Desc;
720 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
721 if (rcStrict != VINF_SUCCESS)
722 return rcStrict;
723
724 /* Is it there? */
725 if (!Desc.Legacy.Gen.u1Present)
726 {
727 Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
728 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
729 }
730
731 /*
732 * Deal with it according to its type.
733 */
734 if (Desc.Legacy.Gen.u1DescType)
735 {
736 /* Only code segments. */
737 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
738 {
739 Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
740 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
741 }
742
743 /* L vs D. */
744 if ( Desc.Legacy.Gen.u1Long
745 && Desc.Legacy.Gen.u1DefBig
746 && IEM_IS_LONG_MODE(pIemCpu))
747 {
748 Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
749 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
750 }
751
752 /* DPL/RPL/CPL check, where conforming segments make a difference. */
753 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
754 {
755 if (Desc.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
756 {
757 Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
758 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
759 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
760 }
761 }
762 else
763 {
764 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
765 {
766 Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
767 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
768 }
769 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
770 {
771 Log(("jmpf %04x:%08x -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
772 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
773 }
774 }
775
776 /* Limit check. (Should alternatively check for non-canonical addresses
777 here, but that is ruled out by offSeg being 32-bit, right?) */
778 uint64_t u64Base;
779 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
780 if (Desc.Legacy.Gen.u1Granularity)
781 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
782 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
783 u64Base = 0;
784 else
785 {
786 if (offSeg > cbLimit)
787 {
788 Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
789 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
790 }
791 u64Base = X86DESC_BASE(Desc.Legacy);
792 }
793
794 /*
795 * Ok, everything checked out fine. Now set the accessed bit before
796 * committing the result into CS, CSHID and RIP.
797 */
798 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
799 {
800 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
801 if (rcStrict != VINF_SUCCESS)
802 return rcStrict;
803#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
804 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
805#endif
806 }
807
808 /* commit */
809 pCtx->rip = offSeg;
810 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
811 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
812 pCtx->csHid.Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
813 pCtx->csHid.u32Limit = cbLimit;
814 pCtx->csHid.u64Base = u64Base;
815 /** @todo check if the hidden bits are loaded correctly for 64-bit
816 * mode. */
817 return VINF_SUCCESS;
818 }
819
820 /*
821 * System selector.
822 */
823 if (IEM_IS_LONG_MODE(pIemCpu))
824 switch (Desc.Legacy.Gen.u4Type)
825 {
826 case AMD64_SEL_TYPE_SYS_LDT:
827 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
828 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
829 case AMD64_SEL_TYPE_SYS_CALL_GATE:
830 case AMD64_SEL_TYPE_SYS_INT_GATE:
831 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
832 /* Call various functions to do the work. */
833 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
834 default:
835 Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
836 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
837
838 }
839 switch (Desc.Legacy.Gen.u4Type)
840 {
841 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
842 case X86_SEL_TYPE_SYS_LDT:
843 case X86_SEL_TYPE_SYS_286_CALL_GATE:
844 case X86_SEL_TYPE_SYS_TASK_GATE:
845 case X86_SEL_TYPE_SYS_286_INT_GATE:
846 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
847 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
848 case X86_SEL_TYPE_SYS_386_CALL_GATE:
849 case X86_SEL_TYPE_SYS_386_INT_GATE:
850 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
851 /* Call various functions to do the work. */
852 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
853
854 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
855 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
856 /* Call various functions to do the work. */
857 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
858
859 default:
860 Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
861 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
862 }
863}
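/*
 * A note on the attribute extraction used when committing CS above:
 * in a legacy descriptor, bits 40..47 hold the access byte (type/S/DPL/P),
 * bits 48..51 limit 19:16 and bits 52..55 the AVL/L/D/G flags.  Shifting the
 * raw 64-bit descriptor right by 16+16+8 = 40 lines the access byte up at
 * bits 0..7 and the flag nibble at bits 12..15, and the 0xf0ff mask keeps
 * exactly those two fields while discarding the limit nibble.  Worked
 * example with a flat 32-bit code descriptor (value is illustrative):
 *
 *   Desc.Legacy.u         = 0x00cf9b000000ffff
 *   Desc.Legacy.u >> 40   = 0x00cf9b
 *   0x00cf9b & 0xf0ff     = 0xc09b   (G=1, D=1, P=1, DPL=0, code, accessed)
 */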
864
865
866/**
867 * Implements far calls.
868 *
869 * @param uSel The selector.
870 * @param offSeg The segment offset.
871 * @param enmOpSize The operand size (in case we need it).
872 */
873IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876 VBOXSTRICTRC rcStrict;
877 uint64_t uNewRsp;
878 void *pvRet;
879
880 /*
881 * Real mode and V8086 mode are easy. The only snag seems to be that
882 * CS.limit doesn't change and the limit check is done against the current
883 * limit.
884 */
885 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
886 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
887 {
888 Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);
889
890 /* Check stack first - may #SS(0). */
891 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 6 : 4,
892 &pvRet, &uNewRsp);
893 if (rcStrict != VINF_SUCCESS)
894 return rcStrict;
895
896 /* Check the target address range. */
897 if (offSeg > UINT32_MAX)
898 return iemRaiseGeneralProtectionFault0(pIemCpu);
899
900 /* Everything is fine, push the return address. */
901 if (enmOpSize == IEMMODE_16BIT)
902 {
903 ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
904 ((uint16_t *)pvRet)[1] = pCtx->cs;
905 }
906 else
907 {
908 ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
909 ((uint16_t *)pvRet)[3] = pCtx->cs;
910 }
911 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
912 if (rcStrict != VINF_SUCCESS)
913 return rcStrict;
914
915 /* Branch. */
916 pCtx->rip = offSeg;
917 pCtx->cs = uSel;
918 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
919 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
920 * after disabling PE.) Check with VT-x and AMD-V. */
921#ifdef IEM_VERIFICATION_MODE
922 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
923#endif
924 return VINF_SUCCESS;
925 }
926
927 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
928}
929
930
931/**
932 * Implements retf.
933 *
934 * @param enmEffOpSize The effective operand size.
935 * @param cbPop The amount of arguments to pop from the stack
936 * (bytes).
937 */
938IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
939{
940 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
941 VBOXSTRICTRC rcStrict;
942 uint64_t uNewRsp;
943
944 /*
945 * Real mode and V8086 mode are easy.
946 */
947 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
948 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
949 {
950 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
951 uint16_t const *pu16Frame;
952 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
953 (void const **)&pu16Frame, &uNewRsp);
954 if (rcStrict != VINF_SUCCESS)
955 return rcStrict;
956 uint32_t uNewEip;
957 uint16_t uNewCs;
958 if (enmEffOpSize == IEMMODE_32BIT)
959 {
960 uNewCs = pu16Frame[2];
961 uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
962 }
963 else
964 {
965 uNewCs = pu16Frame[1];
966 uNewEip = pu16Frame[0];
967 }
968 /** @todo check how this is supposed to work if sp=0xfffe. */
969
970 /* Check the limit of the new EIP. */
971 /** @todo Intel pseudo code only does the limit check for 16-bit
972 * operands, AMD does not make any distinction. What is right? */
973 if (uNewEip > pCtx->csHid.u32Limit)
974 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
975
976 /* commit the operation. */
977 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
978 if (rcStrict != VINF_SUCCESS)
979 return rcStrict;
980 pCtx->rip = uNewEip;
981 pCtx->cs = uNewCs;
982 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
983 /** @todo do we load attribs and limit as well? */
984 if (cbPop)
985 iemRegAddToRsp(pCtx, cbPop);
986 return VINF_SUCCESS;
987 }
988
989 AssertFailed();
990 return VERR_NOT_IMPLEMENTED;
991}
992
993
994/**
995 * Implements retn.
996 *
997 * We're doing this in C because of the \#GP that might be raised if the popped
998 * program counter is out of bounds.
999 *
1000 * @param enmEffOpSize The effective operand size.
1001 * @param cbPop The amount of arguments to pop from the stack
1002 * (bytes).
1003 */
1004IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1005{
1006 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1007
1008 /* Fetch the RSP from the stack. */
1009 VBOXSTRICTRC rcStrict;
1010 RTUINT64U NewRip;
1011 RTUINT64U NewRsp;
1012 NewRsp.u = pCtx->rsp;
1013 switch (enmEffOpSize)
1014 {
1015 case IEMMODE_16BIT:
1016 NewRip.u = 0;
1017 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1018 break;
1019 case IEMMODE_32BIT:
1020 NewRip.u = 0;
1021 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1022 break;
1023 case IEMMODE_64BIT:
1024 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1025 break;
1026 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1027 }
1028 if (rcStrict != VINF_SUCCESS)
1029 return rcStrict;
1030
1031 /* Check the new RSP before loading it. */
1032 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1033 * of it. The canonical test is performed here and for call. */
1034 if (enmEffOpSize != IEMMODE_64BIT)
1035 {
1036 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
1037 {
1038 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
1039 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1040 }
1041 }
1042 else
1043 {
1044 if (!IEM_IS_CANONICAL(NewRip.u))
1045 {
1046 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1047 return iemRaiseNotCanonical(pIemCpu);
1048 }
1049 }
1050
1051 /* Commit it. */
1052 pCtx->rip = NewRip.u;
1053 pCtx->rsp = NewRsp.u;
1054 if (cbPop)
1055 iemRegAddToRsp(pCtx, cbPop);
1056
1057 return VINF_SUCCESS;
1058}
1059
1060
1061/**
1062 * Implements int3 and int XX.
1063 *
1064 * @param u8Int The interrupt vector number.
1065 * @param fIsBpInstr Is it the breakpoint instruction.
1066 */
1067IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1068{
1069 /** @todo we should call TRPM to do this job. */
1070 VBOXSTRICTRC rcStrict;
1071 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1072
1073 /*
1074 * Real mode is easy.
1075 */
1076 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1077 && IEM_IS_REAL_MODE(pIemCpu))
1078 {
1079 /* read the IDT entry. */
1080 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Int + 3)
1081 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Int << X86_TRAP_ERR_SEL_SHIFT));
1082 RTFAR16 Idte;
1083 rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Int);
1084 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1085 return rcStrict;
1086
1087 /* push the stack frame. */
1088 uint16_t *pu16Frame;
1089 uint64_t uNewRsp;
1090 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1091 if (rcStrict != VINF_SUCCESS)
1092 return rcStrict;
1093
1094 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1095 pu16Frame[1] = (uint16_t)pCtx->cs;
1096 pu16Frame[0] = pCtx->ip + cbInstr;
1097 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1098 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1099 return rcStrict;
1100
1101 /* load the vector address into cs:ip. */
1102 pCtx->cs = Idte.sel;
1103 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1104 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1105 pCtx->rip = Idte.off;
1106 pCtx->eflags.Bits.u1IF = 0;
1107 return VINF_SUCCESS;
1108 }
1109
1110 AssertFailed();
1111 return VERR_NOT_IMPLEMENTED;
1112}
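/*
 * The real-mode IDT read above is the classic interrupt vector table: an
 * array of 4-byte offset:segment pairs starting at idtr.pIdt (normally 0).
 * Worked example, say vector 0x10 points at F000:F065:
 *
 *   entry address  = idtr.pIdt + 4 * 0x10 = 0x40
 *   bytes at 0x40  = 65 F0 00 F0  ->  Idte.off = 0xF065, Idte.sel = 0xF000
 *
 * which matches the RTFAR16 layout (16-bit offset followed by the 16-bit
 * selector), so the single 32-bit data fetch fills in both fields.
 */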
1113
1114
1115/**
1116 * Implements iret.
1117 *
1118 * @param enmEffOpSize The effective operand size.
1119 */
1120IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
1121{
1122 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1123 VBOXSTRICTRC rcStrict;
1124 uint64_t uNewRsp;
1125
1126 /*
1127 * Real mode is easy, V8086 mode is relatively similar.
1128 */
1129 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1130 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1131 {
1132 /* iret throws an exception if VME isn't enabled. */
1133 if ( pCtx->eflags.Bits.u1VM
1134 && !(pCtx->cr4 & X86_CR4_VME))
1135 return iemRaiseGeneralProtectionFault0(pIemCpu);
1136
1137 /* Do the stack bits, but don't commit RSP before everything checks
1138 out right. */
1139 union
1140 {
1141 uint32_t const *pu32;
1142 uint16_t const *pu16;
1143 void const *pv;
1144 } uFrame;
1145 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1146 uint16_t uNewCs;
1147 uint32_t uNewEip;
1148 uint32_t uNewFlags;
1149 if (enmEffOpSize == IEMMODE_32BIT)
1150 {
1151 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1152 if (rcStrict != VINF_SUCCESS)
1153 return rcStrict;
1154 uNewEip = uFrame.pu32[0];
1155 uNewCs = (uint16_t)uFrame.pu32[1];
1156 uNewFlags = uFrame.pu32[2];
1157 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1158 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1159 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1160 | X86_EFL_ID;
1161 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1162 }
1163 else
1164 {
1165 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1166 if (rcStrict != VINF_SUCCESS)
1167 return rcStrict;
1168 uNewEip = uFrame.pu16[0];
1169 uNewCs = uFrame.pu16[1];
1170 uNewFlags = uFrame.pu16[2];
1171 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1172 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1173 uNewFlags |= pCtx->eflags.u & (UINT16_C(0xffff0000) | X86_EFL_1);
1174 /** @todo The intel pseudo code does not indicate what happens to
1175 * reserved flags. We just ignore them. */
1176 }
1177 /** @todo Check how this is supposed to work if sp=0xfffe. */
1178
1179 /* Check the limit of the new EIP. */
1180 /** @todo Only the AMD pseudo code checks the limit here, what's
1181 * right? */
1182 if (uNewEip > pCtx->csHid.u32Limit)
1183 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1184
1185 /* V8086 checks and flag adjustments */
1186 if (pCtx->eflags.Bits.u1VM)
1187 {
1188 if (pCtx->eflags.Bits.u2IOPL == 3)
1189 {
1190 /* Preserve IOPL and clear RF. */
1191 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1192 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1193 }
1194 else if ( enmEffOpSize == IEMMODE_16BIT
1195 && ( !(uNewFlags & X86_EFL_IF)
1196 || !pCtx->eflags.Bits.u1VIP )
1197 && !(uNewFlags & X86_EFL_TF) )
1198 {
1199 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1200 uNewFlags &= ~X86_EFL_VIF;
1201 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1202 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1203 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1204 }
1205 else
1206 return iemRaiseGeneralProtectionFault0(pIemCpu);
1207 }
1208
1209 /* commit the operation. */
1210 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1211 if (rcStrict != VINF_SUCCESS)
1212 return rcStrict;
1213 pCtx->rip = uNewEip;
1214 pCtx->cs = uNewCs;
1215 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1216 /** @todo do we load attribs and limit as well? */
1217 Assert(uNewFlags & X86_EFL_1);
1218 pCtx->eflags.u = uNewFlags;
1219
1220 return VINF_SUCCESS;
1221 }
1222
1223
1224 AssertFailed();
1225 return VERR_NOT_IMPLEMENTED;
1226}
1227
1228
1229/**
1230 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
1231 *
1232 * @param iSegReg The segment register number (valid).
1233 * @param uSel The new selector value.
1234 */
1235IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
1236{
1237 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1238 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
1239 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
1240
1241 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
1242
1243 /*
1244 * Real mode and V8086 mode are easy.
1245 */
1246 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1247 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1248 {
1249 *pSel = uSel;
1250 pHid->u64Base = (uint32_t)uSel << 4;
1251 /** @todo Does the CPU actually load limits and attributes in the
1252 * real/V8086 mode segment load case? It doesn't for CS in far
1253 * jumps... Affects unreal mode. */
1254 pHid->u32Limit = 0xffff;
1255 pHid->Attr.u = 0;
1256 pHid->Attr.n.u1Present = 1;
1257 pHid->Attr.n.u1DescType = 1;
1258 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
1259 ? X86_SEL_TYPE_RW
1260 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
1261
1262 iemRegAddToRip(pIemCpu, cbInstr);
1263 return VINF_SUCCESS;
1264 }
1265
1266 /*
1267 * Protected mode.
1268 *
1269 * Check if it's a null segment selector value first, that's OK for DS, ES,
1270 * FS and GS. If not null, then we have to load and parse the descriptor.
1271 */
1272 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1273 {
1274 if (iSegReg == X86_SREG_SS)
1275 {
1276 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
1277 || pIemCpu->uCpl != 0
1278 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
1279 {
1280 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
1281 return iemRaiseGeneralProtectionFault0(pIemCpu);
1282 }
1283
1284 /* In 64-bit kernel mode, the stack can be 0 because of the way
1285 interrupts are dispatched when in kernel ctx. Just load the
1286 selector value into the register and leave the hidden bits
1287 as is. */
1288 *pSel = uSel;
1289 iemRegAddToRip(pIemCpu, cbInstr);
1290 return VINF_SUCCESS;
1291 }
1292
1293 *pSel = uSel; /* Not RPL, remember :-) */
1294 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
1295 && iSegReg != X86_SREG_FS
1296 && iSegReg != X86_SREG_GS)
1297 {
1298 /** @todo figure out what this actually does, it works. Needs
1299 * testcase! */
1300 pHid->Attr.u = 0;
1301 pHid->Attr.n.u1Present = 1;
1302 pHid->Attr.n.u1Long = 1;
1303 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
1304 pHid->Attr.n.u2Dpl = 3;
1305 pHid->u32Limit = 0;
1306 pHid->u64Base = 0;
1307 }
1308 else
1309 {
1310 pHid->Attr.u = 0;
1311 pHid->u32Limit = 0;
1312 pHid->u64Base = 0;
1313 }
1314 iemRegAddToRip(pIemCpu, cbInstr);
1315 return VINF_SUCCESS;
1316 }
1317
1318 /* Fetch the descriptor. */
1319 IEMSELDESC Desc;
1320 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1321 if (rcStrict != VINF_SUCCESS)
1322 return rcStrict;
1323
1324 /* Check GPs first. */
1325 if (!Desc.Legacy.Gen.u1DescType)
1326 {
1327 Log(("load sreg %d, %#x - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
1328 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1329 }
1330 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
1331 {
1332 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1333 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1334 {
1335 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
1336 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1337 }
1344 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
1345 {
1346 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
1347 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1348 }
1349 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1350 {
1351 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1352 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1353 }
1354 }
1355 else
1356 {
1357 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
1358 {
1359 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
1360 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1361 }
1362 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1363 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1364 {
1365#if 0 /* this is what intel says. */
1366 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
1367 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1368 {
1369 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
1370 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1371 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1372 }
1373#else /* this is what makes more sense. */
1374 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
1375 {
1376 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
1377 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
1378 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1379 }
1380 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1381 {
1382 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
1383 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1384 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1385 }
1386#endif
1387 }
1388 }
1389
1390 /* Is it there? */
1391 if (!Desc.Legacy.Gen.u1Present)
1392 {
1393 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
1394 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1395 }
1396
1397 /* The base and limit. */
1398 uint64_t u64Base;
1399 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1400 if (Desc.Legacy.Gen.u1Granularity)
1401 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1402
1403 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
1404 && iSegReg < X86_SREG_FS)
1405 u64Base = 0;
1406 else
1407 u64Base = X86DESC_BASE(Desc.Legacy);
1408
1409 /*
1410 * Ok, everything checked out fine. Now set the accessed bit before
1411 * committing the result into the registers.
1412 */
1413 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1414 {
1415 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1416 if (rcStrict != VINF_SUCCESS)
1417 return rcStrict;
1418 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1419 }
1420
1421 /* commit */
1422 *pSel = uSel;
1423 pHid->Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
1424 pHid->u32Limit = cbLimit;
1425 pHid->u64Base = u64Base;
1426
1427 /** @todo check if the hidden bits are loaded correctly for 64-bit
1428 * mode. */
1429
1430 iemRegAddToRip(pIemCpu, cbInstr);
1431 return VINF_SUCCESS;
1432}
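/*
 * A quick worked example of the real-mode base calculation used in the
 * real/V8086 path above (pHid->u64Base = uSel << 4): loading DS with 0x1234
 * gives
 *
 *   u64Base = 0x1234 << 4 = 0x12340
 *
 * so DS:0x0010 resolves to linear 0x12350, while the limit stays at 0xffff;
 * whether the other hidden fields should really be reloaded here is exactly
 * the unreal-mode question raised in the @todo above.
 */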
1433
1434
1435/**
1436 * Implements 'mov SReg, r/m'.
1437 *
1438 * @param iSegReg The segment register number (valid).
1439 * @param uSel The new selector value.
1440 */
1441IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
1442{
1443 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1444 if (rcStrict == VINF_SUCCESS)
1445 {
1446 if (iSegReg == X86_SREG_SS)
1447 {
1448 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1449 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
1450 }
1451 }
1452 return rcStrict;
1453}
1454
1455
1456/**
1457 * Implements 'pop SReg'.
1458 *
1459 * @param iSegReg The segment register number (valid).
1460 * @param enmEffOpSize The efficient operand size (valid).
1461 */
1462IEM_CIMPL_DEF_2(iemOpCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
1463{
1464 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1465 VBOXSTRICTRC rcStrict;
1466
1467 /*
1468 * Read the selector off the stack and join paths with mov ss, reg.
1469 */
1470 RTUINT64U TmpRsp;
1471 TmpRsp.u = pCtx->rsp;
1472 switch (enmEffOpSize)
1473 {
1474 case IEMMODE_16BIT:
1475 {
1476 uint16_t uSel;
1477 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
1478 if (rcStrict == VINF_SUCCESS)
1479 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1480 break;
1481 }
1482
1483 case IEMMODE_32BIT:
1484 {
1485 uint32_t u32Value;
1486 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
1487 if (rcStrict == VINF_SUCCESS)
1488 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
1489 break;
1490 }
1491
1492 case IEMMODE_64BIT:
1493 {
1494 uint64_t u64Value;
1495 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
1496 if (rcStrict == VINF_SUCCESS)
1497 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
1498 break;
1499 }
1500 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1501 }
1502
1503 /*
1504 * Commit the stack on success.
1505 */
1506 if (rcStrict == VINF_SUCCESS)
1507 {
1508 pCtx->rsp = TmpRsp.u;
1509 if (iSegReg == X86_SREG_SS)
1510 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
1511 }
1512 return rcStrict;
1513}
1514
1515
1516/**
1517 * Implements lgs, lfs, les, lds & lss.
1518 */
1519IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
1520 uint16_t, uSel,
1521 uint64_t, offSeg,
1522 uint8_t, iSegReg,
1523 uint8_t, iGReg,
1524 IEMMODE, enmEffOpSize)
1525{
1526 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1527 VBOXSTRICTRC rcStrict;
1528
1529 /*
1530 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
1531 */
1532 /** @todo verify and test that mov, pop and lXs perform the segment
1533 * register loading in the exact same way. */
1534 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1535 if (rcStrict == VINF_SUCCESS)
1536 {
1537 switch (enmEffOpSize)
1538 {
1539 case IEMMODE_16BIT:
1540 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1541 break;
1542 case IEMMODE_32BIT:
1543 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1544 break;
1545 case IEMMODE_64BIT:
1546 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1547 break;
1548 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1549 }
1550 }
1551
1552 return rcStrict;
1553}
1554
1555
1556/**
1557 * Implements lgdt.
1558 *
1559 * @param iEffSeg The segment of the new gdtr contents.
1560 * @param GCPtrEffSrc The address of the new gdtr contents.
1561 * @param enmEffOpSize The effective operand size.
1562 */
1563IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
1564{
1565 if (pIemCpu->uCpl != 0)
1566 return iemRaiseGeneralProtectionFault0(pIemCpu);
1567 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
1568
1569 /*
1570 * Fetch the limit and base address.
1571 */
1572 uint16_t cbLimit;
1573 RTGCPTR GCPtrBase;
1574 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
1575 if (rcStrict == VINF_SUCCESS)
1576 {
1577 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1578 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
1579 else
1580 {
1581 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1582 pCtx->gdtr.cbGdt = cbLimit;
1583 pCtx->gdtr.pGdt = GCPtrBase;
1584 }
1585 if (rcStrict == VINF_SUCCESS)
1586 iemRegAddToRip(pIemCpu, cbInstr);
1587 }
1588 return rcStrict;
1589}
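/*
 * For reference, the memory operand parsed by iemMemFetchDataXdtr() above
 * (for both lgdt and lidt) is the usual pseudo-descriptor: a 16-bit limit
 * followed by the base, whose width depends on the operand size and mode.
 * Illustrative layout only:
 *
 *   offset 0: uint16_t cbLimit
 *   offset 2: base     - 24 bits used with a 16-bit operand size
 *                      - 32 bits with a 32-bit operand size
 *                      - 64 bits in 64-bit mode
 *
 * e.g. the bytes FF 03 00 10 40 00 with a 32-bit operand size load a limit
 * of 0x03ff and a base of 0x00401000.
 */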
1590
1591
1592/**
1593 * Implements lidt.
1594 *
1595 * @param iEffSeg The segment of the new idtr contents.
1596 * @param GCPtrEffSrc The address of the new idtr contents.
1597 * @param enmEffOpSize The effective operand size.
1598 */
1599IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
1600{
1601 if (pIemCpu->uCpl != 0)
1602 return iemRaiseGeneralProtectionFault0(pIemCpu);
1603 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
1604
1605 /*
1606 * Fetch the limit and base address.
1607 */
1608 uint16_t cbLimit;
1609 RTGCPTR GCPtrBase;
1610 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
1611 if (rcStrict == VINF_SUCCESS)
1612 {
1613 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1614 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
1615 else
1616 {
1617 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1618 pCtx->idtr.cbIdt = cbLimit;
1619 pCtx->idtr.pIdt = GCPtrBase;
1620 }
1621 if (rcStrict == VINF_SUCCESS)
1622 iemRegAddToRip(pIemCpu, cbInstr);
1623 }
1624 return rcStrict;
1625}
1626
1627
1628/**
1629 * Implements mov GReg,CRx.
1630 *
1631 * @param iGReg The general register to store the CRx value in.
1632 * @param iCrReg The CRx register to read (valid).
1633 */
1634IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
1635{
1636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1637 if (pIemCpu->uCpl != 0)
1638 return iemRaiseGeneralProtectionFault0(pIemCpu);
1639 Assert(!pCtx->eflags.Bits.u1VM);
1640
1641 /* read it */
1642 uint64_t crX;
1643 switch (iCrReg)
1644 {
1645 case 0: crX = pCtx->cr0; break;
1646 case 2: crX = pCtx->cr2; break;
1647 case 3: crX = pCtx->cr3; break;
1648 case 4: crX = pCtx->cr4; break;
1649 case 8:
1650 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1651 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
1652 else
1653 crX = 0xff;
1654 break;
1655 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
1656 }
1657
1658 /* store it */
1659 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1660 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
1661 else
1662 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
1663
1664 iemRegAddToRip(pIemCpu, cbInstr);
1665 return VINF_SUCCESS;
1666}
1667
1668
1669/**
1670 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
1671 *
1672 * @param iCrReg The CRx register to write (valid).
1673 * @param uNewCrX The new value.
1674 */
1675IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
1676{
1677 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1678 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1679 VBOXSTRICTRC rcStrict;
1680 int rc;
1681
1682 /*
1683 * Try to store it.
1684 * Unfortunately, CPUM only does a tiny bit of the work.
1685 */
1686 switch (iCrReg)
1687 {
1688 case 0:
1689 {
1690 /*
1691 * Perform checks.
1692 */
1693 uint64_t const uOldCrX = pCtx->cr0;
1694 uNewCrX |= X86_CR0_ET; /* hardcoded */
1695
1696 /* Check for reserved bits. */
1697 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1698 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1699 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
1700 if (uNewCrX & ~(uint64_t)fValid)
1701 {
1702 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
1703 return iemRaiseGeneralProtectionFault0(pIemCpu);
1704 }
1705
1706 /* Check for invalid combinations. */
1707 if ( (uNewCrX & X86_CR0_PG)
1708 && !(uNewCrX & X86_CR0_PE) )
1709 {
1710 Log(("Trying to set CR0.PG without CR0.PE\n"));
1711 return iemRaiseGeneralProtectionFault0(pIemCpu);
1712 }
1713
1714 if ( !(uNewCrX & X86_CR0_CD)
1715 && (uNewCrX & X86_CR0_NW) )
1716 {
1717 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
1718 return iemRaiseGeneralProtectionFault0(pIemCpu);
1719 }
1720
1721 /* Long mode consistency checks. */
1722 if ( (uNewCrX & X86_CR0_PG)
1723 && !(uOldCrX & X86_CR0_PG)
1724 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
1725 {
1726 if (!(pCtx->cr4 & X86_CR4_PAE))
1727 {
1728 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
1729 return iemRaiseGeneralProtectionFault0(pIemCpu);
1730 }
1731 if (pCtx->csHid.Attr.n.u1Long)
1732 {
1733 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
1734 return iemRaiseGeneralProtectionFault0(pIemCpu);
1735 }
1736 }
1737
1738 /** @todo check reserved PDPTR bits as AMD states. */
1739
1740 /*
1741 * Change CR0.
1742 */
1743 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1744 {
1745 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
1746 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
1747 }
1748 else
1749 pCtx->cr0 = uNewCrX;
1750 Assert(pCtx->cr0 == uNewCrX);
1751
1752 /*
1753 * Change EFER.LMA if entering or leaving long mode.
1754 */
1755 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
1756 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
1757 {
1758 uint64_t NewEFER = pCtx->msrEFER;
1759 if (uNewCrX & X86_CR0_PG)
1760 NewEFER |= MSR_K6_EFER_LME;
1761 else
1762 NewEFER &= ~MSR_K6_EFER_LME;
1763
1764 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1765 CPUMSetGuestEFER(pVCpu, NewEFER);
1766 else
1767 pCtx->msrEFER = NewEFER;
1768 Assert(pCtx->msrEFER == NewEFER);
1769 }
1770
1771 /*
1772 * Inform PGM.
1773 */
1774 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1775 {
1776 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
1777 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
1778 {
1779 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
1780 AssertRCReturn(rc, rc);
1781 /* ignore informational status codes */
1782 }
1783 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1784 /** @todo Status code management. */
1785 }
1786 else
1787 rcStrict = VINF_SUCCESS;
1788 break;
1789 }
1790
1791 /*
1792 * CR2 can be changed without any restrictions.
1793 */
1794 case 2:
1795 pCtx->cr2 = uNewCrX;
1796 rcStrict = VINF_SUCCESS;
1797 break;
1798
1799 /*
1800 * CR3 is relatively simple, although AMD and Intel have different
1801 * accounts of how setting reserved bits is handled. We take Intel's
1802 * word for the lower bits and AMD's for the high bits (63:52).
1803 */
1804 /** @todo Testcase: Setting reserved bits in CR3, especially before
1805 * enabling paging. */
1806 case 3:
1807 {
1808 /* check / mask the value. */
1809 if (uNewCrX & UINT64_C(0xfff0000000000000))
1810 {
1811 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
1812 return iemRaiseGeneralProtectionFault0(pIemCpu);
1813 }
1814
1815 uint64_t fValid;
1816 if ( (pCtx->cr4 & X86_CR4_PAE)
1817 && (pCtx->msrEFER & MSR_K6_EFER_LME))
1818 fValid = UINT64_C(0x000ffffffffff014);
1819 else if (pCtx->cr4 & X86_CR4_PAE)
1820 fValid = UINT64_C(0xfffffff4);
1821 else
1822 fValid = UINT64_C(0xfffff014);
1823 if (uNewCrX & ~fValid)
1824 {
1825 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
1826 uNewCrX, uNewCrX & ~fValid));
1827 uNewCrX &= fValid;
1828 }
1829
1830 /** @todo If we're in PAE mode we should check the PDPTRs for
1831 * invalid bits. */
1832
1833 /* Make the change. */
1834 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1835 {
1836 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
1837 AssertRCSuccessReturn(rc, rc);
1838 }
1839 else
1840 pCtx->cr3 = uNewCrX;
1841
1842 /* Inform PGM. */
1843 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1844 {
1845 if (pCtx->cr0 & X86_CR0_PG)
1846 {
1847 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
1848 AssertRCReturn(rc, rc);
1849 /* ignore informational status codes */
1850 /** @todo status code management */
1851 }
1852 }
1853 rcStrict = VINF_SUCCESS;
1854 break;
1855 }
1856
1857 /*
1858 * CR4 is a bit more tedious as there are bits which cannot be cleared
1859 * under some circumstances and such.
1860 */
1861 case 4:
1862 {
1863 uint64_t const uOldCrX = pCtx->cr4;
1864
1865 /* reserved bits */
1866 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
1867 | X86_CR4_TSD | X86_CR4_DE
1868 | X86_CR4_PSE | X86_CR4_PAE
1869 | X86_CR4_MCE | X86_CR4_PGE
1870 | X86_CR4_PCE | X86_CR4_OSFSXR
1871 | X86_CR4_OSXMMEEXCPT;
1872 //if (xxx)
1873 // fValid |= X86_CR4_VMXE;
1874 //if (xxx)
1875 // fValid |= X86_CR4_OSXSAVE;
1876 if (uNewCrX & ~(uint64_t)fValid)
1877 {
1878 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
1879 return iemRaiseGeneralProtectionFault0(pIemCpu);
1880 }
1881
1882 /* long mode checks. */
1883 if ( (uOldCrX & X86_CR4_PAE)
1884 && !(uNewCrX & X86_CR4_PAE)
1885 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
1886 {
1887 Log(("Trying to clear CR4.PAE while long mode is active\n"));
1888 return iemRaiseGeneralProtectionFault0(pIemCpu);
1889 }
1890
1891
1892 /*
1893 * Change it.
1894 */
1895 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1896 {
1897 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
1898 AssertRCSuccessReturn(rc, rc);
1899 }
1900 else
1901 pCtx->cr4 = uNewCrX;
1902 Assert(pCtx->cr4 == uNewCrX);
1903
1904 /*
1905 * Notify SELM and PGM.
1906 */
1907 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1908 {
1909 /* SELM - VME may change things wrt the TSS shadowing. */
1910 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
1911 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1912
1913 /* PGM - flushing and mode. */
1914 if ( (uNewCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
1915 != (uOldCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE)) )
1916 {
1917 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
1918 AssertRCReturn(rc, rc);
1919 /* ignore informational status codes */
1920 }
1921 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1922 /** @todo Status code management. */
1923 }
1924 else
1925 rcStrict = VINF_SUCCESS;
1926 break;
1927 }
1928
1929 /*
1930 * CR8 maps to the APIC TPR.
1931 */
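    /* Note: architecturally only bits 3:0 of CR8 are defined; they map to TPR bits 7:4, and setting any higher bit should raise #GP(0) once this is implemented. */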
1932 case 8:
1933 if (IEM_VERIFICATION_ENABLED(pIemCpu))
1934 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
1935 else
1936 rcStrict = VINF_SUCCESS;
1937 break;
1938
1939 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
1940 }
1941
1942 /*
1943 * Advance the RIP on success.
1944 */
1945 /** @todo Status code management. */
1946 if (rcStrict == VINF_SUCCESS)
1947 iemRegAddToRip(pIemCpu, cbInstr);
1948 return rcStrict;
1949
1950}
1951
1952
1953/**
1954 * Implements mov CRx,GReg.
1955 *
1956 * @param iCrReg The CRx register to write (valid).
1957 * @param iGReg The general register to store the CRx value in.
1958 */
1959IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
1960{
1961 if (pIemCpu->uCpl != 0)
1962 return iemRaiseGeneralProtectionFault0(pIemCpu);
1963 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
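    /* V8086 code always executes at CPL 3, so the CPL check above has already rejected it and VM must be clear at this point. */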
1964
1965 /*
1966 * Read the new value from the source register and call common worker.
1967 */
1968 uint64_t uNewCrX;
1969 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1970 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
1971 else
1972 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
1973 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
1974}
1975
1976
1977/**
1978 * Implements 'LMSW r/m16'
1979 *
1980 * @param u16NewMsw The new value.
1981 */
1982IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
1983{
1984 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1985
1986 if (pIemCpu->uCpl != 0)
1987 return iemRaiseGeneralProtectionFault0(pIemCpu);
1988 Assert(!pCtx->eflags.Bits.u1VM);
1989
1990 /*
1991 * Compose the new CR0 value and call common worker.
1992 */
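    /* Note: only the low four MSW bits (PE, MP, EM, TS) are loaded, and PE is not masked out of the old CR0 value, so LMSW can set PE but never clear it. */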
1993 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
1994 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
1995 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
1996}
1997
1998
1999/**
2000 * Implements 'CLTS'.
2001 */
2002IEM_CIMPL_DEF_0(iemOpCImpl_clts)
2003{
2004 if (pIemCpu->uCpl != 0)
2005 return iemRaiseGeneralProtectionFault0(pIemCpu);
2006
2007 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2008 uint64_t uNewCr0 = pCtx->cr0;
2009 uNewCr0 &= ~X86_CR0_TS;
2010 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
2011}
2012
2013
2014/**
2015 * Implements 'IN eAX, port'.
2016 *
2017 * @param u16Port The source port.
2018 * @param cbReg The register size.
2019 */
2020IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
2021{
2022 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2023
2024 /*
2025 * CPL check
2026 */
2027 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
2028 if (rcStrict != VINF_SUCCESS)
2029 return rcStrict;
2030
2031 /*
2032 * Perform the I/O.
2033 */
2034 uint32_t u32Value;
2035 if (IEM_VERIFICATION_ENABLED(pIemCpu))
2036 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
2037 else
2038 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
2039 if (IOM_SUCCESS(rcStrict))
2040 {
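        /* 8- and 16-bit reads leave the upper register bits untouched; the 32-bit case assigns RAX so the value is zero-extended, as a 32-bit GPR write requires. */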
2041 switch (cbReg)
2042 {
2043 case 1: pCtx->al = (uint8_t)u32Value; break;
2044 case 2: pCtx->ax = (uint16_t)u32Value; break;
2045 case 4: pCtx->rax = u32Value; break;
2046 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
2047 }
2048 iemRegAddToRip(pIemCpu, cbInstr);
2049 pIemCpu->cPotentialExits++;
2050 }
2051 /** @todo massage rcStrict. */
2052 return rcStrict;
2053}
2054
2055
2056/**
2057 * Implements 'IN eAX, DX'.
2058 *
2059 * @param cbReg The register size.
2060 */
2061IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
2062{
2063 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
2064}
2065
2066
2067/**
2068 * Implements 'OUT port, eAX'.
2069 *
2070 * @param u16Port The destination port.
2071 * @param cbReg The register size.
2072 */
2073IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
2074{
2075 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2076
2077 /*
2078 * CPL check
2079 */
2080 if ( (pCtx->cr0 & X86_CR0_PE)
2081 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
2082 || pCtx->eflags.Bits.u1VM) )
2083 {
2084 /** @todo I/O port permission bitmap check */
2085 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
2086 }
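    /** @todo This duplicates the check done by iemHlpCheckPortIOPermission for IN; consider calling that helper here as well. */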
2087
2088 /*
2089 * Perform the I/O.
2090 */
2091 uint32_t u32Value;
2092 switch (cbReg)
2093 {
2094 case 1: u32Value = pCtx->al; break;
2095 case 2: u32Value = pCtx->ax; break;
2096 case 4: u32Value = pCtx->eax; break;
2097 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
2098 }
2099 VBOXSTRICTRC rc;
2100 if (IEM_VERIFICATION_ENABLED(pIemCpu))
2101 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
2102 else
2103 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
2104 if (IOM_SUCCESS(rc))
2105 {
2106 iemRegAddToRip(pIemCpu, cbInstr);
2107 pIemCpu->cPotentialExits++;
2108 /** @todo massage rc. */
2109 }
2110 return rc;
2111}
2112
2113
2114/**
2115 * Implements 'OUT DX, eAX'.
2116 *
2117 * @param cbReg The register size.
2118 */
2119IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
2120{
2121 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
2122}
2123
2124
2125/**
2126 * Implements 'CLI'.
2127 */
2128IEM_CIMPL_DEF_0(iemCImpl_cli)
2129{
2130 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2131
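    /* Protected mode: CPL <= IOPL may clear IF directly; at CPL 3 with CR4.PVI set only VIF is cleared; anything else is #GP(0). In V8086 mode IOPL 3 clears IF, IOPL < 3 requires CR4.VME to clear VIF, otherwise #GP(0). */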
2132 if (pCtx->cr0 & X86_CR0_PE)
2133 {
2134 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
2135 if (!pCtx->eflags.Bits.u1VM)
2136 {
2137 if (pIemCpu->uCpl <= uIopl)
2138 pCtx->eflags.Bits.u1IF = 0;
2139 else if ( pIemCpu->uCpl == 3
2140 && (pCtx->cr4 & X86_CR4_PVI) )
2141 pCtx->eflags.Bits.u1VIF = 0;
2142 else
2143 return iemRaiseGeneralProtectionFault0(pIemCpu);
2144 }
2145 /* V8086 */
2146 else if (uIopl == 3)
2147 pCtx->eflags.Bits.u1IF = 0;
2148 else if ( uIopl < 3
2149 && (pCtx->cr4 & X86_CR4_VME) )
2150 pCtx->eflags.Bits.u1VIF = 0;
2151 else
2152 return iemRaiseGeneralProtectionFault0(pIemCpu);
2153 }
2154 /* real mode */
2155 else
2156 pCtx->eflags.Bits.u1IF = 0;
2157 iemRegAddToRip(pIemCpu, cbInstr);
2158 return VINF_SUCCESS;
2159}
2160
2161
2162/**
2163 * Implements 'STI'.
2164 */
2165IEM_CIMPL_DEF_0(iemCImpl_sti)
2166{
2167 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2168
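    /* Same IOPL/PVI/VME rules as CLI, except that setting VIF additionally requires VIP to be clear; otherwise #GP(0) is raised. */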
2169 if (pCtx->cr0 & X86_CR0_PE)
2170 {
2171 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
2172 if (!pCtx->eflags.Bits.u1VM)
2173 {
2174 if (pIemCpu->uCpl <= uIopl)
2175 pCtx->eflags.Bits.u1IF = 1;
2176 else if ( pIemCpu->uCpl == 3
2177 && (pCtx->cr4 & X86_CR4_PVI)
2178 && !pCtx->eflags.Bits.u1VIP )
2179 pCtx->eflags.Bits.u1VIF = 1;
2180 else
2181 return iemRaiseGeneralProtectionFault0(pIemCpu);
2182 }
2183 /* V8086 */
2184 else if (uIopl == 3)
2185 pCtx->eflags.Bits.u1IF = 1;
2186 else if ( uIopl < 3
2187 && (pCtx->cr4 & X86_CR4_VME)
2188 && !pCtx->eflags.Bits.u1VIP )
2189 pCtx->eflags.Bits.u1VIF = 1;
2190 else
2191 return iemRaiseGeneralProtectionFault0(pIemCpu);
2192 }
2193 /* real mode */
2194 else
2195 pCtx->eflags.Bits.u1IF = 1;
2196
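    /* STI inhibits interrupt delivery until after the next instruction completes (the STI shadow); record the updated RIP so EM can enforce that one-instruction window. */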
2197 iemRegAddToRip(pIemCpu, cbInstr);
2198 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2199 return VINF_SUCCESS;
2200}
2201
2202
2203/**
2204 * Implements 'HLT'.
2205 */
2206IEM_CIMPL_DEF_0(iemCImpl_hlt)
2207{
2208 if (pIemCpu->uCpl != 0)
2209 return iemRaiseGeneralProtectionFault0(pIemCpu);
2210 iemRegAddToRip(pIemCpu, cbInstr);
2211 return VINF_EM_HALT;
2212}
2213
2214
2215/**
2216 * Implements 'CPUID'.
2217 */
2218IEM_CIMPL_DEF_0(iemOpCImpl_cpuid)
2219{
2220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2221
2222 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
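    /* CPUMGetGuestCpuId writes through the 32-bit register views, leaving the upper halves untouched, so explicitly clear bits 63:32 of RAX/RBX/RCX/RDX as CPUID requires. */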
2223 pCtx->rax &= UINT32_C(0xffffffff);
2224 pCtx->rbx &= UINT32_C(0xffffffff);
2225 pCtx->rcx &= UINT32_C(0xffffffff);
2226 pCtx->rdx &= UINT32_C(0xffffffff);
2227
2228 iemRegAddToRip(pIemCpu, cbInstr);
2229 return VINF_SUCCESS;
2230}
2231
2232
2233/*
2234 * Instantiate the various string operation combinations.
2235 */
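/*
 * IEMAllCImplStrInstr.cpp.h acts as a template: each inclusion uses the
 * current OP_SIZE / ADDR_SIZE definitions to generate the string instruction
 * workers for that operand/address size pair.  The 64-bit operand size with
 * 16-bit addressing is omitted because that combination cannot be encoded.
 */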
2236#define OP_SIZE 8
2237#define ADDR_SIZE 16
2238#include "IEMAllCImplStrInstr.cpp.h"
2239#define OP_SIZE 8
2240#define ADDR_SIZE 32
2241#include "IEMAllCImplStrInstr.cpp.h"
2242#define OP_SIZE 8
2243#define ADDR_SIZE 64
2244#include "IEMAllCImplStrInstr.cpp.h"
2245
2246#define OP_SIZE 16
2247#define ADDR_SIZE 16
2248#include "IEMAllCImplStrInstr.cpp.h"
2249#define OP_SIZE 16
2250#define ADDR_SIZE 32
2251#include "IEMAllCImplStrInstr.cpp.h"
2252#define OP_SIZE 16
2253#define ADDR_SIZE 64
2254#include "IEMAllCImplStrInstr.cpp.h"
2255
2256#define OP_SIZE 32
2257#define ADDR_SIZE 16
2258#include "IEMAllCImplStrInstr.cpp.h"
2259#define OP_SIZE 32
2260#define ADDR_SIZE 32
2261#include "IEMAllCImplStrInstr.cpp.h"
2262#define OP_SIZE 32
2263#define ADDR_SIZE 64
2264#include "IEMAllCImplStrInstr.cpp.h"
2265
2266#define OP_SIZE 64
2267#define ADDR_SIZE 32
2268#include "IEMAllCImplStrInstr.cpp.h"
2269#define OP_SIZE 64
2270#define ADDR_SIZE 64
2271#include "IEMAllCImplStrInstr.cpp.h"
2272
2273
2274/** @} */
2275