VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@37034

Last change on this file was in revision 37034, checked in by vboxsync, 14 years ago

IEM: Hacking in progress...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 98.6 KB
1/* $Id: IEMAllCImpl.cpp.h 37034 2011-05-10 18:10:46Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 /** @todo I/O port permission bitmap check */
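/* Sketch of the missing check (step names illustrative, not actual helpers): read the
   16-bit I/O map base from offset 0x66 of the current TSS, fetch the bits covering
   ports u16Port thru u16Port + cbOperand - 1, and raise #GP(0) if any of them is set
   or if the bitmap lies beyond the TSS limit. */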
42 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
46
47/** @} */
48
49/** @name C Implementations
50 * @{
51 */
52
53/**
54 * Implements a 16-bit popa.
55 */
56IEM_CIMPL_DEF_0(iemCImpl_popa_16)
57{
58 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
59 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
60 RTGCPTR GCPtrLast = GCPtrStart + 15;
61 VBOXSTRICTRC rcStrict;
62
63 /*
64 * The docs are a bit hard to comprehend here, but it looks like we wrap
65 * around in real mode as long as none of the individual "popa" crosses the
66 * end of the stack segment. In protected mode we check the whole access
67 * in one go. For efficiency, only do the word-by-word thing if we're in
68 * danger of wrapping around.
69 */
70 /** @todo do popa boundary / wrap-around checks. */
71 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
72 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
73 {
74 /* word-by-word */
75 RTUINT64U TmpRsp;
76 TmpRsp.u = pCtx->rsp;
77 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
78 if (rcStrict == VINF_SUCCESS)
79 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
80 if (rcStrict == VINF_SUCCESS)
81 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
82 if (rcStrict == VINF_SUCCESS)
83 {
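/* POPA discards the saved SP value; just step the temporary stack pointer past it. */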
84 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
85 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
86 }
87 if (rcStrict == VINF_SUCCESS)
88 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
89 if (rcStrict == VINF_SUCCESS)
90 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
91 if (rcStrict == VINF_SUCCESS)
92 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
93 if (rcStrict == VINF_SUCCESS)
94 {
95 pCtx->rsp = TmpRsp.u;
96 iemRegAddToRip(pIemCpu, cbInstr);
97 }
98 }
99 else
100 {
101 uint16_t const *pa16Mem = NULL;
102 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
103 if (rcStrict == VINF_SUCCESS)
104 {
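/* PUSHA stores AX,CX,DX,BX,SP,BP,SI,DI from the highest address downwards, so the
   word for register iReg sits at index 7 - iReg; that is what the 7 - X86_GREG_xXX
   indexing below relies on. */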
105 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
106 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
107 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
108 /* skip sp */
109 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
110 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
111 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
112 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
113 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
114 if (rcStrict == VINF_SUCCESS)
115 {
116 iemRegAddToRsp(pCtx, 16);
117 iemRegAddToRip(pIemCpu, cbInstr);
118 }
119 }
120 }
121 return rcStrict;
122}
123
124
125/**
126 * Implements a 32-bit popa.
127 */
128IEM_CIMPL_DEF_0(iemCImpl_popa_32)
129{
130 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
131 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
132 RTGCPTR GCPtrLast = GCPtrStart + 31;
133 VBOXSTRICTRC rcStrict;
134
135 /*
136 * The docs are a bit hard to comprehend here, but it looks like we wrap
137 * around in real mode as long as none of the individual "popa" crosses the
138 * end of the stack segment. In protected mode we check the whole access
139 * in one go. For efficiency, only do the word-by-word thing if we're in
140 * danger of wrapping around.
141 */
142 /** @todo do popa boundary / wrap-around checks. */
143 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
144 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
145 {
146 /* word-by-word */
147 RTUINT64U TmpRsp;
148 TmpRsp.u = pCtx->rsp;
149 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
150 if (rcStrict == VINF_SUCCESS)
151 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
152 if (rcStrict == VINF_SUCCESS)
153 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
154 if (rcStrict == VINF_SUCCESS)
155 {
156 iemRegAddToRspEx(&TmpRsp, 4, pCtx); /* skip esp */
157 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
158 }
159 if (rcStrict == VINF_SUCCESS)
160 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
161 if (rcStrict == VINF_SUCCESS)
162 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
163 if (rcStrict == VINF_SUCCESS)
164 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
165 if (rcStrict == VINF_SUCCESS)
166 {
167#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
168 pCtx->rdi &= UINT32_MAX;
169 pCtx->rsi &= UINT32_MAX;
170 pCtx->rbp &= UINT32_MAX;
171 pCtx->rbx &= UINT32_MAX;
172 pCtx->rdx &= UINT32_MAX;
173 pCtx->rcx &= UINT32_MAX;
174 pCtx->rax &= UINT32_MAX;
175#endif
176 pCtx->rsp = TmpRsp.u;
177 iemRegAddToRip(pIemCpu, cbInstr);
178 }
179 }
180 else
181 {
182 uint32_t const *pa32Mem;
183 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
184 if (rcStrict == VINF_SUCCESS)
185 {
186 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
187 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
188 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
189 /* skip esp */
190 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
191 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
192 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
193 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
194 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
195 if (rcStrict == VINF_SUCCESS)
196 {
197 iemRegAddToRsp(pCtx, 32);
198 iemRegAddToRip(pIemCpu, cbInstr);
199 }
200 }
201 }
202 return rcStrict;
203}
204
205
206/**
207 * Implements a 16-bit pusha.
208 */
209IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
210{
211 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
212 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
213 RTGCPTR GCPtrBottom = GCPtrTop - 15;
214 VBOXSTRICTRC rcStrict;
215
216 /*
217 * The docs are a bit hard to comprehend here, but it looks like we wrap
218 * around in real mode as long as none of the individual "pusha" crosses the
219 * end of the stack segment. In protected mode we check the whole access
220 * in one go. For efficiency, only do the word-by-word thing if we're in
221 * danger of wrapping around.
222 */
223 /** @todo do pusha boundary / wrap-around checks. */
224 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
225 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
226 {
227 /* word-by-word */
228 RTUINT64U TmpRsp;
229 TmpRsp.u = pCtx->rsp;
230 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
231 if (rcStrict == VINF_SUCCESS)
232 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
233 if (rcStrict == VINF_SUCCESS)
234 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
235 if (rcStrict == VINF_SUCCESS)
236 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
237 if (rcStrict == VINF_SUCCESS)
238 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
239 if (rcStrict == VINF_SUCCESS)
240 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
241 if (rcStrict == VINF_SUCCESS)
242 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
243 if (rcStrict == VINF_SUCCESS)
244 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
245 if (rcStrict == VINF_SUCCESS)
246 {
247 pCtx->rsp = TmpRsp.u;
248 iemRegAddToRip(pIemCpu, cbInstr);
249 }
250 }
251 else
252 {
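/* GCPtrBottom was GCPtrTop - 15 for the wrap check above; the 16-byte frame actually
   starts one byte lower, at GCPtrTop - 16, which is what gets mapped below. */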
253 GCPtrBottom--;
254 uint16_t *pa16Mem = NULL;
255 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
256 if (rcStrict == VINF_SUCCESS)
257 {
258 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
259 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
260 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
261 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
262 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
263 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
264 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
265 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
266 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
267 if (rcStrict == VINF_SUCCESS)
268 {
269 iemRegSubFromRsp(pCtx, 16);
270 iemRegAddToRip(pIemCpu, cbInstr);
271 }
272 }
273 }
274 return rcStrict;
275}
276
277
278/**
279 * Implements a 32-bit pusha.
280 */
281IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
282{
283 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
284 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
285 RTGCPTR GCPtrBottom = GCPtrTop - 31;
286 VBOXSTRICTRC rcStrict;
287
288 /*
289 * The docs are a bit hard to comprehend here, but it looks like we wrap
290 * around in real mode as long as none of the individual "pusha" crosses the
291 * end of the stack segment. In protected mode we check the whole access
292 * in one go. For efficiency, only do the word-by-word thing if we're in
293 * danger of wrapping around.
294 */
295 /** @todo do pusha boundary / wrap-around checks. */
296 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
297 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
298 {
299 /* word-by-word */
300 RTUINT64U TmpRsp;
301 TmpRsp.u = pCtx->rsp;
302 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
303 if (rcStrict == VINF_SUCCESS)
304 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
305 if (rcStrict == VINF_SUCCESS)
306 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
307 if (rcStrict == VINF_SUCCESS)
308 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
309 if (rcStrict == VINF_SUCCESS)
310 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
311 if (rcStrict == VINF_SUCCESS)
312 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
313 if (rcStrict == VINF_SUCCESS)
314 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
315 if (rcStrict == VINF_SUCCESS)
316 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
317 if (rcStrict == VINF_SUCCESS)
318 {
319 pCtx->rsp = TmpRsp.u;
320 iemRegAddToRip(pIemCpu, cbInstr);
321 }
322 }
323 else
324 {
325 GCPtrBottom--;
326 uint32_t *pa32Mem;
327 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
328 if (rcStrict == VINF_SUCCESS)
329 {
330 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
331 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
332 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
333 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
334 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
335 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
336 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
337 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
338 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
339 if (rcStrict == VINF_SUCCESS)
340 {
341 iemRegSubFromRsp(pCtx, 32);
342 iemRegAddToRip(pIemCpu, cbInstr);
343 }
344 }
345 }
346 return rcStrict;
347}
348
349
350/**
351 * Implements pushf.
352 *
353 *
354 * @param enmEffOpSize The effective operand size.
355 */
356IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
357{
358 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
359
360 /*
361 * If we're in V8086 mode some care is required (which is why we're
362 * doing this in a C implementation).
363 */
364 uint32_t fEfl = pCtx->eflags.u;
365 if ( (fEfl & X86_EFL_VM)
366 && X86_EFL_GET_IOPL(fEfl) != 3 )
367 {
368 Assert(pCtx->cr0 & X86_CR0_PE);
369 if ( enmEffOpSize != IEMMODE_16BIT
370 || !(pCtx->cr4 & X86_CR4_VME))
371 return iemRaiseGeneralProtectionFault0(pIemCpu);
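/* With CR4.VME set and IOPL < 3 the 16-bit PUSHF is permitted, but the pushed image
   carries VIF (bit 19) in place of IF (bit 9) - hence the shift by (19 - 9) below. */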
372 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
373 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
374 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
375 }
376
377 /*
378 * Ok, clear RF and VM and push the flags.
379 */
380 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
381
382 VBOXSTRICTRC rcStrict;
383 switch (enmEffOpSize)
384 {
385 case IEMMODE_16BIT:
386 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
387 break;
388 case IEMMODE_32BIT:
389 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
390 break;
391 case IEMMODE_64BIT:
392 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
393 break;
394 IEM_NOT_REACHED_DEFAULT_CASE_RET();
395 }
396 if (rcStrict != VINF_SUCCESS)
397 return rcStrict;
398
399 iemRegAddToRip(pIemCpu, cbInstr);
400 return VINF_SUCCESS;
401}
402
403
404/**
405 * Implements popf.
406 *
407 * @param enmEffOpSize The effective operand size.
408 */
409IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
410{
411 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
412 uint32_t const fEflOld = pCtx->eflags.u;
413 VBOXSTRICTRC rcStrict;
414 uint32_t fEflNew;
415
416 /*
417 * V8086 is special as usual.
418 */
419 if (fEflOld & X86_EFL_VM)
420 {
421 /*
422 * Almost anything goes if IOPL is 3.
423 */
424 if (X86_EFL_GET_IOPL(fEflOld) == 3)
425 {
426 switch (enmEffOpSize)
427 {
428 case IEMMODE_16BIT:
429 {
430 uint16_t u16Value;
431 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
432 if (rcStrict != VINF_SUCCESS)
433 return rcStrict;
434 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
435 break;
436 }
437 case IEMMODE_32BIT:
438 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
439 if (rcStrict != VINF_SUCCESS)
440 return rcStrict;
441 break;
442 IEM_NOT_REACHED_DEFAULT_CASE_RET();
443 }
444
445 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
446 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
447 }
448 /*
449 * Interrupt flag virtualization with CR4.VME=1.
450 */
451 else if ( enmEffOpSize == IEMMODE_16BIT
452 && (pCtx->cr4 & X86_CR4_VME) )
453 {
454 uint16_t u16Value;
455 RTUINT64U TmpRsp;
456 TmpRsp.u = pCtx->rsp;
457 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
458 if (rcStrict != VINF_SUCCESS)
459 return rcStrict;
460
461 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
462 * or before? */
463 if ( ( (u16Value & X86_EFL_IF)
464 && (fEflOld & X86_EFL_VIP))
465 || (u16Value & X86_EFL_TF) )
466 return iemRaiseGeneralProtectionFault0(pIemCpu);
467
468 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
469 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
470 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
471 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
472
473 pCtx->rsp = TmpRsp.u;
474 }
475 else
476 return iemRaiseGeneralProtectionFault0(pIemCpu);
477
478 }
479 /*
480 * Not in V8086 mode.
481 */
482 else
483 {
484 /* Pop the flags. */
485 switch (enmEffOpSize)
486 {
487 case IEMMODE_16BIT:
488 {
489 uint16_t u16Value;
490 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
491 if (rcStrict != VINF_SUCCESS)
492 return rcStrict;
493 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
494 break;
495 }
496 case IEMMODE_32BIT:
497 case IEMMODE_64BIT:
498 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
499 if (rcStrict != VINF_SUCCESS)
500 return rcStrict;
501 break;
502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
503 }
504
505 /* Merge them with the current flags. */
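/* CPL 0 may change both IOPL and IF; CPL <= IOPL may change IF but not IOPL; any
   other CPL may change neither. */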
506 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
507 || pIemCpu->uCpl == 0)
508 {
509 fEflNew &= X86_EFL_POPF_BITS;
510 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
511 }
512 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
513 {
514 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
515 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
516 }
517 else
518 {
519 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
520 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
521 }
522 }
523
524 /*
525 * Commit the flags.
526 */
527 Assert(fEflNew & RT_BIT_32(1));
528 pCtx->eflags.u = fEflNew;
529 iemRegAddToRip(pIemCpu, cbInstr);
530
531 return VINF_SUCCESS;
532}
533
534
535/**
536 * Implements an indirect call.
537 *
538 * @param uNewPC The new program counter (RIP) value (loaded from the
539 * operand).
541 */
542IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
543{
544 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
545 uint16_t uOldPC = pCtx->ip + cbInstr;
546 if (uNewPC > pCtx->csHid.u32Limit)
547 return iemRaiseGeneralProtectionFault0(pIemCpu);
548
549 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
550 if (rcStrict != VINF_SUCCESS)
551 return rcStrict;
552
553 pCtx->rip = uNewPC;
554 return VINF_SUCCESS;
555
556}
557
558
559/**
560 * Implements a 16-bit relative call.
561 *
562 * @param offDisp The displacement offset.
563 */
564IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
565{
566 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
567 uint16_t uOldPC = pCtx->ip + cbInstr;
568 uint16_t uNewPC = uOldPC + offDisp;
569 if (uNewPC > pCtx->csHid.u32Limit)
570 return iemRaiseGeneralProtectionFault0(pIemCpu);
571
572 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
573 if (rcStrict != VINF_SUCCESS)
574 return rcStrict;
575
576 pCtx->rip = uNewPC;
577 return VINF_SUCCESS;
578}
579
580
581/**
582 * Implements a 32-bit indirect call.
583 *
584 * @param uNewPC The new program counter (RIP) value (loaded from the
585 * operand).
587 */
588IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
589{
590 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
591 uint32_t uOldPC = pCtx->eip + cbInstr;
592 if (uNewPC > pCtx->csHid.u32Limit)
593 return iemRaiseGeneralProtectionFault0(pIemCpu);
594
595 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
596 if (rcStrict != VINF_SUCCESS)
597 return rcStrict;
598
599 pCtx->rip = uNewPC;
600 return VINF_SUCCESS;
601
602}
603
604
605/**
606 * Implements a 32-bit relative call.
607 *
608 * @param offDisp The displacement offset.
609 */
610IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
611{
612 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
613 uint32_t uOldPC = pCtx->eip + cbInstr;
614 uint32_t uNewPC = uOldPC + offDisp;
615 if (uNewPC > pCtx->csHid.u32Limit)
616 return iemRaiseGeneralProtectionFault0(pIemCpu);
617
618 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
619 if (rcStrict != VINF_SUCCESS)
620 return rcStrict;
621
622 pCtx->rip = uNewPC;
623 return VINF_SUCCESS;
624}
625
626
627/**
628 * Implements a 64-bit indirect call.
629 *
630 * @param uNewPC The new program counter (RIP) value (loaded from the
631 * operand).
633 */
634IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
635{
636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
637 uint64_t uOldPC = pCtx->rip + cbInstr;
638 if (!IEM_IS_CANONICAL(uNewPC))
639 return iemRaiseGeneralProtectionFault0(pIemCpu);
640
641 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644
645 pCtx->rip = uNewPC;
646 return VINF_SUCCESS;
647
648}
649
650
651/**
652 * Implements a 64-bit relative call.
653 *
654 * @param offDisp The displacement offset.
655 */
656IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
657{
658 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
659 uint64_t uOldPC = pCtx->rip + cbInstr;
660 uint64_t uNewPC = uOldPC + offDisp;
661 if (!IEM_IS_CANONICAL(uNewPC))
662 return iemRaiseNotCanonical(pIemCpu);
663
664 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
665 if (rcStrict != VINF_SUCCESS)
666 return rcStrict;
667
668 pCtx->rip = uNewPC;
669 return VINF_SUCCESS;
670}
671
672
673/**
674 * Implements far jumps.
675 *
676 * @param uSel The selector.
677 * @param offSeg The segment offset.
678 * @param enmEffOpSize The effective operand size.
679 */
680IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint32_t, offSeg, IEMMODE, enmEffOpSize)
681{
682 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
683
684 /*
685 * Real mode and V8086 mode are easy. The only snag seems to be that
686 * CS.limit doesn't change and the limit check is done against the current
687 * limit.
688 */
689 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
690 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
691 {
692 if (offSeg > pCtx->csHid.u32Limit)
693 return iemRaiseGeneralProtectionFault0(pIemCpu);
694
695 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
696 pCtx->rip = offSeg;
697 else
698 pCtx->rip = offSeg & UINT16_MAX;
699 pCtx->cs = uSel;
700 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
701 /** @todo REM reset the accessed bit (see on jmp far16 after disabling
702 * PE. Check with VT-x and AMD-V. */
703#ifdef IEM_VERIFICATION_MODE
704 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
705#endif
706 return VINF_SUCCESS;
707 }
708
709 /*
710 * Protected mode. Need to parse the specified descriptor...
711 */
712 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
713 {
714 Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
715 return iemRaiseGeneralProtectionFault0(pIemCpu);
716 }
717
718 /* Fetch the descriptor. */
719 IEMSELDESC Desc;
720 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
721 if (rcStrict != VINF_SUCCESS)
722 return rcStrict;
723
724 /* Is it there? */
725 if (!Desc.Legacy.Gen.u1Present)
726 {
727 Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
728 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
729 }
730
731 /*
732 * Deal with it according to its type.
733 */
734 if (Desc.Legacy.Gen.u1DescType)
735 {
736 /* Only code segments. */
737 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
738 {
739 Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
740 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
741 }
742
743 /* L vs D. */
744 if ( Desc.Legacy.Gen.u1Long
745 && Desc.Legacy.Gen.u1DefBig
746 && IEM_IS_LONG_MODE(pIemCpu))
747 {
748 Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
749 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
750 }
751
752 /* DPL/RPL/CPL check, where conforming segments make a difference. */
753 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
754 {
755 if (Desc.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
756 {
757 Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
758 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
759 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
760 }
761 }
762 else
763 {
764 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
765 {
766 Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
767 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
768 }
769 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
770 {
771 Log(("jmpf %04x:%08x -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
772 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
773 }
774 }
775
776 /* Limit check. (Should alternatively check for non-canonical addresses
777 here, but that is ruled out by offSeg being 32-bit, right?) */
778 uint64_t u64Base;
779 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
780 if (Desc.Legacy.Gen.u1Granularity)
781 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
782 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
783 u64Base = 0;
784 else
785 {
786 if (offSeg > cbLimit)
787 {
788 Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
789 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
790 }
791 u64Base = X86DESC_BASE(Desc.Legacy);
792 }
793
794 /*
795 * Ok, everything checked out fine. Now set the accessed bit before
796 * committing the result into CS, CSHID and RIP.
797 */
798 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
799 {
800 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
801 if (rcStrict != VINF_SUCCESS)
802 return rcStrict;
803#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
804 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
805#endif
806 }
807
808 /* commit */
809 pCtx->rip = offSeg;
810 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
811 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
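/* Descriptor bits 40..47 hold the type/S/DPL/P byte and bits 52..55 the AVL/L/D/G
   flags; shifting by 40 and masking with 0xf0ff keeps exactly those, skipping the
   limit 19:16 nibble. */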
812 pCtx->csHid.Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
813 pCtx->csHid.u32Limit = cbLimit;
814 pCtx->csHid.u64Base = u64Base;
815 /** @todo check if the hidden bits are loaded correctly for 64-bit
816 * mode. */
817 return VINF_SUCCESS;
818 }
819
820 /*
821 * System selector.
822 */
823 if (IEM_IS_LONG_MODE(pIemCpu))
824 switch (Desc.Legacy.Gen.u4Type)
825 {
826 case AMD64_SEL_TYPE_SYS_LDT:
827 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
828 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
829 case AMD64_SEL_TYPE_SYS_CALL_GATE:
830 case AMD64_SEL_TYPE_SYS_INT_GATE:
831 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
832 /* Call various functions to do the work. */
833 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
834 default:
835 Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
836 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
837
838 }
839 switch (Desc.Legacy.Gen.u4Type)
840 {
841 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
842 case X86_SEL_TYPE_SYS_LDT:
843 case X86_SEL_TYPE_SYS_286_CALL_GATE:
844 case X86_SEL_TYPE_SYS_TASK_GATE:
845 case X86_SEL_TYPE_SYS_286_INT_GATE:
846 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
847 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
848 case X86_SEL_TYPE_SYS_386_CALL_GATE:
849 case X86_SEL_TYPE_SYS_386_INT_GATE:
850 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
851 /* Call various functions to do the work. */
852 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
853
854 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
855 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
856 /* Call various functions to do the work. */
857 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
858
859 default:
860 Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
861 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
862 }
863}
864
865
866/**
867 * Implements far calls.
868 *
869 * @param uSel The selector.
870 * @param offSeg The segment offset.
871 * @param enmOpSize The operand size (in case we need it).
872 */
873IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876 VBOXSTRICTRC rcStrict;
877 uint64_t uNewRsp;
878 void *pvRet;
879
880 /*
881 * Real mode and V8086 mode are easy. The only snag seems to be that
882 * CS.limit doesn't change and the limit check is done against the current
883 * limit.
884 */
885 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
886 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
887 {
888 Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);
889
890 /* Check stack first - may #SS(0). */
891 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 6 : 4,
892 &pvRet, &uNewRsp);
893 if (rcStrict != VINF_SUCCESS)
894 return rcStrict;
895
896 /* Check the target address range. */
897 if (offSeg > UINT32_MAX)
898 return iemRaiseGeneralProtectionFault0(pIemCpu);
899
900 /* Everything is fine, push the return address. */
901 if (enmOpSize == IEMMODE_16BIT)
902 {
903 ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
904 ((uint16_t *)pvRet)[1] = pCtx->cs;
905 }
906 else
907 {
908 ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
909 ((uint16_t *)pvRet)[2] = pCtx->cs;
910 }
911 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
912 if (rcStrict != VINF_SUCCESS)
913 return rcStrict;
914
915 /* Branch. */
916 pCtx->rip = offSeg;
917 pCtx->cs = uSel;
918 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
919 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
920 * after disabling PE.) Check with VT-x and AMD-V. */
921#ifdef IEM_VERIFICATION_MODE
922 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
923#endif
924 return VINF_SUCCESS;
925 }
926
927 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
928}
929
930
931/**
932 * Implements retf.
933 *
934 * @param enmEffOpSize The effective operand size.
935 * @param cbPop The number of bytes of arguments to pop from the stack.
937 */
938IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
939{
940 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
941 VBOXSTRICTRC rcStrict;
942 uint64_t uNewRsp;
943
944 /*
945 * Real mode and V8086 mode are easy.
946 */
947 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
948 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
949 {
950 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
951 uint16_t const *pu16Frame;
952 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
953 (void const **)&pu16Frame, &uNewRsp);
954 if (rcStrict != VINF_SUCCESS)
955 return rcStrict;
956 uint32_t uNewEip;
957 uint16_t uNewCs;
958 if (enmEffOpSize == IEMMODE_32BIT)
959 {
960 uNewCs = pu16Frame[2];
961 uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
962 }
963 else
964 {
965 uNewCs = pu16Frame[1];
966 uNewEip = pu16Frame[0];
967 }
968 /** @todo check how this is supposed to work if sp=0xfffe. */
969
970 /* Check the limit of the new EIP. */
971 /** @todo Intel pseudo code only does the limit check for 16-bit
972 * operands, AMD does not make any distinction. What is right? */
973 if (uNewEip > pCtx->csHid.u32Limit)
974 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
975
976 /* commit the operation. */
977 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
978 if (rcStrict != VINF_SUCCESS)
979 return rcStrict;
980 pCtx->rip = uNewEip;
981 pCtx->cs = uNewCs;
982 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
983 /** @todo do we load attribs and limit as well? */
984 if (cbPop)
985 iemRegAddToRsp(pCtx, cbPop);
986 return VINF_SUCCESS;
987 }
988
989 AssertFailed();
990 return VERR_NOT_IMPLEMENTED;
991}
992
993
994/**
995 * Implements retn.
996 *
997 * We're doing this in C because of the \#GP that might be raised if the popped
998 * program counter is out of bounds.
999 *
1000 * @param enmEffOpSize The effective operand size.
1001 * @param cbPop The number of bytes of arguments to pop from the stack.
1003 */
1004IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1005{
1006 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1007
1008 /* Fetch the RSP from the stack. */
1009 VBOXSTRICTRC rcStrict;
1010 RTUINT64U NewRip;
1011 RTUINT64U NewRsp;
1012 NewRsp.u = pCtx->rsp;
1013 switch (enmEffOpSize)
1014 {
1015 case IEMMODE_16BIT:
1016 NewRip.u = 0;
1017 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1018 break;
1019 case IEMMODE_32BIT:
1020 NewRip.u = 0;
1021 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1022 break;
1023 case IEMMODE_64BIT:
1024 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1025 break;
1026 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1027 }
1028 if (rcStrict != VINF_SUCCESS)
1029 return rcStrict;
1030
1031 /* Check the new RSP before loading it. */
1032 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1033 * of it. The canonical test is performed here and for call. */
1034 if (enmEffOpSize != IEMMODE_64BIT)
1035 {
1036 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
1037 {
1038 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
1039 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1040 }
1041 }
1042 else
1043 {
1044 if (!IEM_IS_CANONICAL(NewRip.u))
1045 {
1046 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1047 return iemRaiseNotCanonical(pIemCpu);
1048 }
1049 }
1050
1051 /* Commit it. */
1052 pCtx->rip = NewRip.u;
1053 pCtx->rsp = NewRsp.u;
1054 if (cbPop)
1055 iemRegAddToRsp(pCtx, cbPop);
1056
1057 return VINF_SUCCESS;
1058}
1059
1060
1061/**
1062 * Implements leave.
1063 *
1064 * We're doing this in C because messing with the stack registers is annoying
1065 * since they depend on the SS attributes.
1066 *
1067 * @param enmEffOpSize The effective operand size.
1068 */
1069IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1070{
1071 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1072
1073 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1074 RTUINT64U NewRsp;
1075 if (pCtx->ssHid.Attr.n.u1Long)
1076 NewRsp.u = pCtx->rbp;
1077 else if (pCtx->ssHid.Attr.n.u1DefBig)
1078 NewRsp.u = pCtx->ebp;
1079 else
1080 {
1081 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1082 NewRsp.u = pCtx->rsp;
1083 NewRsp.Words.w0 = pCtx->bp;
1084 }
1085
1086 /* Pop RBP according to the operand size. */
1087 VBOXSTRICTRC rcStrict;
1088 RTUINT64U NewRbp;
1089 switch (enmEffOpSize)
1090 {
1091 case IEMMODE_16BIT:
1092 NewRbp.u = pCtx->rbp;
1093 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1094 break;
1095 case IEMMODE_32BIT:
1096 NewRbp.u = 0;
1097 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1098 break;
1099 case IEMMODE_64BIT:
1100 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1101 break;
1102 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1103 }
1104 if (rcStrict != VINF_SUCCESS)
1105 return rcStrict;
1106
1107
1108 /* Commit it. */
1109 pCtx->rbp = NewRbp.u;
1110 pCtx->rsp = NewRsp.u;
1111 iemRegAddToRip(pIemCpu, cbInstr);
1112
1113 return VINF_SUCCESS;
1114}
1115
1116
1117/** @name IEM_XCPT_FLAGS_XXX - flags for iemCImpl_RaiseXcptOrInt.
1118 * @{ */
1119/** CPU exception. */
1120#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1121/** External interrupt (from PIC, APIC, whatever). */
1122#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1123/** Software interrupt (int, into or bound). */
1124#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1125/** Takes an error code. */
1126#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1127/** Takes a CR2. */
1128#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1129/** Generated by the breakpoint instruction. */
1130#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1131/** Mask out the nesting level. */
1132#define IEM_XCPT_FLAGS_NESTING_MASK UINT32_C(0xff000000)
1133/** Shift count for the nesting level. */
1134#define IEM_XCPT_FLAGS_NESTING_SHIFT 24
1135/** Mask out the nesting level after shifting. */
1136#define IEM_XCPT_FLAGS_NESTING_SMASK UINT32_C(0x000000ff)
1137/** @} */
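/* Illustrative use (not part of this revision): a CPU fault that takes an error code,
   say a #GP, would be raised as
     iemCImpl_RaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP,
                             IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
   whereas iemCImpl_int below passes IEM_XCPT_FLAGS_T_SOFT_INT. */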
1138
1139
1140/**
1141 * Adjust the CPU state according to the exception being raised.
1142 *
1143 * @param pCtx The CPU context.
1144 * @param u8Vector The exception that has been raised.
1145 */
1146DECLINLINE(void) iemCImpl_RaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1147{
1148 switch (u8Vector)
1149 {
1150 case X86_XCPT_DB:
1151 pCtx->dr[7] &= ~X86_DR7_GD;
1152 break;
1153 /** @todo Read the AMD and Intel exception reference... */
1154 }
1155}
1156
1157
1158/**
1159 * Implements exceptions and interrupts for real mode.
1160 *
1161 * @returns VBox strict status code.
1162 * @param pIemCpu The IEM per CPU instance data.
1163 * @param pCtx The CPU context.
1164 * @param cbInstr The number of bytes to offset rIP by in the return
1165 * address.
1166 * @param u8Vector The interrupt / exception vector number.
1167 * @param fFlags The flags.
1168 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1169 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1170 */
1171static VBOXSTRICTRC
1172iemCImpl_RaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1173 PCPUMCTX pCtx,
1174 uint8_t cbInstr,
1175 uint8_t u8Vector,
1176 uint32_t fFlags,
1177 uint16_t uErr,
1178 uint64_t uCr2)
1179{
1180 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1181
1182 /*
1183 * Read the IDT entry.
1184 */
1185 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1186 {
1187 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1188 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1189 }
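/* A real-mode IVT entry is 4 bytes: the offset word at +0 and the segment word at +2,
   which is exactly the RTFAR16 layout fetched below. */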
1190 RTFAR16 Idte;
1191 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1192 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1193 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1194 return rcStrict;
1195
1196 /*
1197 * Push the stack frame.
1198 */
1199 uint16_t *pu16Frame;
1200 uint64_t uNewRsp;
1201 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1202 if (rcStrict != VINF_SUCCESS)
1203 return rcStrict;
1204
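/* Memory order of the frame: IP at the lowest address, then CS, then FLAGS - the same
   layout the CPU produces by pushing FLAGS, CS and IP. */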
1205 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1206 pu16Frame[1] = (uint16_t)pCtx->cs;
1207 pu16Frame[0] = pCtx->ip + cbInstr;
1208 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1209 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1210 return rcStrict;
1211
1212 /*
1213 * Load the vector address into cs:ip and make exception specific state
1214 * adjustments.
1215 */
1216 pCtx->cs = Idte.sel;
1217 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1218 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1219 pCtx->rip = Idte.off;
1220 pCtx->eflags.Bits.u1IF = 0;
1221
1222 /** @todo do we actually do this in real mode? */
1223 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1224 iemCImpl_RaiseXcptAdjustState(pCtx, u8Vector);
1225
1226 return VINF_SUCCESS;
1227}
1228
1229
1230/**
1231 * Implements exceptions and interrupts for protected mode.
1232 *
1233 * @returns VBox strict status code.
1234 * @param pIemCpu The IEM per CPU instance data.
1235 * @param pCtx The CPU context.
1236 * @param cbInstr The number of bytes to offset rIP by in the return
1237 * address.
1238 * @param u8Vector The interrupt / exception vector number.
1239 * @param fFlags The flags.
1240 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1241 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1242 */
1243static VBOXSTRICTRC
1244iemCImpl_RaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1245 PCPUMCTX pCtx,
1246 uint8_t cbInstr,
1247 uint8_t u8Vector,
1248 uint32_t fFlags,
1249 uint16_t uErr,
1250 uint64_t uCr2)
1251{
1252 /*
1253 * Read the IDT entry.
1254 */
1255 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1256 {
1257 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1258 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1259 }
1260 X86DESC Idte;
1261 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &Idte.u, UINT8_MAX,
1262 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
1263 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1264 return rcStrict;
1265
1266 /*
1267 * Check the descriptor type, DPL and such.
1268 * ASSUMES this is done in the same order as described for selectors.
1269 */
1270 if (Idte.Gate.u1DescType)
1271 {
1272 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1273 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1274 }
1275 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1276 switch (Idte.Gate.u4Type)
1277 {
1278 case X86_SEL_TYPE_SYS_UNDEFINED:
1279 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1280 case X86_SEL_TYPE_SYS_LDT:
1281 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1282 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1283 case X86_SEL_TYPE_SYS_UNDEFINED2:
1284 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1285 case X86_SEL_TYPE_SYS_UNDEFINED3:
1286 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1287 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1288 case X86_SEL_TYPE_SYS_UNDEFINED4:
1289 {
1290 /** @todo check what actually happens when the type is wrong...
1291 * esp. call gates. */
1292 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1293 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1294 }
1295
1296 case X86_SEL_TYPE_SYS_286_INT_GATE:
1297 case X86_SEL_TYPE_SYS_386_INT_GATE:
1298 fEflToClear |= X86_EFL_IF;
1299 break;
1300
1301 case X86_SEL_TYPE_SYS_TASK_GATE:
1302 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1303 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1304 break;
1305
1306 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1307 }
1308
1309 /* Check DPL against CPL if applicable. */
1310 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1311 {
1312#if 0 /** @todo continue here */
1313 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1314 {
1315 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
1316 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1317 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1318 }
1319#endif
1320 }
1321
1322 /* Is it there? */
1323 if (!Idte.Gate.u1Present)
1324 {
1325 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1326 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1327 }
1328
1329 return VERR_NOT_IMPLEMENTED;
1330}
1331
1332
1333/**
1334 * Implements exceptions and interrupts for long mode.
1335 *
1336 * @returns VBox strict status code.
1337 * @param pIemCpu The IEM per CPU instance data.
1338 * @param pCtx The CPU context.
1339 * @param cbInstr The number of bytes to offset rIP by in the return
1340 * address.
1341 * @param u8Vector The interrupt / exception vector number.
1342 * @param fFlags The flags.
1343 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1344 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1345 */
1346static VBOXSTRICTRC
1347iemCImpl_RaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
1348 PCPUMCTX pCtx,
1349 uint8_t cbInstr,
1350 uint8_t u8Vector,
1351 uint32_t fFlags,
1352 uint16_t uErr,
1353 uint64_t uCr2)
1354{
1355 AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
1356 return VERR_NOT_IMPLEMENTED;
1357}
1358
1359
1360/**
1361 * Implements exceptions and interrupts.
1362 *
1363 * All exceptions and interrupts go through this function!
1364 *
1365 * @returns VBox strict status code.
1366 * @param pIemCpu The IEM per CPU instance data.
1367 * @param cbInstr The number of bytes to offset rIP by in the return
1368 * address.
1369 * @param u8Vector The interrupt / exception vector number.
1370 * @param fFlags The flags.
1371 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1372 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1373 */
1374static VBOXSTRICTRC
1375iemCImpl_RaiseXcptOrInt(PIEMCPU pIemCpu,
1376 uint8_t cbInstr,
1377 uint8_t u8Vector,
1378 uint32_t fFlags,
1379 uint16_t uErr,
1380 uint64_t uCr2)
1381{
1382 /*
1383 * Do recursion accounting.
1384 */
1385 uint8_t uPrevXcpt = pIemCpu->uCurXcpt;
1386 if (pIemCpu->cXcptRecursions > 0)
1387 {
1388 /** @todo double and triple faults. */
1389 AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_NOT_IMPLEMENTED);
1390 }
1391 pIemCpu->cXcptRecursions++;
1392 pIemCpu->uCurXcpt = u8Vector;
1393
1394 /*
1395 * Call mode specific worker function.
1396 */
1397 VBOXSTRICTRC rcStrict;
1398 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1399 if (!(pCtx->cr0 & X86_CR0_PE))
1400 rcStrict = iemCImpl_RaiseXcptOrIntInRealMode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
1401 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1402 rcStrict = iemCImpl_RaiseXcptOrIntInLongMode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
1403 else
1404 rcStrict = iemCImpl_RaiseXcptOrIntInProtMode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
1405
1406 /*
1407 * Unwind.
1408 */
1409 pIemCpu->cXcptRecursions--;
1410 pIemCpu->uCurXcpt = uPrevXcpt;
1411 return rcStrict;
1412}
1413
1414
1415/**
1416 * Implements int3 and int XX.
1417 *
1418 * @param u8Int The interrupt vector number.
1419 * @param fIsBpInstr Is it the breakpoint instruction.
1420 */
1421IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1422{
1423 Assert(pIemCpu->cXcptRecursions == 0);
1424 return iemCImpl_RaiseXcptOrInt(pIemCpu,
1425 cbInstr,
1426 u8Int,
1427 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1428 0,
1429 0);
1430}
1431
1432
1433/**
1434 * Implements iret.
1435 *
1436 * @param enmEffOpSize The effective operand size.
1437 */
1438IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
1439{
1440 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1441 VBOXSTRICTRC rcStrict;
1442 uint64_t uNewRsp;
1443
1444 /*
1445 * Real mode is easy, V8086 mode is relatively similar.
1446 */
1447 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1448 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1449 {
1450 /* iret throws an exception if VME isn't enabled. */
1451 if ( pCtx->eflags.Bits.u1VM
1452 && !(pCtx->cr4 & X86_CR4_VME))
1453 return iemRaiseGeneralProtectionFault0(pIemCpu);
1454
1455 /* Do the stack bits, but don't commit RSP before everything checks
1456 out right. */
1457 union
1458 {
1459 uint32_t const *pu32;
1460 uint16_t const *pu16;
1461 void const *pv;
1462 } uFrame;
1463 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1464 uint16_t uNewCs;
1465 uint32_t uNewEip;
1466 uint32_t uNewFlags;
1467 if (enmEffOpSize == IEMMODE_32BIT)
1468 {
1469 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1470 if (rcStrict != VINF_SUCCESS)
1471 return rcStrict;
1472 uNewEip = uFrame.pu32[0];
1473 uNewCs = (uint16_t)uFrame.pu32[1];
1474 uNewFlags = uFrame.pu32[2];
1475 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1476 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1477 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1478 | X86_EFL_ID;
1479 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1480 }
1481 else
1482 {
1483 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1484 if (rcStrict != VINF_SUCCESS)
1485 return rcStrict;
1486 uNewEip = uFrame.pu16[0];
1487 uNewCs = uFrame.pu16[1];
1488 uNewFlags = uFrame.pu16[2];
1489 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1490 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1491 uNewFlags |= pCtx->eflags.u & (UINT16_C(0xffff0000) | X86_EFL_1);
1492 /** @todo The intel pseudo code does not indicate what happens to
1493 * reserved flags. We just ignore them. */
1494 }
1495 /** @todo Check how this is supposed to work if sp=0xfffe. */
1496
1497 /* Check the limit of the new EIP. */
1498 /** @todo Only the AMD pseudo code checks the limit here; what's
1499 * right? */
1500 if (uNewEip > pCtx->csHid.u32Limit)
1501 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1502
1503 /* V8086 checks and flag adjustments */
1504 if (pCtx->eflags.Bits.u1VM)
1505 {
1506 if (pCtx->eflags.Bits.u2IOPL == 3)
1507 {
1508 /* Preserve IOPL and clear RF. */
1509 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1510 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1511 }
1512 else if ( enmEffOpSize == IEMMODE_16BIT
1513 && ( !(uNewFlags & X86_EFL_IF)
1514 || !pCtx->eflags.Bits.u1VIP )
1515 && !(uNewFlags & X86_EFL_TF) )
1516 {
1517 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1518 uNewFlags &= ~X86_EFL_VIF;
1519 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1520 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1521 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1522 }
1523 else
1524 return iemRaiseGeneralProtectionFault0(pIemCpu);
1525 }
1526
1527 /* commit the operation. */
1528 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1529 if (rcStrict != VINF_SUCCESS)
1530 return rcStrict;
1531 pCtx->rip = uNewEip;
1532 pCtx->cs = uNewCs;
1533 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
1534 /** @todo do we load attribs and limit as well? */
1535 Assert(uNewFlags & X86_EFL_1);
1536 pCtx->eflags.u = uNewFlags;
1537
1538 return VINF_SUCCESS;
1539 }
1540
1541
1542 AssertFailed();
1543 return VERR_NOT_IMPLEMENTED;
1544}
1545
1546
1547/**
1548 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
1549 *
1550 * @param iSegReg The segment register number (valid).
1551 * @param uSel The new selector value.
1552 */
1553IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
1554{
1555 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1556 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
1557 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
1558
1559 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
1560
1561 /*
1562 * Real mode and V8086 mode are easy.
1563 */
1564 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1565 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1566 {
1567 *pSel = uSel;
1568 pHid->u64Base = (uint32_t)uSel << 4;
1569 /** @todo Does the CPU actually load limits and attributes in the
1570 * real/V8086 mode segment load case? It doesn't for CS in far
1571 * jumps... Affects unreal mode. */
1572 pHid->u32Limit = 0xffff;
1573 pHid->Attr.u = 0;
1574 pHid->Attr.n.u1Present = 1;
1575 pHid->Attr.n.u1DescType = 1;
1576 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
1577 ? X86_SEL_TYPE_RW
1578 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
1579
1580 iemRegAddToRip(pIemCpu, cbInstr);
1581 return VINF_SUCCESS;
1582 }
1583
1584 /*
1585 * Protected mode.
1586 *
1587 * Check if it's a null segment selector value first, that's OK for DS, ES,
1588 * FS and GS. If not null, then we have to load and parse the descriptor.
1589 */
1590 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1591 {
1592 if (iSegReg == X86_SREG_SS)
1593 {
1594 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
1595 || pIemCpu->uCpl != 0
1596 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
1597 {
1598 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
1599 return iemRaiseGeneralProtectionFault0(pIemCpu);
1600 }
1601
1602 /* In 64-bit kernel mode, the stack can be 0 because of the way
1603 interrupts are dispatched when in kernel ctx. Just load the
1604 selector value into the register and leave the hidden bits
1605 as is. */
1606 *pSel = uSel;
1607 iemRegAddToRip(pIemCpu, cbInstr);
1608 return VINF_SUCCESS;
1609 }
1610
1611 *pSel = uSel; /* Not RPL, remember :-) */
1612 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
1613 && iSegReg != X86_SREG_FS
1614 && iSegReg != X86_SREG_GS)
1615 {
1616 /** @todo figure out what this actually does, it works. Needs
1617 * testcase! */
1618 pHid->Attr.u = 0;
1619 pHid->Attr.n.u1Present = 1;
1620 pHid->Attr.n.u1Long = 1;
1621 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
1622 pHid->Attr.n.u2Dpl = 3;
1623 pHid->u32Limit = 0;
1624 pHid->u64Base = 0;
1625 }
1626 else
1627 {
1628 pHid->Attr.u = 0;
1629 pHid->u32Limit = 0;
1630 pHid->u64Base = 0;
1631 }
1632 iemRegAddToRip(pIemCpu, cbInstr);
1633 return VINF_SUCCESS;
1634 }
1635
1636 /* Fetch the descriptor. */
1637 IEMSELDESC Desc;
1638 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1639 if (rcStrict != VINF_SUCCESS)
1640 return rcStrict;
1641
1642 /* Check GPs first. */
1643 if (!Desc.Legacy.Gen.u1DescType)
1644 {
1645 Log(("load sreg%u, %#x - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
1646 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1647 }
1648 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
1649 {
1650 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1651 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1652 {
1653 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
1654 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1655 }
1662 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
1663 {
1664 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
1665 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1666 }
1667 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1668 {
1669 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1670 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1671 }
1672 }
1673 else
1674 {
1675 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
1676 {
1677 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
1678 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1679 }
1680 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1681 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1682 {
1683#if 0 /* this is what intel says. */
1684 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
1685 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1686 {
1687 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
1688 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1689 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1690 }
1691#else /* this is what makes more sense. */
1692 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
1693 {
1694 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
1695 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
1696 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1697 }
1698 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
1699 {
1700 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
1701 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
1702 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
1703 }
1704#endif
1705 }
1706 }
1707
1708 /* Is it there? */
1709 if (!Desc.Legacy.Gen.u1Present)
1710 {
1711 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
1712 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1713 }
1714
1715 /* Compute the base and limit. */
1716 uint64_t u64Base;
1717 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
1718 if (Desc.Legacy.Gen.u1Granularity)
1719 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1720
1721 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
1722 && iSegReg < X86_SREG_FS)
1723 u64Base = 0;
1724 else
1725 u64Base = X86DESC_BASE(Desc.Legacy);
1726
1727 /*
1728 * Ok, everything checked out fine. Now set the accessed bit before
1729 * committing the result into the registers.
1730 */
1731 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1732 {
1733 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1734 if (rcStrict != VINF_SUCCESS)
1735 return rcStrict;
1736 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1737 }
1738
1739 /* commit */
1740 *pSel = uSel;
1741 pHid->Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
1742 pHid->u32Limit = cbLimit;
1743 pHid->u64Base = u64Base;
1744
1745 /** @todo check if the hidden bits are loaded correctly for 64-bit
1746 * mode. */
1747
1748 iemRegAddToRip(pIemCpu, cbInstr);
1749 return VINF_SUCCESS;
1750}
1751
1752
1753/**
1754 * Implements 'mov SReg, r/m'.
1755 *
1756 * @param iSegReg The segment register number (valid).
1757 * @param uSel The new selector value.
1758 */
1759IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
1760{
1761 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1762 if (rcStrict == VINF_SUCCESS)
1763 {
1764 if (iSegReg == X86_SREG_SS)
1765 {
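/* Loading SS inhibits interrupts (and #DB) until the next instruction completes;
   record the PC so EM honours that window. */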
1766 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1767 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
1768 }
1769 }
1770 return rcStrict;
1771}
1772
1773
1774/**
1775 * Implements 'pop SReg'.
1776 *
1777 * @param iSegReg The segment register number (valid).
1778 * @param enmEffOpSize The efficient operand size (valid).
1779 */
1780IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
1781{
1782 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1783 VBOXSTRICTRC rcStrict;
1784
1785 /*
1786 * Read the selector off the stack and join paths with mov ss, reg.
1787 */
1788 RTUINT64U TmpRsp;
1789 TmpRsp.u = pCtx->rsp;
1790 switch (enmEffOpSize)
1791 {
1792 case IEMMODE_16BIT:
1793 {
1794 uint16_t uSel;
1795 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
1796 if (rcStrict == VINF_SUCCESS)
1797 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1798 break;
1799 }
1800
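        /* Note: for 32-bit and 64-bit operand sizes only the low 16 bits end up in the segment register, but the stack pointer is still advanced by the full operand size. */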
1801 case IEMMODE_32BIT:
1802 {
1803 uint32_t u32Value;
1804 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
1805 if (rcStrict == VINF_SUCCESS)
1806 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
1807 break;
1808 }
1809
1810 case IEMMODE_64BIT:
1811 {
1812 uint64_t u64Value;
1813 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
1814 if (rcStrict == VINF_SUCCESS)
1815 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
1816 break;
1817 }
1818 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1819 }
1820
1821 /*
1822 * Commit the stack on success.
1823 */
1824 if (rcStrict == VINF_SUCCESS)
1825 {
1826 pCtx->rsp = TmpRsp.u;
1827 if (iSegReg == X86_SREG_SS)
1828 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
1829 }
1830 return rcStrict;
1831}
1832
1833
1834/**
1835 * Implements lgs, lfs, les, lds & lss.
1836 */
1837IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
1838 uint16_t, uSel,
1839 uint64_t, offSeg,
1840 uint8_t, iSegReg,
1841 uint8_t, iGReg,
1842 IEMMODE, enmEffOpSize)
1843{
1844 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1845 VBOXSTRICTRC rcStrict;
1846
1847 /*
1848 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
1849 */
1850 /** @todo verify and test that mov, pop and lXs all load the segment
1851 * register in exactly the same way. */
1852 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
1853 if (rcStrict == VINF_SUCCESS)
1854 {
1855 switch (enmEffOpSize)
1856 {
1857 case IEMMODE_16BIT:
1858 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1859 break;
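            /* For 32-bit operands the store through a 64-bit pointer also clears the high dword, matching the implicit zero extension of 32-bit register writes. */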
1860 case IEMMODE_32BIT:
1861 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1862 break;
1863 case IEMMODE_64BIT:
1864 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
1865 break;
1866 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1867 }
1868 }
1869
1870 return rcStrict;
1871}
1872
1873
1874/**
1875 * Implements lgdt.
1876 *
1877 * @param iEffSeg The data segment containing the new gdtr contents.
1878 * @param GCPtrEffSrc The address of the new gdtr contents.
1879 * @param enmEffOpSize The effective operand size.
1880 */
1881IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
1882{
1883 if (pIemCpu->uCpl != 0)
1884 return iemRaiseGeneralProtectionFault0(pIemCpu);
1885 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
1886
1887 /*
1888 * Fetch the limit and base address.
1889 */
1890 uint16_t cbLimit;
1891 RTGCPTR GCPtrBase;
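    /* The source operand is a 16-bit limit followed by the base address; how much of the base is used depends on the operand size and CPU mode, which iemMemFetchDataXdtr takes care of. */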
1892 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
1893 if (rcStrict == VINF_SUCCESS)
1894 {
1895 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1896 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
1897 else
1898 {
1899 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1900 pCtx->gdtr.cbGdt = cbLimit;
1901 pCtx->gdtr.pGdt = GCPtrBase;
1902 }
1903 if (rcStrict == VINF_SUCCESS)
1904 iemRegAddToRip(pIemCpu, cbInstr);
1905 }
1906 return rcStrict;
1907}
1908
1909
1910/**
1911 * Implements lidt.
1912 *
1913 * @param iEffSeg The data segment containing the new idtr contents.
1914 * @param GCPtrEffSrc The address of the new idtr contents.
1915 * @param enmEffOpSize The effective operand size.
1916 */
1917IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
1918{
1919 if (pIemCpu->uCpl != 0)
1920 return iemRaiseGeneralProtectionFault0(pIemCpu);
1921 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
1922
1923 /*
1924 * Fetch the limit and base address.
1925 */
1926 uint16_t cbLimit;
1927 RTGCPTR GCPtrBase;
1928 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
1929 if (rcStrict == VINF_SUCCESS)
1930 {
1931 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1932 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
1933 else
1934 {
1935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1936 pCtx->idtr.cbIdt = cbLimit;
1937 pCtx->idtr.pIdt = GCPtrBase;
1938 }
1939 if (rcStrict == VINF_SUCCESS)
1940 iemRegAddToRip(pIemCpu, cbInstr);
1941 }
1942 return rcStrict;
1943}
1944
1945
1946/**
1947 * Implements lldt.
1948 *
1949 * @param uNewLdt The new LDT selector value.
1950 */
1951IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
1952{
1953 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1954
1955 /*
1956 * Check preconditions.
1957 */
1958 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1959 {
1960 Log(("lldt %04x - real or v8086 mode -> #UD\n", uNewLdt));
1961 return iemRaiseUndefinedOpcode(pIemCpu);
1962 }
1963 if (pIemCpu->uCpl != 0)
1964 {
1965 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
1966 return iemRaiseGeneralProtectionFault0(pIemCpu);
1967 }
1968 if (uNewLdt & X86_SEL_LDT)
1969 {
1970 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
1971 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & (X86_SEL_MASK | X86_SEL_LDT));
1972 }
1973
1974 /*
1975 * Now, loading a NULL selector is easy.
1976 */
1977 if ((uNewLdt & X86_SEL_MASK) == 0)
1978 {
1979 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
1980 /** @todo check if the actual value is loaded or if it's always 0. */
1981 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1982 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
1983 else
1984 pCtx->ldtr = 0;
1985 pCtx->ldtrHid.Attr.u = 0;
1986 pCtx->ldtrHid.u64Base = 0;
1987 pCtx->ldtrHid.u32Limit = 0;
1988
1989 iemRegAddToRip(pIemCpu, cbInstr);
1990 return VINF_SUCCESS;
1991 }
1992
1993 /*
1994 * Read the descriptor.
1995 */
1996 IEMSELDESC Desc;
1997 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
1998 if (rcStrict != VINF_SUCCESS)
1999 return rcStrict;
2000
2001 /* Check GPs first. */
2002 if (Desc.Legacy.Gen.u1DescType)
2003 {
2004 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2005 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2006 }
2007 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2008 {
2009 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2010 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2011 }
2012 uint64_t u64Base;
2013 if (!IEM_IS_LONG_MODE(pIemCpu))
2014 u64Base = X86DESC_BASE(Desc.Legacy);
2015 else
2016 {
2017 if (Desc.Long.Gen.u5Zeros)
2018 {
2019 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2020 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2021 }
2022
2023 u64Base = X86DESC64_BASE(Desc.Long);
2024 if (!IEM_IS_CANONICAL(u64Base))
2025 {
2026 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2027 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2028 }
2029 }
2030
2031 /* NP */
2032 if (!Desc.Legacy.Gen.u1Present)
2033 {
2034 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2035 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2036 }
2037
2038 /*
2039 * It checks out alright, update the registers.
2040 */
2041/** @todo check if the actual value is loaded or if the RPL is dropped */
2042 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2043 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
2044 else
2045 pCtx->ldtr = uNewLdt & X86_SEL_MASK;
2046 pCtx->ldtrHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2047 pCtx->ldtrHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2048 pCtx->ldtrHid.u64Base = u64Base;
2049
2050 iemRegAddToRip(pIemCpu, cbInstr);
2051 return VINF_SUCCESS;
2052}
2053
2054
2055/**
2056 * Implements ltr.
2057 *
2058 * @param uNewTr The new task register (TSS) selector value.
2059 */
2060IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2061{
2062 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2063
2064 /*
2065 * Check preconditions.
2066 */
2067 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2068 {
2069 Log(("ltr %04x - real or v8086 mode -> #UD\n", uNewTr));
2070 return iemRaiseUndefinedOpcode(pIemCpu);
2071 }
2072 if (pIemCpu->uCpl != 0)
2073 {
2074 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2075 return iemRaiseGeneralProtectionFault0(pIemCpu);
2076 }
2077 if (uNewTr & X86_SEL_LDT)
2078 {
2079 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2080 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & (X86_SEL_MASK | X86_SEL_LDT));
2081 }
2082 if ((uNewTr & X86_SEL_MASK) == 0)
2083 {
2084 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2085 return iemRaiseGeneralProtectionFault0(pIemCpu);
2086 }
2087
2088 /*
2089 * Read the descriptor.
2090 */
2091 IEMSELDESC Desc;
2092 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2093 if (rcStrict != VINF_SUCCESS)
2094 return rcStrict;
2095
2096 /* Check GPs first. */
2097 if (Desc.Legacy.Gen.u1DescType)
2098 {
2099 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2100 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2101 }
2102 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2103 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2104 || IEM_IS_LONG_MODE(pIemCpu)) )
2105 {
2106 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2107 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2108 }
2109 uint64_t u64Base;
2110 if (!IEM_IS_LONG_MODE(pIemCpu))
2111 u64Base = X86DESC_BASE(Desc.Legacy);
2112 else
2113 {
2114 if (Desc.Long.Gen.u5Zeros)
2115 {
2116 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2117 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2118 }
2119
2120 u64Base = X86DESC64_BASE(Desc.Long);
2121 if (!IEM_IS_CANONICAL(u64Base))
2122 {
2123 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2124 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2125 }
2126 }
2127
2128 /* NP */
2129 if (!Desc.Legacy.Gen.u1Present)
2130 {
2131 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2132 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2133 }
2134
2135 /*
2136 * Set it busy.
2137 * Note! Intel says this should lock down the whole descriptor, but we'll
2138 * restrict ourselves to 32-bit for now due to lack of inline
2139 * assembly and such.
2140 */
2141 void *pvDesc;
2142 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
2143 if (rcStrict != VINF_SUCCESS)
2144 return rcStrict;
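    /* The busy flag is bit 41 of the descriptor, i.e. bit 1 of the type byte at offset 5. ASMAtomicBitSet wants a 32-bit aligned pointer, so adjust the base pointer and bit offset according to the alignment of the mapping. */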
2145 switch ((uintptr_t)pvDesc & 3)
2146 {
2147 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2148 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2149 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2150 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2151 }
2152 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2153 if (rcStrict != VINF_SUCCESS)
2154 return rcStrict;
2155 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2156
2157 /*
2158 * It checks out alright, update the registers.
2159 */
2160/** @todo check if the actual value is loaded or if the RPL is dropped */
2161 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2162 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
2163 else
2164 pCtx->tr = uNewTr & X86_SEL_MASK;
2165 pCtx->trHid.Attr.u = X86DESC_GET_HID_ATTR(Desc.Legacy);
2166 pCtx->trHid.u32Limit = X86DESC_LIMIT(Desc.Legacy);
2167 pCtx->trHid.u64Base = u64Base;
2168
2169 iemRegAddToRip(pIemCpu, cbInstr);
2170 return VINF_SUCCESS;
2171}
2172
2173
2174/**
2175 * Implements mov GReg,CRx.
2176 *
2177 * @param iGReg The general register to store the CRx value in.
2178 * @param iCrReg The CRx register to read (valid).
2179 */
2180IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2181{
2182 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2183 if (pIemCpu->uCpl != 0)
2184 return iemRaiseGeneralProtectionFault0(pIemCpu);
2185 Assert(!pCtx->eflags.Bits.u1VM);
2186
2187 /* read it */
2188 uint64_t crX;
2189 switch (iCrReg)
2190 {
2191 case 0: crX = pCtx->cr0; break;
2192 case 2: crX = pCtx->cr2; break;
2193 case 3: crX = pCtx->cr3; break;
2194 case 4: crX = pCtx->cr4; break;
2195 case 8:
2196 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2197 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2198 else
2199 crX = 0xff;
2200 break;
2201 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2202 }
2203
2204 /* store it */
2205 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2206 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2207 else
2208 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2209
2210 iemRegAddToRip(pIemCpu, cbInstr);
2211 return VINF_SUCCESS;
2212}
2213
2214
2215/**
2216 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2217 *
2218 * @param iCrReg The CRx register to write (valid).
2219 * @param uNewCrX The new value.
2220 */
2221IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2222{
2223 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2224 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2225 VBOXSTRICTRC rcStrict;
2226 int rc;
2227
2228 /*
2229 * Try to store it.
2230 * Unfortunately, CPUM only does a tiny bit of the work.
2231 */
2232 switch (iCrReg)
2233 {
2234 case 0:
2235 {
2236 /*
2237 * Perform checks.
2238 */
2239 uint64_t const uOldCrX = pCtx->cr0;
2240 uNewCrX |= X86_CR0_ET; /* hardcoded */
2241
2242 /* Check for reserved bits. */
2243 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2244 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2245 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2246 if (uNewCrX & ~(uint64_t)fValid)
2247 {
2248 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2249 return iemRaiseGeneralProtectionFault0(pIemCpu);
2250 }
2251
2252 /* Check for invalid combinations. */
2253 if ( (uNewCrX & X86_CR0_PG)
2254 && !(uNewCrX & X86_CR0_PE) )
2255 {
2256 Log(("Trying to set CR0.PG without CR0.PE\n"));
2257 return iemRaiseGeneralProtectionFault0(pIemCpu);
2258 }
2259
2260 if ( !(uNewCrX & X86_CR0_CD)
2261 && (uNewCrX & X86_CR0_NW) )
2262 {
2263 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2264 return iemRaiseGeneralProtectionFault0(pIemCpu);
2265 }
2266
2267 /* Long mode consistency checks. */
2268 if ( (uNewCrX & X86_CR0_PG)
2269 && !(uOldCrX & X86_CR0_PG)
2270 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2271 {
2272 if (!(pCtx->cr4 & X86_CR4_PAE))
2273 {
2274 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
2275 return iemRaiseGeneralProtectionFault0(pIemCpu);
2276 }
2277 if (pCtx->csHid.Attr.n.u1Long)
2278 {
2279 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
2280 return iemRaiseGeneralProtectionFault0(pIemCpu);
2281 }
2282 }
2283
2284 /** @todo check reserved PDPTR bits as AMD states. */
2285
2286 /*
2287 * Change CR0.
2288 */
2289 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2290 {
2291 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
2292 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
2293 }
2294 else
2295 pCtx->cr0 = uNewCrX;
2296 Assert(pCtx->cr0 == uNewCrX);
2297
2298 /*
2299 * Change EFER.LMA if entering or leaving long mode.
2300 */
2301 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
2302 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2303 {
2304 uint64_t NewEFER = pCtx->msrEFER;
2305 if (uNewCrX & X86_CR0_PG)
2306 NewEFER |= MSR_K6_EFER_LMA;
2307 else
2308 NewEFER &= ~MSR_K6_EFER_LMA;
2309
2310 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2311 CPUMSetGuestEFER(pVCpu, NewEFER);
2312 else
2313 pCtx->msrEFER = NewEFER;
2314 Assert(pCtx->msrEFER == NewEFER);
2315 }
2316
2317 /*
2318 * Inform PGM.
2319 */
2320 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2321 {
2322 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
2323 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
2324 {
2325 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
2326 AssertRCReturn(rc, rc);
2327 /* ignore informational status codes */
2328 }
2329 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2330 /** @todo Status code management. */
2331 }
2332 else
2333 rcStrict = VINF_SUCCESS;
2334 break;
2335 }
2336
2337 /*
2338 * CR2 can be changed without any restrictions.
2339 */
2340 case 2:
2341 pCtx->cr2 = uNewCrX;
2342 rcStrict = VINF_SUCCESS;
2343 break;
2344
2345 /*
2346 * CR3 is relatively simple, although AMD and Intel have different
2347 * accounts of how the setting of reserved bits is handled. We take Intel's
2348 * word for the lower bits and AMD's for the high bits (63:52).
2349 */
2350 /** @todo Testcase: Setting reserved bits in CR3, especially before
2351 * enabling paging. */
2352 case 3:
2353 {
2354 /* check / mask the value. */
2355 if (uNewCrX & UINT64_C(0xfff0000000000000))
2356 {
2357 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
2358 return iemRaiseGeneralProtectionFault0(pIemCpu);
2359 }
2360
2361 uint64_t fValid;
2362 if ( (pCtx->cr4 & X86_CR4_PAE)
2363 && (pCtx->msrEFER & MSR_K6_EFER_LME))
2364 fValid = UINT64_C(0x000ffffffffff014);
2365 else if (pCtx->cr4 & X86_CR4_PAE)
2366 fValid = UINT64_C(0xfffffff4);
2367 else
2368 fValid = UINT64_C(0xfffff014);
2369 if (uNewCrX & ~fValid)
2370 {
2371 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
2372 uNewCrX, uNewCrX & ~fValid));
2373 uNewCrX &= fValid;
2374 }
2375
2376 /** @todo If we're in PAE mode we should check the PDPTRs for
2377 * invalid bits. */
2378
2379 /* Make the change. */
2380 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2381 {
2382 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
2383 AssertRCSuccessReturn(rc, rc);
2384 }
2385 else
2386 pCtx->cr3 = uNewCrX;
2387
2388 /* Inform PGM. */
2389 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2390 {
2391 if (pCtx->cr0 & X86_CR0_PG)
2392 {
2393 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2394 AssertRCReturn(rc, rc);
2395 /* ignore informational status codes */
2396 /** @todo status code management */
2397 }
2398 }
2399 rcStrict = VINF_SUCCESS;
2400 break;
2401 }
2402
2403 /*
2404 * CR4 is a bit more tedious as there are bits which cannot be cleared
2405 * under some circumstances and such.
2406 */
2407 case 4:
2408 {
2409 uint64_t const uOldCrX = pCtx->cr4;
2410
2411 /* reserved bits */
2412 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
2413 | X86_CR4_TSD | X86_CR4_DE
2414 | X86_CR4_PSE | X86_CR4_PAE
2415 | X86_CR4_MCE | X86_CR4_PGE
2416 | X86_CR4_PCE | X86_CR4_OSFSXR
2417 | X86_CR4_OSXMMEEXCPT;
2418 //if (xxx)
2419 // fValid |= X86_CR4_VMXE;
2420 //if (xxx)
2421 // fValid |= X86_CR4_OSXSAVE;
2422 if (uNewCrX & ~(uint64_t)fValid)
2423 {
2424 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2425 return iemRaiseGeneralProtectionFault0(pIemCpu);
2426 }
2427
2428 /* long mode checks. */
2429 if ( (uOldCrX & X86_CR4_PAE)
2430 && !(uNewCrX & X86_CR4_PAE)
2431 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
2432 {
2433 Log(("Trying to clear CR4.PAE while long mode is active\n"));
2434 return iemRaiseGeneralProtectionFault0(pIemCpu);
2435 }
2436
2437
2438 /*
2439 * Change it.
2440 */
2441 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2442 {
2443 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
2444 AssertRCSuccessReturn(rc, rc);
2445 }
2446 else
2447 pCtx->cr4 = uNewCrX;
2448 Assert(pCtx->cr4 == uNewCrX);
2449
2450 /*
2451 * Notify SELM and PGM.
2452 */
2453 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2454 {
2455 /* SELM - VME may change things wrt the TSS shadowing. */
2456 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
2457 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2458
2459 /* PGM - flushing and mode. */
2460 if ( (uNewCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
2461 != (uOldCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)) )
2462 {
2463 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
2464 AssertRCReturn(rc, rc);
2465 /* ignore informational status codes */
2466 }
2467 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2468 /** @todo Status code management. */
2469 }
2470 else
2471 rcStrict = VINF_SUCCESS;
2472 break;
2473 }
2474
2475 /*
2476 * CR8 maps to the APIC TPR.
2477 */
2478 case 8:
2479 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2480 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2481 else
2482 rcStrict = VINF_SUCCESS;
2483 break;
2484
2485 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2486 }
2487
2488 /*
2489 * Advance the RIP on success.
2490 */
2491 /** @todo Status code management. */
2492 if (rcStrict == VINF_SUCCESS)
2493 iemRegAddToRip(pIemCpu, cbInstr);
2494 return rcStrict;
2495
2496}
2497
2498
2499/**
2500 * Implements mov CRx,GReg.
2501 *
2502 * @param iCrReg The CRx register to write (valid).
2503 * @param iGReg The general register to load the CRx value from.
2504 */
2505IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
2506{
2507 if (pIemCpu->uCpl != 0)
2508 return iemRaiseGeneralProtectionFault0(pIemCpu);
2509 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2510
2511 /*
2512 * Read the new value from the source register and call common worker.
2513 */
2514 uint64_t uNewCrX;
2515 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2516 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
2517 else
2518 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
2519 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
2520}
2521
2522
2523/**
2524 * Implements 'LMSW r/m16'
2525 *
2526 * @param u16NewMsw The new value.
2527 */
2528IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
2529{
2530 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2531
2532 if (pIemCpu->uCpl != 0)
2533 return iemRaiseGeneralProtectionFault0(pIemCpu);
2534 Assert(!pCtx->eflags.Bits.u1VM);
2535
2536 /*
2537 * Compose the new CR0 value and call common worker.
2538 */
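    /* LMSW can only modify PE, MP, EM and TS, and it cannot clear PE once it has been set. */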
2539 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2540 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2541 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
2542}
2543
2544
2545/**
2546 * Implements 'CLTS'.
2547 */
2548IEM_CIMPL_DEF_0(iemCImpl_clts)
2549{
2550 if (pIemCpu->uCpl != 0)
2551 return iemRaiseGeneralProtectionFault0(pIemCpu);
2552
2553 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2554 uint64_t uNewCr0 = pCtx->cr0;
2555 uNewCr0 &= ~X86_CR0_TS;
2556 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
2557}
2558
2559
2560/**
2561 * Implements mov GReg,DRx.
2562 *
2563 * @param iGReg The general register to store the DRx value in.
2564 * @param iDrReg The DRx register to read (0-7).
2565 */
2566IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
2567{
2568 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2569
2570 /*
2571 * Check preconditions.
2572 */
2573
2574 /* Raise GPs. */
2575 if (pIemCpu->uCpl != 0)
2576 return iemRaiseGeneralProtectionFault0(pIemCpu);
2577 Assert(!pCtx->eflags.Bits.u1VM);
2578
2579 if ( (iDrReg == 4 || iDrReg == 5)
2580 && (pCtx->cr4 & X86_CR4_DE) )
2581 {
2582 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
2583 return iemRaiseGeneralProtectionFault0(pIemCpu);
2584 }
2585
2586 /* Raise #DB if general access detect is enabled. */
2587 if (pCtx->dr[7] & X86_DR7_GD)
2588 {
2589 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
2590 return iemRaiseDebugException(pIemCpu);
2591 }
2592
2593 /*
2594 * Read the debug register and store it in the specified general register.
2595 */
2596 uint64_t drX;
2597 switch (iDrReg)
2598 {
2599 case 0: drX = pCtx->dr[0]; break;
2600 case 1: drX = pCtx->dr[1]; break;
2601 case 2: drX = pCtx->dr[2]; break;
2602 case 3: drX = pCtx->dr[3]; break;
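        /* DR4 and DR5 alias DR6 and DR7 here (CR4.DE=1 was rejected above); the reserved bits read back with their architecturally fixed values. */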
2603 case 6:
2604 case 4:
2605 drX = pCtx->dr[6];
2606 drX &= ~RT_BIT_32(12);
2607 drX |= UINT32_C(0xffff0ff0);
2608 break;
2609 case 7:
2610 case 5:
2611 drX = pCtx->dr[7];
2612 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
2613 drX |= RT_BIT_32(10);
2614 break;
2615 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2616 }
2617
2618 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2619 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
2620 else
2621 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
2622
2623 iemRegAddToRip(pIemCpu, cbInstr);
2624 return VINF_SUCCESS;
2625}
2626
2627
2628/**
2629 * Implements mov DRx,GReg.
2630 *
2631 * @param iDrReg The DRx register to write (valid).
2632 * @param iGReg The general register to load the DRx value from.
2633 */
2634IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
2635{
2636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2637
2638 /*
2639 * Check preconditions.
2640 */
2641 if (pIemCpu->uCpl != 0)
2642 return iemRaiseGeneralProtectionFault0(pIemCpu);
2643 Assert(!pCtx->eflags.Bits.u1VM);
2644
2645 if ( (iDrReg == 4 || iDrReg == 5)
2646 && (pCtx->cr4 & X86_CR4_DE) )
2647 {
2648 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
2649 return iemRaiseGeneralProtectionFault0(pIemCpu);
2650 }
2651
2652 /* Raise #DB if general access detect is enabled. */
2653 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
2654 * \#GP? */
2655 if (pCtx->dr[7] & X86_DR7_GD)
2656 {
2657 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
2658 return iemRaiseDebugException(pIemCpu);
2659 }
2660
2661 /*
2662 * Read the new value from the source register.
2663 */
2664 uint64_t uNewDrX;
2665 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2666 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
2667 else
2668 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
2669
2670 /*
2671 * Adjust it.
2672 */
2673 switch (iDrReg)
2674 {
2675 case 0:
2676 case 1:
2677 case 2:
2678 case 3:
2679 /* nothing to adjust */
2680 break;
2681
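        /* For DR6/DR7 the upper 32 bits must be zero and the reserved bits are forced to their architecturally fixed values. */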
2682 case 6:
2683 case 4:
2684 if (uNewDrX & UINT64_C(0xffffffff00000000))
2685 {
2686 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
2687 return iemRaiseGeneralProtectionFault0(pIemCpu);
2688 }
2689 uNewDrX &= ~RT_BIT_32(12);
2690 uNewDrX |= UINT32_C(0xffff0ff0);
2691 break;
2692
2693 case 7:
2694 case 5:
2695 if (uNewDrX & UINT64_C(0xffffffff00000000))
2696 {
2697 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
2698 return iemRaiseGeneralProtectionFault0(pIemCpu);
2699 }
2700 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
2701 uNewDrX |= RT_BIT_32(10);
2702 break;
2703
2704 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2705 }
2706
2707 /*
2708 * Do the actual setting.
2709 */
2710 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2711 {
2712 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
2713 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
2714 }
2715 else
2716 pCtx->dr[iDrReg] = uNewDrX;
2717
2718 iemRegAddToRip(pIemCpu, cbInstr);
2719 return VINF_SUCCESS;
2720}
2721
2722
2723/**
2724 * Implements RDTSC.
2725 */
2726IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
2727{
2728 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2729
2730 /*
2731 * Check preconditions.
2732 */
2733 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
2734 return iemRaiseUndefinedOpcode(pIemCpu);
2735
2736 if ( (pCtx->cr4 & X86_CR4_TSD)
2737 && pIemCpu->uCpl != 0)
2738 {
2739 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
2740 return iemRaiseGeneralProtectionFault0(pIemCpu);
2741 }
2742
2743 /*
2744 * Do the job.
2745 */
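    /* The TSC is returned in EDX:EAX; the high halves of RAX and RDX are cleared. */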
2746 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
2747 pCtx->rax = (uint32_t)uTicks;
2748 pCtx->rdx = uTicks >> 32;
2749
2750 iemRegAddToRip(pIemCpu, cbInstr);
2751 return VINF_SUCCESS;
2752}
2753
2754
2755/**
2756 * Implements 'IN eAX, port'.
2757 *
2758 * @param u16Port The source port.
2759 * @param cbReg The register size.
2760 */
2761IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
2762{
2763 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2764
2765 /*
2766 * CPL check
2767 */
2768 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
2769 if (rcStrict != VINF_SUCCESS)
2770 return rcStrict;
2771
2772 /*
2773 * Perform the I/O.
2774 */
2775 uint32_t u32Value;
2776 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2777 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
2778 else
2779 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
2780 if (IOM_SUCCESS(rcStrict))
2781 {
2782 switch (cbReg)
2783 {
2784 case 1: pCtx->al = (uint8_t)u32Value; break;
2785 case 2: pCtx->ax = (uint16_t)u32Value; break;
2786 case 4: pCtx->rax = u32Value; break;
2787 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
2788 }
2789 iemRegAddToRip(pIemCpu, cbInstr);
2790 pIemCpu->cPotentialExits++;
2791 }
2792 /** @todo massage rcStrict. */
2793 return rcStrict;
2794}
2795
2796
2797/**
2798 * Implements 'IN eAX, DX'.
2799 *
2800 * @param cbReg The register size.
2801 */
2802IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
2803{
2804 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
2805}
2806
2807
2808/**
2809 * Implements 'OUT port, eAX'.
2810 *
2811 * @param u16Port The destination port.
2812 * @param cbReg The register size.
2813 */
2814IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
2815{
2816 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2817
2818 /*
2819 * CPL check
2820 */
2821 if ( (pCtx->cr0 & X86_CR0_PE)
2822 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
2823 || pCtx->eflags.Bits.u1VM) )
2824 {
2825 /** @todo I/O port permission bitmap check */
2826 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
2827 }
2828
2829 /*
2830 * Perform the I/O.
2831 */
2832 uint32_t u32Value;
2833 switch (cbReg)
2834 {
2835 case 1: u32Value = pCtx->al; break;
2836 case 2: u32Value = pCtx->ax; break;
2837 case 4: u32Value = pCtx->eax; break;
2838 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
2839 }
2840 VBOXSTRICTRC rc;
2841 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2842 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
2843 else
2844 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
2845 if (IOM_SUCCESS(rc))
2846 {
2847 iemRegAddToRip(pIemCpu, cbInstr);
2848 pIemCpu->cPotentialExits++;
2849 /** @todo massage rc. */
2850 }
2851 return rc;
2852}
2853
2854
2855/**
2856 * Implements 'OUT DX, eAX'.
2857 *
2858 * @param cbReg The register size.
2859 */
2860IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
2861{
2862 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
2863}
2864
2865
2866/**
2867 * Implements 'CLI'.
2868 */
2869IEM_CIMPL_DEF_0(iemCImpl_cli)
2870{
2871 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2872
2873 if (pCtx->cr0 & X86_CR0_PE)
2874 {
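        /* In protected mode IF can only be cleared when CPL <= IOPL; otherwise the virtual interrupt flag (VIF) is cleared instead if CR4.PVI (CPL 3) or CR4.VME (V8086) allows it, else we raise #GP(0). */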
2875 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
2876 if (!pCtx->eflags.Bits.u1VM)
2877 {
2878 if (pIemCpu->uCpl <= uIopl)
2879 pCtx->eflags.Bits.u1IF = 0;
2880 else if ( pIemCpu->uCpl == 3
2881 && (pCtx->cr4 & X86_CR4_PVI) )
2882 pCtx->eflags.Bits.u1VIF = 0;
2883 else
2884 return iemRaiseGeneralProtectionFault0(pIemCpu);
2885 }
2886 /* V8086 */
2887 else if (uIopl == 3)
2888 pCtx->eflags.Bits.u1IF = 0;
2889 else if ( uIopl < 3
2890 && (pCtx->cr4 & X86_CR4_VME) )
2891 pCtx->eflags.Bits.u1VIF = 0;
2892 else
2893 return iemRaiseGeneralProtectionFault0(pIemCpu);
2894 }
2895 /* real mode */
2896 else
2897 pCtx->eflags.Bits.u1IF = 0;
2898 iemRegAddToRip(pIemCpu, cbInstr);
2899 return VINF_SUCCESS;
2900}
2901
2902
2903/**
2904 * Implements 'STI'.
2905 */
2906IEM_CIMPL_DEF_0(iemCImpl_sti)
2907{
2908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2909
2910 if (pCtx->cr0 & X86_CR0_PE)
2911 {
2912 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
2913 if (!pCtx->eflags.Bits.u1VM)
2914 {
2915 if (pIemCpu->uCpl <= uIopl)
2916 pCtx->eflags.Bits.u1IF = 1;
2917 else if ( pIemCpu->uCpl == 3
2918 && (pCtx->cr4 & X86_CR4_PVI)
2919 && !pCtx->eflags.Bits.u1VIP )
2920 pCtx->eflags.Bits.u1VIF = 1;
2921 else
2922 return iemRaiseGeneralProtectionFault0(pIemCpu);
2923 }
2924 /* V8086 */
2925 else if (uIopl == 3)
2926 pCtx->eflags.Bits.u1IF = 1;
2927 else if ( uIopl < 3
2928 && (pCtx->cr4 & X86_CR4_VME)
2929 && !pCtx->eflags.Bits.u1VIP )
2930 pCtx->eflags.Bits.u1VIF = 1;
2931 else
2932 return iemRaiseGeneralProtectionFault0(pIemCpu);
2933 }
2934 /* real mode */
2935 else
2936 pCtx->eflags.Bits.u1IF = 1;
2937
2938 iemRegAddToRip(pIemCpu, cbInstr);
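    /* Interrupts remain inhibited until the instruction following STI completes, so record the interrupt shadow. */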
2939 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2940 return VINF_SUCCESS;
2941}
2942
2943
2944/**
2945 * Implements 'HLT'.
2946 */
2947IEM_CIMPL_DEF_0(iemCImpl_hlt)
2948{
2949 if (pIemCpu->uCpl != 0)
2950 return iemRaiseGeneralProtectionFault0(pIemCpu);
2951 iemRegAddToRip(pIemCpu, cbInstr);
2952 return VINF_EM_HALT;
2953}
2954
2955
2956/**
2957 * Implements 'CPUID'.
2958 */
2959IEM_CIMPL_DEF_0(iemCImpl_cpuid)
2960{
2961 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2962
2963 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
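    /* CPUID produces 32-bit results; clear the high halves of RAX, RBX, RCX and RDX. */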
2964 pCtx->rax &= UINT32_C(0xffffffff);
2965 pCtx->rbx &= UINT32_C(0xffffffff);
2966 pCtx->rcx &= UINT32_C(0xffffffff);
2967 pCtx->rdx &= UINT32_C(0xffffffff);
2968
2969 iemRegAddToRip(pIemCpu, cbInstr);
2970 return VINF_SUCCESS;
2971}
2972
2973
2974/*
2975 * Instantiate the various string operation combinations.
2976 */
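/* The template is included once per operand/address size combination and is expected to #undef OP_SIZE and ADDR_SIZE when done. 64-bit operands with 16-bit addressing is not a possible combination and is therefore omitted. */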
2977#define OP_SIZE 8
2978#define ADDR_SIZE 16
2979#include "IEMAllCImplStrInstr.cpp.h"
2980#define OP_SIZE 8
2981#define ADDR_SIZE 32
2982#include "IEMAllCImplStrInstr.cpp.h"
2983#define OP_SIZE 8
2984#define ADDR_SIZE 64
2985#include "IEMAllCImplStrInstr.cpp.h"
2986
2987#define OP_SIZE 16
2988#define ADDR_SIZE 16
2989#include "IEMAllCImplStrInstr.cpp.h"
2990#define OP_SIZE 16
2991#define ADDR_SIZE 32
2992#include "IEMAllCImplStrInstr.cpp.h"
2993#define OP_SIZE 16
2994#define ADDR_SIZE 64
2995#include "IEMAllCImplStrInstr.cpp.h"
2996
2997#define OP_SIZE 32
2998#define ADDR_SIZE 16
2999#include "IEMAllCImplStrInstr.cpp.h"
3000#define OP_SIZE 32
3001#define ADDR_SIZE 32
3002#include "IEMAllCImplStrInstr.cpp.h"
3003#define OP_SIZE 32
3004#define ADDR_SIZE 64
3005#include "IEMAllCImplStrInstr.cpp.h"
3006
3007#define OP_SIZE 64
3008#define ADDR_SIZE 32
3009#include "IEMAllCImplStrInstr.cpp.h"
3010#define OP_SIZE 64
3011#define ADDR_SIZE 64
3012#include "IEMAllCImplStrInstr.cpp.h"
3013
3014
3015/**
3016 * Implements 'FINIT' and 'FNINIT'.
3017 *
3018 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3019 * not.
3020 */
3021IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3022{
3023 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3024
3025 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3026 return iemRaiseDeviceNotAvailable(pIemCpu);
3027 /** @todo trigger pending exceptions:
3028 if (fCheckXcpts && TODO )
3029 return iemRaiseMathFault(pIemCpu);
3030 */
3031
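    /* FNINIT loads FCW=0x37F (all exceptions masked, extended precision, round to nearest), clears FSW and marks all registers empty; in the FXSAVE layout the abbreviated tag byte is therefore 0. */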
3032 if (iemFRegIsFxSaveFormat(pIemCpu))
3033 {
3034 pCtx->fpu.FCW = 0x37f;
3035 pCtx->fpu.FSW = 0;
3036 pCtx->fpu.FTW = 0x00;
3037 pCtx->fpu.FPUDP = 0;
3038 pCtx->fpu.DS = 0; //??
3039 pCtx->fpu.FPUIP = 0;
3040 pCtx->fpu.CS = 0; //??
3041 pCtx->fpu.FOP = 0;
3042 }
3043 else
3044 {
3045 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3046 pFpu->FCW = 0x37f;
3047 pFpu->FSW = 0;
3048 pFpu->FTW = 0xffff;
3049 pFpu->FPUOO = 0; //??
3050 pFpu->FPUOS = 0; //??
3051 pFpu->FPUIP = 0;
3052 pFpu->CS = 0; //??
3053 pFpu->FOP = 0;
3054 }
3055
3056 iemRegAddToRip(pIemCpu, cbInstr);
3057 return VINF_SUCCESS;
3058}
3059
3060
3061/** @} */
3062