VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@47666

Last change on this file since 47666 was 47666, checked in by vboxsync, 11 years ago

Use the LAR mask that AMD documents for now.

  • Property svn:eol-style set to native
File size: 200.6 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
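/*
 * Illustration (a standalone sketch, not part of the original file): the
 * parity_table above holds CC_P for every byte value with an even number of
 * set bits, which is how the x86 PF flag is defined.  A table like it could
 * be generated as below; CC_P_VALUE stands in for the CC_P constant from
 * cpu.h and is an assumption of this sketch.
 */
#if 0
#include <stdio.h>

#define CC_P_VALUE 0x04  /* assumed value of CC_P (PF is bit 2 of EFLAGS) */

int main(void)
{
    int i;
    for (i = 0; i < 256; i++) {
        int bits = __builtin_popcount(i);
        /* PF is set when the low byte of the result has an even bit count */
        printf("%s%s", (bits & 1) ? "0" : "CC_P",
               (i % 8 == 7) ? ",\n" : ", ");
    }
    return 0;
}
#endif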
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non zero if error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 /** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /** @todo in theory the iret could fault and we'd still need this. */
236 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
237 {
238 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
239 selector = selector & 0xfffc;
240 }
241#endif /* VBOX */
242
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
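/*
 * Illustration (a minimal standalone sketch, not part of the original
 * helpers): how the two descriptor dwords e1/e2 map onto base and limit,
 * using the same bit slicing as get_seg_base()/get_seg_limit() above.  The
 * example value is the classic flat 32-bit code descriptor.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t desc = 0x00cf9a000000ffffULL;   /* flat 4GiB code segment */
    uint32_t e1 = (uint32_t)desc;            /* low dword  */
    uint32_t e2 = (uint32_t)(desc >> 32);    /* high dword */

    uint32_t base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1 << 23))                      /* DESC_G_MASK: 4KiB granularity */
        limit = (limit << 12) | 0xfff;

    printf("base=%08x limit=%08x\n", base, limit); /* base=00000000 limit=ffffffff */
    return 0;
}
#endif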
269
270static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271{
272 sc->base = get_seg_base(e1, e2);
273 sc->limit = get_seg_limit(e1, e2);
274 sc->flags = e2;
275#ifdef VBOX
276 sc->flags &= ~DESC_INTEL_UNUSABLE;
277 sc->newselector = 0;
278 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
279#endif
280}
281
282/* init the segment cache in vm86 mode. */
283static inline void load_seg_vm(int seg, int selector)
284{
285 selector &= 0xffff;
286#ifdef VBOX
287 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
288 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
289 flags |= (3 << DESC_DPL_SHIFT);
290
291 cpu_x86_load_seg_cache(env, seg, selector,
292 (selector << 4), 0xffff, flags);
293#else /* VBOX */
294 cpu_x86_load_seg_cache(env, seg, selector,
295 (selector << 4), 0xffff, 0);
296#endif /* VBOX */
297}
298
299static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
300 uint32_t *esp_ptr, int dpl)
301{
302#ifndef VBOX
303 int type, index, shift;
304#else
305 unsigned int type, index, shift;
306#endif
307
308#if 0
309 {
310 int i;
311 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
312 for(i=0;i<env->tr.limit;i++) {
313 printf("%02x ", env->tr.base[i]);
314 if ((i & 7) == 7) printf("\n");
315 }
316 printf("\n");
317 }
318#endif
319
320 if (!(env->tr.flags & DESC_P_MASK))
321 cpu_abort(env, "invalid tss");
322 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
323 if ((type & 7) != 1)
324 cpu_abort(env, "invalid tss type");
325 shift = type >> 3;
326 index = (dpl * 4 + 2) << shift;
327 if (index + (4 << shift) - 1 > env->tr.limit)
328 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
329 if (shift == 0) {
330 *esp_ptr = lduw_kernel(env->tr.base + index);
331 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
332 } else {
333 *esp_ptr = ldl_kernel(env->tr.base + index);
334 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
335 }
336}
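/*
 * Illustration (sketch only): the index computed in get_ss_esp_from_tss()
 * lands on the SSn:ESPn pair of the TSS.  For a 32-bit TSS (shift == 1) that
 * is ESPn at offset 4 + 8*dpl and SSn at offset 8 + 8*dpl; for a 16-bit TSS
 * (shift == 0) it is SPn at 2 + 4*dpl and SSn at 4 + 4*dpl.
 */
#if 0
#include <assert.h>

static void check_tss_stack_offsets(void)
{
    int dpl, shift;
    for (shift = 0; shift <= 1; shift++) {
        for (dpl = 0; dpl < 3; dpl++) {
            int index = (dpl * 4 + 2) << shift;
            if (shift)
                assert(index == 4 + 8 * dpl);   /* 32-bit TSS: ESPn offset */
            else
                assert(index == 2 + 4 * dpl);   /* 16-bit TSS: SPn offset */
        }
    }
}
#endif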
337
338/* XXX: merge with load_seg() */
339static void tss_load_seg(int seg_reg, int selector)
340{
341 uint32_t e1, e2;
342 int rpl, dpl, cpl;
343
344#ifdef VBOX
345 e1 = e2 = 0; /* gcc warning? */
346 cpl = env->hflags & HF_CPL_MASK;
347 /* Trying to load a selector with CPL=1? */
348 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
349 {
350 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
351 selector = selector & 0xfffc;
352 }
353#endif /* VBOX */
354
355 if ((selector & 0xfffc) != 0) {
356 if (load_segment(&e1, &e2, selector) != 0)
357 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
358 if (!(e2 & DESC_S_MASK))
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 rpl = selector & 3;
361 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
362 cpl = env->hflags & HF_CPL_MASK;
363 if (seg_reg == R_CS) {
364 if (!(e2 & DESC_CS_MASK))
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 /* XXX: is it correct ? */
367 if (dpl != rpl)
368 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
369 if ((e2 & DESC_C_MASK) && dpl > rpl)
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 } else if (seg_reg == R_SS) {
372 /* SS must be writable data */
373 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 if (dpl != cpl || dpl != rpl)
376 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
377 } else {
378 /* not readable code */
379 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
380 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
381 /* if data or non-conforming code, check the rights */
382 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
383 if (dpl < cpl || dpl < rpl)
384 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
385 }
386 }
387 if (!(e2 & DESC_P_MASK))
388 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
389 cpu_x86_load_seg_cache(env, seg_reg, selector,
390 get_seg_base(e1, e2),
391 get_seg_limit(e1, e2),
392 e2);
393 } else {
394 if (seg_reg == R_SS || seg_reg == R_CS)
395 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
396#ifdef VBOX
397# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
398 cpu_x86_load_seg_cache(env, seg_reg, selector,
399 0, 0, 0);
400# endif
401#endif /* VBOX */
402 }
403}
404
405#define SWITCH_TSS_JMP 0
406#define SWITCH_TSS_IRET 1
407#define SWITCH_TSS_CALL 2
408
409/* XXX: restore CPU state in registers (PowerPC case) */
410static void switch_tss(int tss_selector,
411 uint32_t e1, uint32_t e2, int source,
412 uint32_t next_eip)
413{
414 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
415 target_ulong tss_base;
416 uint32_t new_regs[8], new_segs[6];
417 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
418 uint32_t old_eflags, eflags_mask;
419 SegmentCache *dt;
420#ifndef VBOX
421 int index;
422#else
423 unsigned int index;
424#endif
425 target_ulong ptr;
426
427 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
428 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
429
430 /* if task gate, we read the TSS segment and we load it */
431 if (type == 5) {
432 if (!(e2 & DESC_P_MASK))
433 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
434 tss_selector = e1 >> 16;
435 if (tss_selector & 4)
436 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
437 if (load_segment(&e1, &e2, tss_selector) != 0)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 if (e2 & DESC_S_MASK)
440 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
441 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
442 if ((type & 7) != 1)
443 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
444 }
445
446 if (!(e2 & DESC_P_MASK))
447 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
448
449 if (type & 8)
450 tss_limit_max = 103;
451 else
452 tss_limit_max = 43;
453 tss_limit = get_seg_limit(e1, e2);
454 tss_base = get_seg_base(e1, e2);
455 if ((tss_selector & 4) != 0 ||
456 tss_limit < tss_limit_max)
457 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
458 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
459 if (old_type & 8)
460 old_tss_limit_max = 103;
461 else
462 old_tss_limit_max = 43;
463
464 /* read all the registers from the new TSS */
465 if (type & 8) {
466 /* 32 bit */
467 new_cr3 = ldl_kernel(tss_base + 0x1c);
468 new_eip = ldl_kernel(tss_base + 0x20);
469 new_eflags = ldl_kernel(tss_base + 0x24);
470 for(i = 0; i < 8; i++)
471 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
472 for(i = 0; i < 6; i++)
473 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
474 new_ldt = lduw_kernel(tss_base + 0x60);
475 new_trap = ldl_kernel(tss_base + 0x64);
476 } else {
477 /* 16 bit */
478 new_cr3 = 0;
479 new_eip = lduw_kernel(tss_base + 0x0e);
480 new_eflags = lduw_kernel(tss_base + 0x10);
481 for(i = 0; i < 8; i++)
482 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
483 for(i = 0; i < 4; i++)
484 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
485 new_ldt = lduw_kernel(tss_base + 0x2a);
486 new_segs[R_FS] = 0;
487 new_segs[R_GS] = 0;
488 new_trap = 0;
489 }
490
491 /* NOTE: we must avoid memory exceptions during the task switch,
492 so we make dummy accesses before */
493 /* XXX: it can still fail in some cases, so a bigger hack is
494 necessary to validate the TLB after having done the accesses */
495
496 v1 = ldub_kernel(env->tr.base);
497 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
498 stb_kernel(env->tr.base, v1);
499 stb_kernel(env->tr.base + old_tss_limit_max, v2);
500
501 /* clear busy bit (it is restartable) */
502 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
503 target_ulong ptr;
504 uint32_t e2;
505 ptr = env->gdt.base + (env->tr.selector & ~7);
506 e2 = ldl_kernel(ptr + 4);
507 e2 &= ~DESC_TSS_BUSY_MASK;
508 stl_kernel(ptr + 4, e2);
509 }
510 old_eflags = compute_eflags();
511 if (source == SWITCH_TSS_IRET)
512 old_eflags &= ~NT_MASK;
513
514 /* save the current state in the old TSS */
515 if (type & 8) {
516 /* 32 bit */
517 stl_kernel(env->tr.base + 0x20, next_eip);
518 stl_kernel(env->tr.base + 0x24, old_eflags);
519 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
520 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
521 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
522 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
523 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
524 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
525 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
526 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
527 for(i = 0; i < 6; i++)
528 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
529#ifdef VBOX
530 /* Must store the ldt as it gets reloaded and might have been changed. */
531 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
532#endif
533#if defined(VBOX) && defined(DEBUG)
534 printf("TSS 32 bits switch\n");
535 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
536#endif
537 } else {
538 /* 16 bit */
539 stw_kernel(env->tr.base + 0x0e, next_eip);
540 stw_kernel(env->tr.base + 0x10, old_eflags);
541 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
542 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
543 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
544 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
545 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
546 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
547 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
548 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
549 for(i = 0; i < 4; i++)
550 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
551#ifdef VBOX
552 /* Must store the ldt as it gets reloaded and might have been changed. */
553 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
554#endif
555 }
556
557 /* now if an exception occurs, it will occur in the next task
558 context */
559
560 if (source == SWITCH_TSS_CALL) {
561 stw_kernel(tss_base, env->tr.selector);
562 new_eflags |= NT_MASK;
563 }
564
565 /* set busy bit */
566 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
567 target_ulong ptr;
568 uint32_t e2;
569 ptr = env->gdt.base + (tss_selector & ~7);
570 e2 = ldl_kernel(ptr + 4);
571 e2 |= DESC_TSS_BUSY_MASK;
572 stl_kernel(ptr + 4, e2);
573 }
574
575 /* set the new CPU state */
576 /* from this point, any exception which occurs can give problems */
577 env->cr[0] |= CR0_TS_MASK;
578 env->hflags |= HF_TS_MASK;
579 env->tr.selector = tss_selector;
580 env->tr.base = tss_base;
581 env->tr.limit = tss_limit;
582 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
583#ifdef VBOX
584 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
585 env->tr.newselector = 0;
586#endif
587
588 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
589 cpu_x86_update_cr3(env, new_cr3);
590 }
591
592 /* load all registers without an exception, then reload them with
593 possible exception */
594 env->eip = new_eip;
595 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
596 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
597 if (!(type & 8))
598 eflags_mask &= 0xffff;
599 load_eflags(new_eflags, eflags_mask);
600 /* XXX: what to do in 16 bit case ? */
601 EAX = new_regs[0];
602 ECX = new_regs[1];
603 EDX = new_regs[2];
604 EBX = new_regs[3];
605 ESP = new_regs[4];
606 EBP = new_regs[5];
607 ESI = new_regs[6];
608 EDI = new_regs[7];
609 if (new_eflags & VM_MASK) {
610 for(i = 0; i < 6; i++)
611 load_seg_vm(i, new_segs[i]);
612 /* in vm86, CPL is always 3 */
613 cpu_x86_set_cpl(env, 3);
614 } else {
615 /* CPL is set to the RPL of CS */
616 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
617 /* first just selectors as the rest may trigger exceptions */
618 for(i = 0; i < 6; i++)
619 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
620 }
621
622 env->ldt.selector = new_ldt & ~4;
623 env->ldt.base = 0;
624 env->ldt.limit = 0;
625 env->ldt.flags = 0;
626#ifdef VBOX
627 env->ldt.flags = DESC_INTEL_UNUSABLE;
628 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
629 env->ldt.newselector = 0;
630#endif
631
632 /* load the LDT */
633 if (new_ldt & 4)
634 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
635
636 if ((new_ldt & 0xfffc) != 0) {
637 dt = &env->gdt;
638 index = new_ldt & ~7;
639 if ((index + 7) > dt->limit)
640 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
641 ptr = dt->base + index;
642 e1 = ldl_kernel(ptr);
643 e2 = ldl_kernel(ptr + 4);
644 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
645 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
646 if (!(e2 & DESC_P_MASK))
647 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
648 load_seg_cache_raw_dt(&env->ldt, e1, e2);
649 }
650
651 /* load the segments */
652 if (!(new_eflags & VM_MASK)) {
653 tss_load_seg(R_CS, new_segs[R_CS]);
654 tss_load_seg(R_SS, new_segs[R_SS]);
655 tss_load_seg(R_ES, new_segs[R_ES]);
656 tss_load_seg(R_DS, new_segs[R_DS]);
657 tss_load_seg(R_FS, new_segs[R_FS]);
658 tss_load_seg(R_GS, new_segs[R_GS]);
659 }
660
661 /* check that EIP is in the CS segment limits */
662 if (new_eip > env->segs[R_CS].limit) {
663 /* XXX: different exception if CALL ? */
664 raise_exception_err(EXCP0D_GPF, 0);
665 }
666
667#ifndef CONFIG_USER_ONLY
668 /* reset local breakpoints */
669 if (env->dr[7] & 0x55) {
670 for (i = 0; i < 4; i++) {
671 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
672 hw_breakpoint_remove(env, i);
673 }
674 env->dr[7] &= ~0x55;
675 }
676#endif
677}
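/*
 * Illustration (sketch, assuming the standard IA-32 layout): the fixed
 * offsets used by switch_tss() above correspond to this view of a 32-bit
 * TSS.  The structure is 0x68 bytes, hence the minimum limit of 0x67 (103)
 * checked above.
 */
#if 0
#include <stdint.h>

struct tss32_sketch {
    uint32_t prev_task;      /* 0x00 */
    uint32_t esp0, ss0;      /* 0x04, 0x08 */
    uint32_t esp1, ss1;      /* 0x0c, 0x10 */
    uint32_t esp2, ss2;      /* 0x14, 0x18 */
    uint32_t cr3;            /* 0x1c */
    uint32_t eip;            /* 0x20 */
    uint32_t eflags;         /* 0x24 */
    uint32_t regs[8];        /* 0x28: EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI */
    uint32_t segs[6];        /* 0x48: ES,CS,SS,DS,FS,GS (low 16 bits used) */
    uint32_t ldt;            /* 0x60 */
    uint16_t trap;           /* 0x64 */
    uint16_t iomap_base;     /* 0x66 */
};
#endif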
678
679/* check if Port I/O is allowed in TSS */
680static inline void check_io(int addr, int size)
681{
682#ifndef VBOX
683 int io_offset, val, mask;
684#else
685 int val, mask;
686 unsigned int io_offset;
687#endif /* VBOX */
688
689 /* TSS must be a valid 32 bit one */
690 if (!(env->tr.flags & DESC_P_MASK) ||
691 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
692 env->tr.limit < 103)
693 goto fail;
694 io_offset = lduw_kernel(env->tr.base + 0x66);
695 io_offset += (addr >> 3);
696 /* Note: the check needs two bytes */
697 if ((io_offset + 1) > env->tr.limit)
698 goto fail;
699 val = lduw_kernel(env->tr.base + io_offset);
700 val >>= (addr & 7);
701 mask = (1 << size) - 1;
702 /* all bits must be zero to allow the I/O */
703 if ((val & mask) != 0) {
704 fail:
705 raise_exception_err(EXCP0D_GPF, 0);
706 }
707}
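/*
 * Illustration (standalone sketch): the same I/O permission bitmap test as
 * check_io() above, operating on a flat byte array instead of the guest TSS.
 * A port access is allowed only if all 'size' bits covering it are zero in
 * the bitmap; io_allowed() is a hypothetical name used only here.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool io_allowed(const uint8_t *bitmap, unsigned port, unsigned size)
{
    /* read the two bytes that can contain the bits for this port */
    unsigned byte = port >> 3;
    uint16_t val = bitmap[byte] | (bitmap[byte + 1] << 8);
    uint16_t mask = (1u << size) - 1;
    return ((val >> (port & 7)) & mask) == 0;
}
#endif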
708
709#ifdef VBOX
710
711/* Keep in sync with gen_check_external_event() */
712void helper_check_external_event()
713{
714 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
715 | CPU_INTERRUPT_EXTERNAL_EXIT
716 | CPU_INTERRUPT_EXTERNAL_TIMER
717 | CPU_INTERRUPT_EXTERNAL_DMA))
718 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
719 && (env->eflags & IF_MASK)
720 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
721 {
722 helper_external_event();
723 }
724
725}
726
727void helper_sync_seg(uint32_t reg)
728{
729 if (env->segs[reg].newselector)
730 sync_seg(env, reg, env->segs[reg].newselector);
731}
732
733#endif /* VBOX */
734
735void helper_check_iob(uint32_t t0)
736{
737 check_io(t0, 1);
738}
739
740void helper_check_iow(uint32_t t0)
741{
742 check_io(t0, 2);
743}
744
745void helper_check_iol(uint32_t t0)
746{
747 check_io(t0, 4);
748}
749
750void helper_outb(uint32_t port, uint32_t data)
751{
752#ifndef VBOX
753 cpu_outb(port, data & 0xff);
754#else
755 cpu_outb(env, port, data & 0xff);
756#endif
757}
758
759target_ulong helper_inb(uint32_t port)
760{
761#ifndef VBOX
762 return cpu_inb(port);
763#else
764 return cpu_inb(env, port);
765#endif
766}
767
768void helper_outw(uint32_t port, uint32_t data)
769{
770#ifndef VBOX
771 cpu_outw(port, data & 0xffff);
772#else
773 cpu_outw(env, port, data & 0xffff);
774#endif
775}
776
777target_ulong helper_inw(uint32_t port)
778{
779#ifndef VBOX
780 return cpu_inw(port);
781#else
782 return cpu_inw(env, port);
783#endif
784}
785
786void helper_outl(uint32_t port, uint32_t data)
787{
788#ifndef VBOX
789 cpu_outl(port, data);
790#else
791 cpu_outl(env, port, data);
792#endif
793}
794
795target_ulong helper_inl(uint32_t port)
796{
797#ifndef VBOX
798 return cpu_inl(port);
799#else
800 return cpu_inl(env, port);
801#endif
802}
803
804static inline unsigned int get_sp_mask(unsigned int e2)
805{
806 if (e2 & DESC_B_MASK)
807 return 0xffffffff;
808 else
809 return 0xffff;
810}
811
812static int exeption_has_error_code(int intno)
813{
814 switch(intno) {
815 case 8:
816 case 10:
817 case 11:
818 case 12:
819 case 13:
820 case 14:
821 case 17:
822 return 1;
823 }
824 return 0;
825}
826
827#ifdef TARGET_X86_64
828#define SET_ESP(val, sp_mask)\
829do {\
830 if ((sp_mask) == 0xffff)\
831 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
832 else if ((sp_mask) == 0xffffffffLL)\
833 ESP = (uint32_t)(val);\
834 else\
835 ESP = (val);\
836} while (0)
837#else
838#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
839#endif
840
841/* On 64-bit targets this sum can overflow, so this segment addition macro
842 * can be used to trim the value to 32 bits whenever needed */
843#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
844
845/* XXX: add a is_user flag to have proper security support */
846#define PUSHW(ssp, sp, sp_mask, val)\
847{\
848 sp -= 2;\
849 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
850}
851
852#define PUSHL(ssp, sp, sp_mask, val)\
853{\
854 sp -= 4;\
855 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
856}
857
858#define POPW(ssp, sp, sp_mask, val)\
859{\
860 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
861 sp += 2;\
862}
863
864#define POPL(ssp, sp, sp_mask, val)\
865{\
866 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
867 sp += 4;\
868}
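/*
 * Illustration (sketch): how the PUSHL/POPL macros above are meant to be
 * used.  'sp' is a scratch copy of ESP that is only written back with
 * SET_ESP() once every access has succeeded, so a faulting push leaves the
 * architectural ESP untouched.
 */
#if 0
    {
        target_ulong ssp = env->segs[R_SS].base;
        unsigned int sp_mask = get_sp_mask(env->segs[R_SS].flags);
        target_ulong sp = ESP;
        uint32_t val;

        PUSHL(ssp, sp, sp_mask, 0x12345678);   /* sp -= 4; store value      */
        POPL(ssp, sp, sp_mask, val);           /* load value; sp += 4       */
        SET_ESP(sp, sp_mask);                  /* commit the final pointer  */
    }
#endif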
869
870/* protected mode interrupt */
871static void do_interrupt_protected(int intno, int is_int, int error_code,
872 unsigned int next_eip, int is_hw)
873{
874 SegmentCache *dt;
875 target_ulong ptr, ssp;
876 int type, dpl, selector, ss_dpl, cpl;
877 int has_error_code, new_stack, shift;
878 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
879 uint32_t old_eip, sp_mask;
880
881#ifdef VBOX
882 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
883 cpu_loop_exit();
884#endif
885
886 has_error_code = 0;
887 if (!is_int && !is_hw)
888 has_error_code = exeption_has_error_code(intno);
889 if (is_int)
890 old_eip = next_eip;
891 else
892 old_eip = env->eip;
893
894 dt = &env->idt;
895#ifndef VBOX
896 if (intno * 8 + 7 > dt->limit)
897#else
898 if ((unsigned)intno * 8 + 7 > dt->limit)
899#endif
900 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
901 ptr = dt->base + intno * 8;
902 e1 = ldl_kernel(ptr);
903 e2 = ldl_kernel(ptr + 4);
904 /* check gate type */
905 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
906 switch(type) {
907 case 5: /* task gate */
908#ifdef VBOX
909 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
910 cpl = env->hflags & HF_CPL_MASK;
911 /* check privilege if software int */
912 if (is_int && dpl < cpl)
913 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
914#endif
915 /* must do that check here to return the correct error code */
916 if (!(e2 & DESC_P_MASK))
917 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
918 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
919 if (has_error_code) {
920 int type;
921 uint32_t mask;
922 /* push the error code */
923 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
924 shift = type >> 3;
925 if (env->segs[R_SS].flags & DESC_B_MASK)
926 mask = 0xffffffff;
927 else
928 mask = 0xffff;
929 esp = (ESP - (2 << shift)) & mask;
930 ssp = env->segs[R_SS].base + esp;
931 if (shift)
932 stl_kernel(ssp, error_code);
933 else
934 stw_kernel(ssp, error_code);
935 SET_ESP(esp, mask);
936 }
937 return;
938 case 6: /* 286 interrupt gate */
939 case 7: /* 286 trap gate */
940 case 14: /* 386 interrupt gate */
941 case 15: /* 386 trap gate */
942 break;
943 default:
944 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
945 break;
946 }
947 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
948 cpl = env->hflags & HF_CPL_MASK;
949 /* check privilege if software int */
950 if (is_int && dpl < cpl)
951 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
952 /* check valid bit */
953 if (!(e2 & DESC_P_MASK))
954 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
955 selector = e1 >> 16;
956 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
957 if ((selector & 0xfffc) == 0)
958 raise_exception_err(EXCP0D_GPF, 0);
959
960 if (load_segment(&e1, &e2, selector) != 0)
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962#ifdef VBOX /** @todo figure out when this is done one day... */
963 if (!(e2 & DESC_A_MASK))
964 e2 = set_segment_accessed(selector, e2);
965#endif
966 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
967 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
968 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
969 if (dpl > cpl)
970 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
971 if (!(e2 & DESC_P_MASK))
972 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
973 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
974 /* to inner privilege */
975 get_ss_esp_from_tss(&ss, &esp, dpl);
976 if ((ss & 0xfffc) == 0)
977 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
978 if ((ss & 3) != dpl)
979 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
980 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
981 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
982#ifdef VBOX /** @todo figure out when this is done one day... */
983 if (!(ss_e2 & DESC_A_MASK))
984 ss_e2 = set_segment_accessed(ss, ss_e2);
985#endif
986 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
987 if (ss_dpl != dpl)
988 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
989 if (!(ss_e2 & DESC_S_MASK) ||
990 (ss_e2 & DESC_CS_MASK) ||
991 !(ss_e2 & DESC_W_MASK))
992 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
993 if (!(ss_e2 & DESC_P_MASK))
994#ifdef VBOX /* See page 3-477 of 253666.pdf */
995 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
996#else
997 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
998#endif
999 new_stack = 1;
1000 sp_mask = get_sp_mask(ss_e2);
1001 ssp = get_seg_base(ss_e1, ss_e2);
1002#if defined(VBOX) && defined(DEBUG)
1003 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1004#endif
1005 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1006 /* to same privilege */
1007 if (env->eflags & VM_MASK)
1008 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1009 new_stack = 0;
1010 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1011 ssp = env->segs[R_SS].base;
1012 esp = ESP;
1013 dpl = cpl;
1014 } else {
1015 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1016 new_stack = 0; /* avoid warning */
1017 sp_mask = 0; /* avoid warning */
1018 ssp = 0; /* avoid warning */
1019 esp = 0; /* avoid warning */
1020 }
1021
1022 shift = type >> 3;
1023
1024#if 0
1025 /* XXX: check that enough room is available */
1026 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1027 if (env->eflags & VM_MASK)
1028 push_size += 8;
1029 push_size <<= shift;
1030#endif
1031 if (shift == 1) {
1032 if (new_stack) {
1033 if (env->eflags & VM_MASK) {
1034 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1035 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1036 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1037 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1038 }
1039 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1040 PUSHL(ssp, esp, sp_mask, ESP);
1041 }
1042 PUSHL(ssp, esp, sp_mask, compute_eflags());
1043 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1044 PUSHL(ssp, esp, sp_mask, old_eip);
1045 if (has_error_code) {
1046 PUSHL(ssp, esp, sp_mask, error_code);
1047 }
1048 } else {
1049 if (new_stack) {
1050 if (env->eflags & VM_MASK) {
1051 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1052 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1053 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1054 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1055 }
1056 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1057 PUSHW(ssp, esp, sp_mask, ESP);
1058 }
1059 PUSHW(ssp, esp, sp_mask, compute_eflags());
1060 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1061 PUSHW(ssp, esp, sp_mask, old_eip);
1062 if (has_error_code) {
1063 PUSHW(ssp, esp, sp_mask, error_code);
1064 }
1065 }
1066
1067 if (new_stack) {
1068 if (env->eflags & VM_MASK) {
1069 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1070 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1071 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1072 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1073 }
1074 ss = (ss & ~3) | dpl;
1075 cpu_x86_load_seg_cache(env, R_SS, ss,
1076 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1077 }
1078 SET_ESP(esp, sp_mask);
1079
1080 selector = (selector & ~3) | dpl;
1081 cpu_x86_load_seg_cache(env, R_CS, selector,
1082 get_seg_base(e1, e2),
1083 get_seg_limit(e1, e2),
1084 e2);
1085 cpu_x86_set_cpl(env, dpl);
1086 env->eip = offset;
1087
1088 /* interrupt gate clear IF mask */
1089 if ((type & 1) == 0) {
1090 env->eflags &= ~IF_MASK;
1091 }
1092#ifndef VBOX
1093 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1094#else
1095 /*
1096 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1097 * gets confused by seemingly changed EFLAGS. See #3491 and
1098 * public bug #2341.
1099 */
1100 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1101#endif
1102}
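/*
 * Illustration (sketch): the stack frame do_interrupt_protected() builds for
 * a 32-bit gate, shown as a struct whose members are in order of increasing
 * address starting at the handler's new ESP.  The old ESP/SS slots exist
 * only when the privilege level changes (new_stack), and the error code only
 * for vectors that push one.
 */
#if 0
#include <stdint.h>

struct intr_frame32_sketch {
    uint32_t error_code;   /* only when has_error_code */
    uint32_t eip;
    uint32_t cs;           /* low 16 bits valid */
    uint32_t eflags;
    uint32_t old_esp;      /* only when switching stacks */
    uint32_t old_ss;       /* only when switching stacks */
};
#endif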
1103
1104#ifdef VBOX
1105
1106/* check if VME interrupt redirection is enabled in TSS */
1107DECLINLINE(bool) is_vme_irq_redirected(int intno)
1108{
1109 unsigned int io_offset, intredir_offset;
1110 unsigned char val, mask;
1111
1112 /* TSS must be a valid 32 bit one */
1113 if (!(env->tr.flags & DESC_P_MASK) ||
1114 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1115 env->tr.limit < 103)
1116 goto fail;
1117 io_offset = lduw_kernel(env->tr.base + 0x66);
1118 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1119 if (io_offset < 0x68 + 0x20)
1120 io_offset = 0x68 + 0x20;
1121 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1122 intredir_offset = io_offset - 0x20;
1123
1124 intredir_offset += (intno >> 3);
1125 if ((intredir_offset) > env->tr.limit)
1126 goto fail;
1127
1128 val = ldub_kernel(env->tr.base + intredir_offset);
1129 mask = 1 << (unsigned char)(intno & 7);
1130
1131 /* bit set means no redirection. */
1132 if ((val & mask) != 0) {
1133 return false;
1134 }
1135 return true;
1136
1137fail:
1138 raise_exception_err(EXCP0D_GPF, 0);
1139 return true;
1140}
1141
1142/* V86 mode software interrupt with CR4.VME=1 */
1143static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1144{
1145 target_ulong ptr, ssp;
1146 int selector;
1147 uint32_t offset, esp;
1148 uint32_t old_cs, old_eflags;
1149 uint32_t iopl;
1150
1151 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1152
1153 if (!is_vme_irq_redirected(intno))
1154 {
1155 if (iopl == 3)
1156 {
1157 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1158 return;
1159 }
1160 else
1161 raise_exception_err(EXCP0D_GPF, 0);
1162 }
1163
1164 /* virtual mode idt is at linear address 0 */
1165 ptr = 0 + intno * 4;
1166 offset = lduw_kernel(ptr);
1167 selector = lduw_kernel(ptr + 2);
1168 esp = ESP;
1169 ssp = env->segs[R_SS].base;
1170 old_cs = env->segs[R_CS].selector;
1171
1172 old_eflags = compute_eflags();
1173 if (iopl < 3)
1174 {
1175 /* copy VIF into IF and set IOPL to 3 */
1176 if (env->eflags & VIF_MASK)
1177 old_eflags |= IF_MASK;
1178 else
1179 old_eflags &= ~IF_MASK;
1180
1181 old_eflags |= (3 << IOPL_SHIFT);
1182 }
1183
1184 /* XXX: use SS segment size ? */
1185 PUSHW(ssp, esp, 0xffff, old_eflags);
1186 PUSHW(ssp, esp, 0xffff, old_cs);
1187 PUSHW(ssp, esp, 0xffff, next_eip);
1188
1189 /* update processor state */
1190 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1191 env->eip = offset;
1192 env->segs[R_CS].selector = selector;
1193 env->segs[R_CS].base = (selector << 4);
1194 env->eflags &= ~(TF_MASK | RF_MASK);
1195
1196 if (iopl < 3)
1197 env->eflags &= ~VIF_MASK;
1198 else
1199 env->eflags &= ~IF_MASK;
1200}
1201
1202#endif /* VBOX */
1203
1204#ifdef TARGET_X86_64
1205
1206#define PUSHQ(sp, val)\
1207{\
1208 sp -= 8;\
1209 stq_kernel(sp, (val));\
1210}
1211
1212#define POPQ(sp, val)\
1213{\
1214 val = ldq_kernel(sp);\
1215 sp += 8;\
1216}
1217
1218static inline target_ulong get_rsp_from_tss(int level)
1219{
1220 int index;
1221
1222#if 0
1223 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1224 env->tr.base, env->tr.limit);
1225#endif
1226
1227 if (!(env->tr.flags & DESC_P_MASK))
1228 cpu_abort(env, "invalid tss");
1229 index = 8 * level + 4;
1230 if ((index + 7) > env->tr.limit)
1231 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1232 return ldq_kernel(env->tr.base + index);
1233}
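/*
 * Illustration (sketch): get_rsp_from_tss() indexes the 64-bit TSS with
 * 8*level + 4.  Levels 0-2 select RSP0-RSP2; the caller passes ist + 3 for
 * IST stacks because an 8-byte reserved slot sits between RSP2 and IST1, so
 * ist 1-7 land on offsets 0x24-0x54.
 */
#if 0
#include <assert.h>

static void check_tss64_offsets(void)
{
    assert(8 * 0 + 4 == 0x04);        /* RSP0 */
    assert(8 * 2 + 4 == 0x14);        /* RSP2 */
    assert(8 * (1 + 3) + 4 == 0x24);  /* IST1 */
    assert(8 * (7 + 3) + 4 == 0x54);  /* IST7 */
}
#endif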
1234
1235/* 64 bit interrupt */
1236static void do_interrupt64(int intno, int is_int, int error_code,
1237 target_ulong next_eip, int is_hw)
1238{
1239 SegmentCache *dt;
1240 target_ulong ptr;
1241 int type, dpl, selector, cpl, ist;
1242 int has_error_code, new_stack;
1243 uint32_t e1, e2, e3, ss;
1244 target_ulong old_eip, esp, offset;
1245
1246#ifdef VBOX
1247 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1248 cpu_loop_exit();
1249#endif
1250
1251 has_error_code = 0;
1252 if (!is_int && !is_hw)
1253 has_error_code = exeption_has_error_code(intno);
1254 if (is_int)
1255 old_eip = next_eip;
1256 else
1257 old_eip = env->eip;
1258
1259 dt = &env->idt;
1260 if (intno * 16 + 15 > dt->limit)
1261 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1262 ptr = dt->base + intno * 16;
1263 e1 = ldl_kernel(ptr);
1264 e2 = ldl_kernel(ptr + 4);
1265 e3 = ldl_kernel(ptr + 8);
1266 /* check gate type */
1267 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1268 switch(type) {
1269 case 14: /* 386 interrupt gate */
1270 case 15: /* 386 trap gate */
1271 break;
1272 default:
1273 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1274 break;
1275 }
1276 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1277 cpl = env->hflags & HF_CPL_MASK;
1278 /* check privilege if software int */
1279 if (is_int && dpl < cpl)
1280 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1281 /* check valid bit */
1282 if (!(e2 & DESC_P_MASK))
1283 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1284 selector = e1 >> 16;
1285 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1286 ist = e2 & 7;
1287 if ((selector & 0xfffc) == 0)
1288 raise_exception_err(EXCP0D_GPF, 0);
1289
1290 if (load_segment(&e1, &e2, selector) != 0)
1291 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1292 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1293 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1294 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1295 if (dpl > cpl)
1296 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1297 if (!(e2 & DESC_P_MASK))
1298 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1299 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1300 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1301 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1302 /* to inner privilege */
1303 if (ist != 0)
1304 esp = get_rsp_from_tss(ist + 3);
1305 else
1306 esp = get_rsp_from_tss(dpl);
1307 esp &= ~0xfLL; /* align stack */
1308 ss = 0;
1309 new_stack = 1;
1310 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1311 /* to same privilege */
1312 if (env->eflags & VM_MASK)
1313 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1314 new_stack = 0;
1315 if (ist != 0)
1316 esp = get_rsp_from_tss(ist + 3);
1317 else
1318 esp = ESP;
1319 esp &= ~0xfLL; /* align stack */
1320 dpl = cpl;
1321 } else {
1322 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1323 new_stack = 0; /* avoid warning */
1324 esp = 0; /* avoid warning */
1325 }
1326
1327 PUSHQ(esp, env->segs[R_SS].selector);
1328 PUSHQ(esp, ESP);
1329 PUSHQ(esp, compute_eflags());
1330 PUSHQ(esp, env->segs[R_CS].selector);
1331 PUSHQ(esp, old_eip);
1332 if (has_error_code) {
1333 PUSHQ(esp, error_code);
1334 }
1335
1336 if (new_stack) {
1337 ss = 0 | dpl;
1338#ifndef VBOX
1339 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1340#else
1341 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1342#endif
1343 }
1344 ESP = esp;
1345
1346 selector = (selector & ~3) | dpl;
1347 cpu_x86_load_seg_cache(env, R_CS, selector,
1348 get_seg_base(e1, e2),
1349 get_seg_limit(e1, e2),
1350 e2);
1351 cpu_x86_set_cpl(env, dpl);
1352 env->eip = offset;
1353
1354 /* interrupt gate clear IF mask */
1355 if ((type & 1) == 0) {
1356 env->eflags &= ~IF_MASK;
1357 }
1358#ifndef VBOX
1359 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1360#else /* VBOX */
1361 /*
1362 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1363 * gets confused by seemingly changed EFLAGS. See #3491 and
1364 * public bug #2341.
1365 */
1366 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1367#endif /* VBOX */
1368}
1369#endif
1370
1371#ifdef TARGET_X86_64
1372#if defined(CONFIG_USER_ONLY)
1373void helper_syscall(int next_eip_addend)
1374{
1375 env->exception_index = EXCP_SYSCALL;
1376 env->exception_next_eip = env->eip + next_eip_addend;
1377 cpu_loop_exit();
1378}
1379#else
1380void helper_syscall(int next_eip_addend)
1381{
1382 int selector;
1383
1384 if (!(env->efer & MSR_EFER_SCE)) {
1385 raise_exception_err(EXCP06_ILLOP, 0);
1386 }
1387 selector = (env->star >> 32) & 0xffff;
1388 if (env->hflags & HF_LMA_MASK) {
1389 int code64;
1390
1391 ECX = env->eip + next_eip_addend;
1392 env->regs[11] = compute_eflags();
1393
1394 code64 = env->hflags & HF_CS64_MASK;
1395
1396 cpu_x86_set_cpl(env, 0);
1397 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1398 0, 0xffffffff,
1399 DESC_G_MASK | DESC_P_MASK |
1400 DESC_S_MASK |
1401 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1402 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1403 0, 0xffffffff,
1404 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1405 DESC_S_MASK |
1406 DESC_W_MASK | DESC_A_MASK);
1407 env->eflags &= ~env->fmask;
1408 load_eflags(env->eflags, 0);
1409 if (code64)
1410 env->eip = env->lstar;
1411 else
1412 env->eip = env->cstar;
1413 } else {
1414 ECX = (uint32_t)(env->eip + next_eip_addend);
1415
1416 cpu_x86_set_cpl(env, 0);
1417 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1418 0, 0xffffffff,
1419 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1420 DESC_S_MASK |
1421 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1422 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1423 0, 0xffffffff,
1424 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1425 DESC_S_MASK |
1426 DESC_W_MASK | DESC_A_MASK);
1427 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1428 env->eip = (uint32_t)env->star;
1429 }
1430}
1431#endif
1432#endif
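/*
 * Illustration (standalone sketch): how helper_syscall()/helper_sysret()
 * above pull their targets out of the STAR MSR.  Bits 47:32 give the SYSCALL
 * CS (SS is that value + 8), bits 63:48 give the SYSRET CS base (SYSRET adds
 * 16 for 64-bit mode), and in legacy mode the low 32 bits hold the SYSCALL
 * EIP; LSTAR/CSTAR hold the 64-bit and compatibility-mode entry points.  The
 * MSR value below is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t star = 0x0023001000000000ULL;       /* hypothetical MSR value */

    uint16_t syscall_cs = (star >> 32) & 0xffff; /* 0x0010: kernel CS, SS = +8 */
    uint16_t sysret_cs  = (star >> 48) & 0xffff; /* 0x0023: user CS base */
    uint32_t legacy_eip = (uint32_t)star;        /* only used when EFER.LMA = 0 */

    printf("syscall CS=%04x sysret CS=%04x eip=%08x\n",
           syscall_cs, sysret_cs, legacy_eip);
    return 0;
}
#endif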
1433
1434#ifdef TARGET_X86_64
1435void helper_sysret(int dflag)
1436{
1437 int cpl, selector;
1438
1439 if (!(env->efer & MSR_EFER_SCE)) {
1440 raise_exception_err(EXCP06_ILLOP, 0);
1441 }
1442 cpl = env->hflags & HF_CPL_MASK;
1443 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1444 raise_exception_err(EXCP0D_GPF, 0);
1445 }
1446 selector = (env->star >> 48) & 0xffff;
1447 if (env->hflags & HF_LMA_MASK) {
1448 if (dflag == 2) {
1449 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1450 0, 0xffffffff,
1451 DESC_G_MASK | DESC_P_MASK |
1452 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1453 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1454 DESC_L_MASK);
1455 env->eip = ECX;
1456 } else {
1457 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1458 0, 0xffffffff,
1459 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1460 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1461 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1462 env->eip = (uint32_t)ECX;
1463 }
1464 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1465 0, 0xffffffff,
1466 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1467 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1468 DESC_W_MASK | DESC_A_MASK);
1469 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1470 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1471 cpu_x86_set_cpl(env, 3);
1472 } else {
1473 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1474 0, 0xffffffff,
1475 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1476 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1477 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1478 env->eip = (uint32_t)ECX;
1479 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1480 0, 0xffffffff,
1481 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1482 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1483 DESC_W_MASK | DESC_A_MASK);
1484 env->eflags |= IF_MASK;
1485 cpu_x86_set_cpl(env, 3);
1486 }
1487}
1488#endif
1489
1490#ifdef VBOX
1491
1492/**
1493 * Checks and processes external VMM events.
1494 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1495 */
1496void helper_external_event(void)
1497{
1498# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1499 uintptr_t uSP;
1500# ifdef RT_ARCH_AMD64
1501 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1502# else
1503 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1504# endif
1505 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1506# endif
1507 /* Keep in sync with flags checked by gen_check_external_event() */
1508 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1509 {
1510 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1511 ~CPU_INTERRUPT_EXTERNAL_HARD);
1512 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1513 }
1514 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1515 {
1516 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1517 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1518 cpu_exit(env);
1519 }
1520 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1521 {
1522 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1523 ~CPU_INTERRUPT_EXTERNAL_DMA);
1524 remR3DmaRun(env);
1525 }
1526 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1527 {
1528 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1529 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1530 remR3TimersRun(env);
1531 }
1532 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1533 {
1534 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1535 ~CPU_INTERRUPT_EXTERNAL_HARD);
1536 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1537 }
1538}
1539
1540/* helper for recording call instruction addresses for later scanning */
1541void helper_record_call()
1542{
1543 if ( !(env->state & CPU_RAW_RING0)
1544 && (env->cr[0] & CR0_PG_MASK)
1545 && !(env->eflags & X86_EFL_IF))
1546 remR3RecordCall(env);
1547}
1548
1549#endif /* VBOX */
1550
1551/* real mode interrupt */
1552static void do_interrupt_real(int intno, int is_int, int error_code,
1553 unsigned int next_eip)
1554{
1555 SegmentCache *dt;
1556 target_ulong ptr, ssp;
1557 int selector;
1558 uint32_t offset, esp;
1559 uint32_t old_cs, old_eip;
1560
1561 /* real mode (simpler !) */
1562 dt = &env->idt;
1563#ifndef VBOX
1564 if (intno * 4 + 3 > dt->limit)
1565#else
1566 if ((unsigned)intno * 4 + 3 > dt->limit)
1567#endif
1568 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1569 ptr = dt->base + intno * 4;
1570 offset = lduw_kernel(ptr);
1571 selector = lduw_kernel(ptr + 2);
1572 esp = ESP;
1573 ssp = env->segs[R_SS].base;
1574 if (is_int)
1575 old_eip = next_eip;
1576 else
1577 old_eip = env->eip;
1578 old_cs = env->segs[R_CS].selector;
1579 /* XXX: use SS segment size ? */
1580 PUSHW(ssp, esp, 0xffff, compute_eflags());
1581 PUSHW(ssp, esp, 0xffff, old_cs);
1582 PUSHW(ssp, esp, 0xffff, old_eip);
1583
1584 /* update processor state */
1585 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1586 env->eip = offset;
1587 env->segs[R_CS].selector = selector;
1588 env->segs[R_CS].base = (selector << 4);
1589 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1590}
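/*
 * Illustration (standalone sketch): the real-mode IVT lookup performed by
 * do_interrupt_real() above.  Each vector is four bytes at intno * 4: a
 * 16-bit offset followed by a 16-bit segment.  ivt_lookup() is a
 * hypothetical name used only for this sketch.
 */
#if 0
#include <stdint.h>

static void ivt_lookup(const uint8_t *ivt /* idt.base, 1KiB */, int intno,
                       uint16_t *cs, uint16_t *ip)
{
    const uint8_t *entry = ivt + intno * 4;
    *ip = entry[0] | (entry[1] << 8);   /* offset  */
    *cs = entry[2] | (entry[3] << 8);   /* segment */
}
#endif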
1591
1592/* fake user mode interrupt */
1593void do_interrupt_user(int intno, int is_int, int error_code,
1594 target_ulong next_eip)
1595{
1596 SegmentCache *dt;
1597 target_ulong ptr;
1598 int dpl, cpl, shift;
1599 uint32_t e2;
1600
1601 dt = &env->idt;
1602 if (env->hflags & HF_LMA_MASK) {
1603 shift = 4;
1604 } else {
1605 shift = 3;
1606 }
1607 ptr = dt->base + (intno << shift);
1608 e2 = ldl_kernel(ptr + 4);
1609
1610 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1611 cpl = env->hflags & HF_CPL_MASK;
1612 /* check privilege if software int */
1613 if (is_int && dpl < cpl)
1614 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1615
1616 /* Since we emulate only user space, we cannot do more than
1617 exit the emulation with the suitable exception and error
1618 code */
1619 if (is_int)
1620 EIP = next_eip;
1621}
1622
1623#if !defined(CONFIG_USER_ONLY)
1624static void handle_even_inj(int intno, int is_int, int error_code,
1625 int is_hw, int rm)
1626{
1627 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1628 if (!(event_inj & SVM_EVTINJ_VALID)) {
1629 int type;
1630 if (is_int)
1631 type = SVM_EVTINJ_TYPE_SOFT;
1632 else
1633 type = SVM_EVTINJ_TYPE_EXEPT;
1634 event_inj = intno | type | SVM_EVTINJ_VALID;
1635 if (!rm && exeption_has_error_code(intno)) {
1636 event_inj |= SVM_EVTINJ_VALID_ERR;
1637 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1638 }
1639 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1640 }
1641}
1642#endif
1643
1644/*
1645 * Begin execution of an interrupt. is_int is TRUE if coming from
1646 * the int instruction. next_eip is the EIP value AFTER the interrupt
1647 * instruction. It is only relevant if is_int is TRUE.
1648 */
1649void do_interrupt(int intno, int is_int, int error_code,
1650 target_ulong next_eip, int is_hw)
1651{
1652 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1653 if ((env->cr[0] & CR0_PE_MASK)) {
1654 static int count;
1655 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1656 count, intno, error_code, is_int,
1657 env->hflags & HF_CPL_MASK,
1658 env->segs[R_CS].selector, EIP,
1659 (int)env->segs[R_CS].base + EIP,
1660 env->segs[R_SS].selector, ESP);
1661 if (intno == 0x0e) {
1662 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1663 } else {
1664 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1665 }
1666 qemu_log("\n");
1667 log_cpu_state(env, X86_DUMP_CCOP);
1668#if 0
1669 {
1670 int i;
1671 uint8_t *ptr;
1672 qemu_log(" code=");
1673 ptr = env->segs[R_CS].base + env->eip;
1674 for(i = 0; i < 16; i++) {
1675 qemu_log(" %02x", ldub(ptr + i));
1676 }
1677 qemu_log("\n");
1678 }
1679#endif
1680 count++;
1681 }
1682 }
1683#ifdef VBOX
1684 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1685 if (is_int) {
1686 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1687 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1688 } else {
1689 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1690 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1691 }
1692 }
1693#endif
1694 if (env->cr[0] & CR0_PE_MASK) {
1695#if !defined(CONFIG_USER_ONLY)
1696 if (env->hflags & HF_SVMI_MASK)
1697 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1698#endif
1699#ifdef TARGET_X86_64
1700 if (env->hflags & HF_LMA_MASK) {
1701 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1702 } else
1703#endif
1704 {
1705#ifdef VBOX
1706 /* int xx *, v86 code and VME enabled? */
1707 if ( (env->eflags & VM_MASK)
1708 && (env->cr[4] & CR4_VME_MASK)
1709 && is_int
1710 && !is_hw
1711 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1712 )
1713 do_soft_interrupt_vme(intno, error_code, next_eip);
1714 else
1715#endif /* VBOX */
1716 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1717 }
1718 } else {
1719#if !defined(CONFIG_USER_ONLY)
1720 if (env->hflags & HF_SVMI_MASK)
1721 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1722#endif
1723 do_interrupt_real(intno, is_int, error_code, next_eip);
1724 }
1725
1726#if !defined(CONFIG_USER_ONLY)
1727 if (env->hflags & HF_SVMI_MASK) {
1728 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1729 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1730 }
1731#endif
1732}
1733
1734/* This should come from sysemu.h - if we could include it here... */
1735void qemu_system_reset_request(void);
1736
1737/*
1738 * Check nested exceptions and change to double or triple fault if
1739 * needed. It should only be called if this is not an interrupt.
1740 * Returns the new exception number.
1741 */
1742static int check_exception(int intno, int *error_code)
1743{
1744 int first_contributory = env->old_exception == 0 ||
1745 (env->old_exception >= 10 &&
1746 env->old_exception <= 13);
1747 int second_contributory = intno == 0 ||
1748 (intno >= 10 && intno <= 13);
1749
1750 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1751 env->old_exception, intno);
1752
1753#if !defined(CONFIG_USER_ONLY)
1754 if (env->old_exception == EXCP08_DBLE) {
1755 if (env->hflags & HF_SVMI_MASK)
1756 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1757
1758 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1759
1760# ifndef VBOX
1761 qemu_system_reset_request();
1762# else
1763 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1764# endif
1765 return EXCP_HLT;
1766 }
1767#endif
1768
1769 if ((first_contributory && second_contributory)
1770 || (env->old_exception == EXCP0E_PAGE &&
1771 (second_contributory || (intno == EXCP0E_PAGE)))) {
1772 intno = EXCP08_DBLE;
1773 *error_code = 0;
1774 }
1775
1776 if (second_contributory || (intno == EXCP0E_PAGE) ||
1777 (intno == EXCP08_DBLE))
1778 env->old_exception = intno;
1779
1780 return intno;
1781}
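/*
 * Illustration (standalone sketch): the double-fault rule implemented by
 * check_exception() above, written as a plain predicate.  Vectors 0 and
 * 10-13 form the "contributory" class; a page fault (vector 14) escalates
 * when it is followed by another page fault or a contributory exception.
 * The function names are hypothetical.
 */
#if 0
#include <stdbool.h>

static bool is_contributory(int vec)
{
    return vec == 0 || (vec >= 10 && vec <= 13);
}

static bool becomes_double_fault(int old_vec, int new_vec)
{
    if (is_contributory(old_vec) && is_contributory(new_vec))
        return true;
    if (old_vec == 14 && (is_contributory(new_vec) || new_vec == 14))
        return true;
    return false;
}
#endif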
1782
1783/*
1784 * Signal an interrupt. It is executed in the main CPU loop.
1785 * is_int is TRUE if coming from the int instruction. next_eip is the
1786 * EIP value AFTER the interrupt instruction. It is only relevant if
1787 * is_int is TRUE.
1788 */
1789static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1790 int next_eip_addend)
1791{
1792#if defined(VBOX) && defined(DEBUG)
1793 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1794#endif
1795 if (!is_int) {
1796 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1797 intno = check_exception(intno, &error_code);
1798 } else {
1799 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1800 }
1801
1802 env->exception_index = intno;
1803 env->error_code = error_code;
1804 env->exception_is_int = is_int;
1805 env->exception_next_eip = env->eip + next_eip_addend;
1806 cpu_loop_exit();
1807}
1808
1809/* shortcuts to generate exceptions */
1810
1811void raise_exception_err(int exception_index, int error_code)
1812{
1813 raise_interrupt(exception_index, 0, error_code, 0);
1814}
1815
1816void raise_exception(int exception_index)
1817{
1818 raise_interrupt(exception_index, 0, 0, 0);
1819}
1820
1821void raise_exception_env(int exception_index, CPUState *nenv)
1822{
1823 env = nenv;
1824 raise_exception(exception_index);
1825}
1826/* SMM support */
1827
1828#if defined(CONFIG_USER_ONLY)
1829
1830void do_smm_enter(void)
1831{
1832}
1833
1834void helper_rsm(void)
1835{
1836}
1837
1838#else
1839
1840#ifdef TARGET_X86_64
1841#define SMM_REVISION_ID 0x00020064
1842#else
1843#define SMM_REVISION_ID 0x00020000
1844#endif
1845
1846void do_smm_enter(void)
1847{
1848 target_ulong sm_state;
1849 SegmentCache *dt;
1850 int i, offset;
1851
1852 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1853 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1854
1855 env->hflags |= HF_SMM_MASK;
1856 cpu_smm_update(env);
1857
1858 sm_state = env->smbase + 0x8000;
1859
1860#ifdef TARGET_X86_64
1861 for(i = 0; i < 6; i++) {
1862 dt = &env->segs[i];
1863 offset = 0x7e00 + i * 16;
1864 stw_phys(sm_state + offset, dt->selector);
1865 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1866 stl_phys(sm_state + offset + 4, dt->limit);
1867 stq_phys(sm_state + offset + 8, dt->base);
1868 }
1869
1870 stq_phys(sm_state + 0x7e68, env->gdt.base);
1871 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1872
1873 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1874 stq_phys(sm_state + 0x7e78, env->ldt.base);
1875 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1876 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1877
1878 stq_phys(sm_state + 0x7e88, env->idt.base);
1879 stl_phys(sm_state + 0x7e84, env->idt.limit);
1880
1881 stw_phys(sm_state + 0x7e90, env->tr.selector);
1882 stq_phys(sm_state + 0x7e98, env->tr.base);
1883 stl_phys(sm_state + 0x7e94, env->tr.limit);
1884 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1885
1886 stq_phys(sm_state + 0x7ed0, env->efer);
1887
1888 stq_phys(sm_state + 0x7ff8, EAX);
1889 stq_phys(sm_state + 0x7ff0, ECX);
1890 stq_phys(sm_state + 0x7fe8, EDX);
1891 stq_phys(sm_state + 0x7fe0, EBX);
1892 stq_phys(sm_state + 0x7fd8, ESP);
1893 stq_phys(sm_state + 0x7fd0, EBP);
1894 stq_phys(sm_state + 0x7fc8, ESI);
1895 stq_phys(sm_state + 0x7fc0, EDI);
1896 for(i = 8; i < 16; i++)
1897 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1898 stq_phys(sm_state + 0x7f78, env->eip);
1899 stl_phys(sm_state + 0x7f70, compute_eflags());
1900 stl_phys(sm_state + 0x7f68, env->dr[6]);
1901 stl_phys(sm_state + 0x7f60, env->dr[7]);
1902
1903 stl_phys(sm_state + 0x7f48, env->cr[4]);
1904 stl_phys(sm_state + 0x7f50, env->cr[3]);
1905 stl_phys(sm_state + 0x7f58, env->cr[0]);
1906
1907 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1908 stl_phys(sm_state + 0x7f00, env->smbase);
1909#else
1910 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1911 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1912 stl_phys(sm_state + 0x7ff4, compute_eflags());
1913 stl_phys(sm_state + 0x7ff0, env->eip);
1914 stl_phys(sm_state + 0x7fec, EDI);
1915 stl_phys(sm_state + 0x7fe8, ESI);
1916 stl_phys(sm_state + 0x7fe4, EBP);
1917 stl_phys(sm_state + 0x7fe0, ESP);
1918 stl_phys(sm_state + 0x7fdc, EBX);
1919 stl_phys(sm_state + 0x7fd8, EDX);
1920 stl_phys(sm_state + 0x7fd4, ECX);
1921 stl_phys(sm_state + 0x7fd0, EAX);
1922 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1923 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1924
1925 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1926 stl_phys(sm_state + 0x7f64, env->tr.base);
1927 stl_phys(sm_state + 0x7f60, env->tr.limit);
1928 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1929
1930 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1931 stl_phys(sm_state + 0x7f80, env->ldt.base);
1932 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1933 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1934
1935 stl_phys(sm_state + 0x7f74, env->gdt.base);
1936 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1937
1938 stl_phys(sm_state + 0x7f58, env->idt.base);
1939 stl_phys(sm_state + 0x7f54, env->idt.limit);
1940
1941 for(i = 0; i < 6; i++) {
1942 dt = &env->segs[i];
1943 if (i < 3)
1944 offset = 0x7f84 + i * 12;
1945 else
1946 offset = 0x7f2c + (i - 3) * 12;
1947 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1948 stl_phys(sm_state + offset + 8, dt->base);
1949 stl_phys(sm_state + offset + 4, dt->limit);
1950 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1951 }
1952 stl_phys(sm_state + 0x7f14, env->cr[4]);
1953
1954 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1955 stl_phys(sm_state + 0x7ef8, env->smbase);
1956#endif
1957 /* init SMM cpu state */
1958
1959#ifdef TARGET_X86_64
1960 cpu_load_efer(env, 0);
1961#endif
1962 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1963 env->eip = 0x00008000;
1964 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1965 0xffffffff, 0);
1966 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1967 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1968 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1969 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1970 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1971
1972 cpu_x86_update_cr0(env,
1973 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1974 cpu_x86_update_cr4(env, 0);
1975 env->dr[7] = 0x00000400;
1976 CC_OP = CC_OP_EFLAGS;
1977}
1978
1979void helper_rsm(void)
1980{
1981#ifdef VBOX
1982 cpu_abort(env, "helper_rsm");
1983#else /* !VBOX */
1984 target_ulong sm_state;
1985 int i, offset;
1986 uint32_t val;
1987
1988 sm_state = env->smbase + 0x8000;
1989#ifdef TARGET_X86_64
1990 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1991
1992 for(i = 0; i < 6; i++) {
1993 offset = 0x7e00 + i * 16;
1994 cpu_x86_load_seg_cache(env, i,
1995 lduw_phys(sm_state + offset),
1996 ldq_phys(sm_state + offset + 8),
1997 ldl_phys(sm_state + offset + 4),
1998 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1999 }
2000
2001 env->gdt.base = ldq_phys(sm_state + 0x7e68);
2002 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
2003
2004 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
2005 env->ldt.base = ldq_phys(sm_state + 0x7e78);
2006 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
2007 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2008#ifdef VBOX
2009 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2010 env->ldt.newselector = 0;
2011#endif
2012
2013 env->idt.base = ldq_phys(sm_state + 0x7e88);
2014 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2015
2016 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2017 env->tr.base = ldq_phys(sm_state + 0x7e98);
2018 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2019 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2020#ifdef VBOX
2021 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2022 env->tr.newselector = 0;
2023#endif
2024
2025 EAX = ldq_phys(sm_state + 0x7ff8);
2026 ECX = ldq_phys(sm_state + 0x7ff0);
2027 EDX = ldq_phys(sm_state + 0x7fe8);
2028 EBX = ldq_phys(sm_state + 0x7fe0);
2029 ESP = ldq_phys(sm_state + 0x7fd8);
2030 EBP = ldq_phys(sm_state + 0x7fd0);
2031 ESI = ldq_phys(sm_state + 0x7fc8);
2032 EDI = ldq_phys(sm_state + 0x7fc0);
2033 for(i = 8; i < 16; i++)
2034 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2035 env->eip = ldq_phys(sm_state + 0x7f78);
2036 load_eflags(ldl_phys(sm_state + 0x7f70),
2037 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2038 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2039 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2040
2041 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2042 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2043 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2044
2045 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2046 if (val & 0x20000) {
2047 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2048 }
2049#else
2050 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2051 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2052 load_eflags(ldl_phys(sm_state + 0x7ff4),
2053 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2054 env->eip = ldl_phys(sm_state + 0x7ff0);
2055 EDI = ldl_phys(sm_state + 0x7fec);
2056 ESI = ldl_phys(sm_state + 0x7fe8);
2057 EBP = ldl_phys(sm_state + 0x7fe4);
2058 ESP = ldl_phys(sm_state + 0x7fe0);
2059 EBX = ldl_phys(sm_state + 0x7fdc);
2060 EDX = ldl_phys(sm_state + 0x7fd8);
2061 ECX = ldl_phys(sm_state + 0x7fd4);
2062 EAX = ldl_phys(sm_state + 0x7fd0);
2063 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2064 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2065
2066 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2067 env->tr.base = ldl_phys(sm_state + 0x7f64);
2068 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2069 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2070#ifdef VBOX
2071 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2072 env->tr.newselector = 0;
2073#endif
2074
2075 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2076 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2077 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2078 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2079#ifdef VBOX
2080 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2081 env->ldt.newselector = 0;
2082#endif
2083
2084 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2085 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2086
2087 env->idt.base = ldl_phys(sm_state + 0x7f58);
2088 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2089
2090 for(i = 0; i < 6; i++) {
2091 if (i < 3)
2092 offset = 0x7f84 + i * 12;
2093 else
2094 offset = 0x7f2c + (i - 3) * 12;
2095 cpu_x86_load_seg_cache(env, i,
2096 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2097 ldl_phys(sm_state + offset + 8),
2098 ldl_phys(sm_state + offset + 4),
2099 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2100 }
2101 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2102
2103 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2104 if (val & 0x20000) {
2105 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2106 }
2107#endif
2108 CC_OP = CC_OP_EFLAGS;
2109 env->hflags &= ~HF_SMM_MASK;
2110 cpu_smm_update(env);
2111
2112 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2113 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2114#endif /* !VBOX */
2115}
2116
2117#endif /* !CONFIG_USER_ONLY */
2118
2119
2120/* division, flags are undefined */
2121
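/*
 * Unsigned/signed divide helpers.  For the byte form the implicit dividend is
 * AX and the divisor is the byte operand; the quotient goes to AL and the
 * remainder to AH.  A zero divisor, or a quotient that does not fit the
 * destination, raises #DE.  Example: AX = 0x0105, DIV 0x10 -> AL = 0x10,
 * AH = 0x05.
 */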
2122void helper_divb_AL(target_ulong t0)
2123{
2124 unsigned int num, den, q, r;
2125
2126 num = (EAX & 0xffff);
2127 den = (t0 & 0xff);
2128 if (den == 0) {
2129 raise_exception(EXCP00_DIVZ);
2130 }
2131 q = (num / den);
2132 if (q > 0xff)
2133 raise_exception(EXCP00_DIVZ);
2134 q &= 0xff;
2135 r = (num % den) & 0xff;
2136 EAX = (EAX & ~0xffff) | (r << 8) | q;
2137}
2138
2139void helper_idivb_AL(target_ulong t0)
2140{
2141 int num, den, q, r;
2142
2143 num = (int16_t)EAX;
2144 den = (int8_t)t0;
2145 if (den == 0) {
2146 raise_exception(EXCP00_DIVZ);
2147 }
2148 q = (num / den);
2149 if (q != (int8_t)q)
2150 raise_exception(EXCP00_DIVZ);
2151 q &= 0xff;
2152 r = (num % den) & 0xff;
2153 EAX = (EAX & ~0xffff) | (r << 8) | q;
2154}
2155
2156void helper_divw_AX(target_ulong t0)
2157{
2158 unsigned int num, den, q, r;
2159
2160 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2161 den = (t0 & 0xffff);
2162 if (den == 0) {
2163 raise_exception(EXCP00_DIVZ);
2164 }
2165 q = (num / den);
2166 if (q > 0xffff)
2167 raise_exception(EXCP00_DIVZ);
2168 q &= 0xffff;
2169 r = (num % den) & 0xffff;
2170 EAX = (EAX & ~0xffff) | q;
2171 EDX = (EDX & ~0xffff) | r;
2172}
2173
2174void helper_idivw_AX(target_ulong t0)
2175{
2176 int num, den, q, r;
2177
2178 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2179 den = (int16_t)t0;
2180 if (den == 0) {
2181 raise_exception(EXCP00_DIVZ);
2182 }
2183 q = (num / den);
2184 if (q != (int16_t)q)
2185 raise_exception(EXCP00_DIVZ);
2186 q &= 0xffff;
2187 r = (num % den) & 0xffff;
2188 EAX = (EAX & ~0xffff) | q;
2189 EDX = (EDX & ~0xffff) | r;
2190}
2191
2192void helper_divl_EAX(target_ulong t0)
2193{
2194 unsigned int den, r;
2195 uint64_t num, q;
2196
2197 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2198 den = t0;
2199 if (den == 0) {
2200 raise_exception(EXCP00_DIVZ);
2201 }
2202 q = (num / den);
2203 r = (num % den);
2204 if (q > 0xffffffff)
2205 raise_exception(EXCP00_DIVZ);
2206 EAX = (uint32_t)q;
2207 EDX = (uint32_t)r;
2208}
2209
2210void helper_idivl_EAX(target_ulong t0)
2211{
2212 int den, r;
2213 int64_t num, q;
2214
2215 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2216 den = t0;
2217 if (den == 0) {
2218 raise_exception(EXCP00_DIVZ);
2219 }
2220 q = (num / den);
2221 r = (num % den);
2222 if (q != (int32_t)q)
2223 raise_exception(EXCP00_DIVZ);
2224 EAX = (uint32_t)q;
2225 EDX = (uint32_t)r;
2226}
2227
2228/* bcd */
2229
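/*
 * ASCII/BCD adjust helpers.  helper_aam/helper_aad implement AAM/AAD with an
 * arbitrary immediate base (10 by default): AAM splits AL into AH = AL / base
 * and AL = AL % base, e.g. AL = 63 gives AH = 6, AL = 3 for base 10.  The
 * AAA/AAS/DAA/DAS helpers below adjust AL after BCD arithmetic and recompute
 * AF/CF (plus ZF/PF/SF for DAA/DAS) by hand.
 */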
2230/* XXX: should raise #DE when the immediate base (divisor) is zero */
2231void helper_aam(int base)
2232{
2233 int al, ah;
2234 al = EAX & 0xff;
2235 ah = al / base;
2236 al = al % base;
2237 EAX = (EAX & ~0xffff) | al | (ah << 8);
2238 CC_DST = al;
2239}
2240
2241void helper_aad(int base)
2242{
2243 int al, ah;
2244 al = EAX & 0xff;
2245 ah = (EAX >> 8) & 0xff;
2246 al = ((ah * base) + al) & 0xff;
2247 EAX = (EAX & ~0xffff) | al;
2248 CC_DST = al;
2249}
2250
2251void helper_aaa(void)
2252{
2253 int icarry;
2254 int al, ah, af;
2255 int eflags;
2256
2257 eflags = helper_cc_compute_all(CC_OP);
2258 af = eflags & CC_A;
2259 al = EAX & 0xff;
2260 ah = (EAX >> 8) & 0xff;
2261
2262 icarry = (al > 0xf9);
2263 if (((al & 0x0f) > 9 ) || af) {
2264 al = (al + 6) & 0x0f;
2265 ah = (ah + 1 + icarry) & 0xff;
2266 eflags |= CC_C | CC_A;
2267 } else {
2268 eflags &= ~(CC_C | CC_A);
2269 al &= 0x0f;
2270 }
2271 EAX = (EAX & ~0xffff) | al | (ah << 8);
2272 CC_SRC = eflags;
2273}
2274
2275void helper_aas(void)
2276{
2277 int icarry;
2278 int al, ah, af;
2279 int eflags;
2280
2281 eflags = helper_cc_compute_all(CC_OP);
2282 af = eflags & CC_A;
2283 al = EAX & 0xff;
2284 ah = (EAX >> 8) & 0xff;
2285
2286 icarry = (al < 6);
2287 if (((al & 0x0f) > 9 ) || af) {
2288 al = (al - 6) & 0x0f;
2289 ah = (ah - 1 - icarry) & 0xff;
2290 eflags |= CC_C | CC_A;
2291 } else {
2292 eflags &= ~(CC_C | CC_A);
2293 al &= 0x0f;
2294 }
2295 EAX = (EAX & ~0xffff) | al | (ah << 8);
2296 CC_SRC = eflags;
2297}
2298
2299void helper_daa(void)
2300{
2301 int al, af, cf;
2302 int eflags;
2303
2304 eflags = helper_cc_compute_all(CC_OP);
2305 cf = eflags & CC_C;
2306 af = eflags & CC_A;
2307 al = EAX & 0xff;
2308
2309 eflags = 0;
2310 if (((al & 0x0f) > 9 ) || af) {
2311 al = (al + 6) & 0xff;
2312 eflags |= CC_A;
2313 }
2314 if ((al > 0x9f) || cf) {
2315 al = (al + 0x60) & 0xff;
2316 eflags |= CC_C;
2317 }
2318 EAX = (EAX & ~0xff) | al;
2319 /* well, speed is not an issue here, so we compute the flags by hand */
2320 eflags |= (al == 0) << 6; /* zf */
2321 eflags |= parity_table[al]; /* pf */
2322 eflags |= (al & 0x80); /* sf */
2323 CC_SRC = eflags;
2324}
2325
2326void helper_das(void)
2327{
2328 int al, al1, af, cf;
2329 int eflags;
2330
2331 eflags = helper_cc_compute_all(CC_OP);
2332 cf = eflags & CC_C;
2333 af = eflags & CC_A;
2334 al = EAX & 0xff;
2335
2336 eflags = 0;
2337 al1 = al;
2338 if (((al & 0x0f) > 9 ) || af) {
2339 eflags |= CC_A;
2340 if (al < 6 || cf)
2341 eflags |= CC_C;
2342 al = (al - 6) & 0xff;
2343 }
2344 if ((al1 > 0x99) || cf) {
2345 al = (al - 0x60) & 0xff;
2346 eflags |= CC_C;
2347 }
2348 EAX = (EAX & ~0xff) | al;
2349 /* well, speed is not an issue here, so we compute the flags by hand */
2350 eflags |= (al == 0) << 6; /* zf */
2351 eflags |= parity_table[al]; /* pf */
2352 eflags |= (al & 0x80); /* sf */
2353 CC_SRC = eflags;
2354}
2355
2356void helper_into(int next_eip_addend)
2357{
2358 int eflags;
2359 eflags = helper_cc_compute_all(CC_OP);
2360 if (eflags & CC_O) {
2361 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2362 }
2363}
2364
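/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match store
 * ECX:EBX and set ZF, otherwise load the memory value into EDX:EAX and clear
 * ZF.  The store is performed in both cases, mirroring the write cycle the
 * real instruction always issues.
 */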
2365void helper_cmpxchg8b(target_ulong a0)
2366{
2367 uint64_t d;
2368 int eflags;
2369
2370 eflags = helper_cc_compute_all(CC_OP);
2371 d = ldq(a0);
2372 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2373 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2374 eflags |= CC_Z;
2375 } else {
2376 /* always do the store */
2377 stq(a0, d);
2378 EDX = (uint32_t)(d >> 32);
2379 EAX = (uint32_t)d;
2380 eflags &= ~CC_Z;
2381 }
2382 CC_SRC = eflags;
2383}
2384
2385#ifdef TARGET_X86_64
2386void helper_cmpxchg16b(target_ulong a0)
2387{
2388 uint64_t d0, d1;
2389 int eflags;
2390
2391 if ((a0 & 0xf) != 0)
2392 raise_exception(EXCP0D_GPF);
2393 eflags = helper_cc_compute_all(CC_OP);
2394 d0 = ldq(a0);
2395 d1 = ldq(a0 + 8);
2396 if (d0 == EAX && d1 == EDX) {
2397 stq(a0, EBX);
2398 stq(a0 + 8, ECX);
2399 eflags |= CC_Z;
2400 } else {
2401 /* always do the store */
2402 stq(a0, d0);
2403 stq(a0 + 8, d1);
2404 EDX = d1;
2405 EAX = d0;
2406 eflags &= ~CC_Z;
2407 }
2408 CC_SRC = eflags;
2409}
2410#endif
2411
2412void helper_single_step(void)
2413{
2414#ifndef CONFIG_USER_ONLY
2415 check_hw_breakpoints(env, 1);
2416 env->dr[6] |= DR6_BS;
2417#endif
2418 raise_exception(EXCP01_DB);
2419}
2420
2421void helper_cpuid(void)
2422{
2423 uint32_t eax, ebx, ecx, edx;
2424
2425 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2426
2427 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2428 EAX = eax;
2429 EBX = ebx;
2430 ECX = ecx;
2431 EDX = edx;
2432}
2433
2434void helper_enter_level(int level, int data32, target_ulong t1)
2435{
2436 target_ulong ssp;
2437 uint32_t esp_mask, esp, ebp;
2438
2439 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2440 ssp = env->segs[R_SS].base;
2441 ebp = EBP;
2442 esp = ESP;
2443 if (data32) {
2444 /* 32 bit */
2445 esp -= 4;
2446 while (--level) {
2447 esp -= 4;
2448 ebp -= 4;
2449 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2450 }
2451 esp -= 4;
2452 stl(ssp + (esp & esp_mask), t1);
2453 } else {
2454 /* 16 bit */
2455 esp -= 2;
2456 while (--level) {
2457 esp -= 2;
2458 ebp -= 2;
2459 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2460 }
2461 esp -= 2;
2462 stw(ssp + (esp & esp_mask), t1);
2463 }
2464}
2465
2466#ifdef TARGET_X86_64
2467void helper_enter64_level(int level, int data64, target_ulong t1)
2468{
2469 target_ulong esp, ebp;
2470 ebp = EBP;
2471 esp = ESP;
2472
2473 if (data64) {
2474 /* 64 bit */
2475 esp -= 8;
2476 while (--level) {
2477 esp -= 8;
2478 ebp -= 8;
2479 stq(esp, ldq(ebp));
2480 }
2481 esp -= 8;
2482 stq(esp, t1);
2483 } else {
2484 /* 16 bit */
2485 esp -= 2;
2486 while (--level) {
2487 esp -= 2;
2488 ebp -= 2;
2489 stw(esp, lduw(ebp));
2490 }
2491 esp -= 2;
2492 stw(esp, t1);
2493 }
2494}
2495#endif
2496
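/*
 * LLDT: a NULL selector simply marks the LDT as unusable; otherwise the
 * selector must reference a present LDT descriptor (system type 2) in the
 * GDT.  In long mode the descriptor is 16 bytes and the upper dword supplies
 * bits 63:32 of the base.
 */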
2497void helper_lldt(int selector)
2498{
2499 SegmentCache *dt;
2500 uint32_t e1, e2;
2501#ifndef VBOX
2502 int index, entry_limit;
2503#else
2504 unsigned int index, entry_limit;
2505#endif
2506 target_ulong ptr;
2507
2508#ifdef VBOX
2509 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2510 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2511#endif
2512
2513 selector &= 0xffff;
2514 if ((selector & 0xfffc) == 0) {
2515 /* XXX: NULL selector case: invalid LDT */
2516 env->ldt.base = 0;
2517 env->ldt.limit = 0;
2518#ifdef VBOX
2519 env->ldt.flags = DESC_INTEL_UNUSABLE;
2520 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2521 env->ldt.newselector = 0;
2522#endif
2523 } else {
2524 if (selector & 0x4)
2525 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2526 dt = &env->gdt;
2527 index = selector & ~7;
2528#ifdef TARGET_X86_64
2529 if (env->hflags & HF_LMA_MASK)
2530 entry_limit = 15;
2531 else
2532#endif
2533 entry_limit = 7;
2534 if ((index + entry_limit) > dt->limit)
2535 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2536 ptr = dt->base + index;
2537 e1 = ldl_kernel(ptr);
2538 e2 = ldl_kernel(ptr + 4);
2539 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2540 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2541 if (!(e2 & DESC_P_MASK))
2542 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2543#ifdef TARGET_X86_64
2544 if (env->hflags & HF_LMA_MASK) {
2545 uint32_t e3;
2546 e3 = ldl_kernel(ptr + 8);
2547 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2548 env->ldt.base |= (target_ulong)e3 << 32;
2549 } else
2550#endif
2551 {
2552 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2553 }
2554 }
2555 env->ldt.selector = selector;
2556#ifdef VBOX
2557 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2558 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2559#endif
2560}
2561
2562void helper_ltr(int selector)
2563{
2564 SegmentCache *dt;
2565 uint32_t e1, e2;
2566#ifndef VBOX
2567 int index, type, entry_limit;
2568#else
2569 unsigned int index;
2570 int type, entry_limit;
2571#endif
2572 target_ulong ptr;
2573
2574#ifdef VBOX
2575 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2576 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2577 env->tr.flags, (RTSEL)(selector & 0xffff)));
2578#endif
2579 selector &= 0xffff;
2580 if ((selector & 0xfffc) == 0) {
2581 /* NULL selector case: invalid TR */
2582 env->tr.base = 0;
2583 env->tr.limit = 0;
2584 env->tr.flags = 0;
2585#ifdef VBOX
2586 env->tr.flags = DESC_INTEL_UNUSABLE;
2587 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2588 env->tr.newselector = 0;
2589#endif
2590 } else {
2591 if (selector & 0x4)
2592 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2593 dt = &env->gdt;
2594 index = selector & ~7;
2595#ifdef TARGET_X86_64
2596 if (env->hflags & HF_LMA_MASK)
2597 entry_limit = 15;
2598 else
2599#endif
2600 entry_limit = 7;
2601 if ((index + entry_limit) > dt->limit)
2602 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2603 ptr = dt->base + index;
2604 e1 = ldl_kernel(ptr);
2605 e2 = ldl_kernel(ptr + 4);
2606 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2607 if ((e2 & DESC_S_MASK) ||
2608 (type != 1 && type != 9))
2609 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2610 if (!(e2 & DESC_P_MASK))
2611 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2612#ifdef TARGET_X86_64
2613 if (env->hflags & HF_LMA_MASK) {
2614 uint32_t e3, e4;
2615 e3 = ldl_kernel(ptr + 8);
2616 e4 = ldl_kernel(ptr + 12);
2617 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2618 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2619 load_seg_cache_raw_dt(&env->tr, e1, e2);
2620 env->tr.base |= (target_ulong)e3 << 32;
2621 } else
2622#endif
2623 {
2624 load_seg_cache_raw_dt(&env->tr, e1, e2);
2625 }
2626 e2 |= DESC_TSS_BUSY_MASK;
2627 stl_kernel(ptr + 4, e2);
2628 }
2629 env->tr.selector = selector;
2630#ifdef VBOX
2631 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2632 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2633 env->tr.flags, (RTSEL)(selector & 0xffff)));
2634#endif
2635}
2636
2637/* Only works in protected mode and not in VM86 mode. seg_reg must be != R_CS. */
2638void helper_load_seg(int seg_reg, int selector)
2639{
2640 uint32_t e1, e2;
2641 int cpl, dpl, rpl;
2642 SegmentCache *dt;
2643#ifndef VBOX
2644 int index;
2645#else
2646 unsigned int index;
2647#endif
2648 target_ulong ptr;
2649
2650 selector &= 0xffff;
2651 cpl = env->hflags & HF_CPL_MASK;
2652#ifdef VBOX
2653
2654    /* Trying to load a selector with RPL=1 while running raw ring-0 guest code? */
2655 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2656 {
2657 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2658 selector = selector & 0xfffc;
2659 }
2660#endif /* VBOX */
2661 if ((selector & 0xfffc) == 0) {
2662 /* null selector case */
2663#ifndef VBOX
2664 if (seg_reg == R_SS
2665#ifdef TARGET_X86_64
2666 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2667#endif
2668 )
2669 raise_exception_err(EXCP0D_GPF, 0);
2670 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2671#else
2672 if (seg_reg == R_SS) {
2673 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2674 raise_exception_err(EXCP0D_GPF, 0);
2675 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2676 } else {
2677 e2 = DESC_INTEL_UNUSABLE;
2678 }
2679 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, e2);
2680#endif
2681 } else {
2682
2683 if (selector & 0x4)
2684 dt = &env->ldt;
2685 else
2686 dt = &env->gdt;
2687 index = selector & ~7;
2688 if ((index + 7) > dt->limit)
2689 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2690 ptr = dt->base + index;
2691 e1 = ldl_kernel(ptr);
2692 e2 = ldl_kernel(ptr + 4);
2693
2694 if (!(e2 & DESC_S_MASK))
2695 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2696 rpl = selector & 3;
2697 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2698 if (seg_reg == R_SS) {
2699 /* must be writable segment */
2700 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2701 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2702 if (rpl != cpl || dpl != cpl)
2703 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2704 } else {
2705 /* must be readable segment */
2706 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2707 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2708
2709 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2710 /* if not conforming code, test rights */
2711 if (dpl < cpl || dpl < rpl)
2712 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2713 }
2714 }
2715
2716 if (!(e2 & DESC_P_MASK)) {
2717 if (seg_reg == R_SS)
2718 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2719 else
2720 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2721 }
2722
2723 /* set the access bit if not already set */
2724 if (!(e2 & DESC_A_MASK)) {
2725 e2 |= DESC_A_MASK;
2726 stl_kernel(ptr + 4, e2);
2727 }
2728
2729 cpu_x86_load_seg_cache(env, seg_reg, selector,
2730 get_seg_base(e1, e2),
2731 get_seg_limit(e1, e2),
2732 e2);
2733#if 0
2734 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2735 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2736#endif
2737 }
2738}
2739
2740/* protected mode jump */
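/*
 * A direct code-segment target is loaded in place (conforming segments need
 * DPL <= CPL, non-conforming ones need RPL <= CPL and DPL == CPL), while
 * system descriptors dispatch either to a task switch (TSS or task gate) or
 * through a 286/386 call gate to the code segment named in the gate.
 */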
2741void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2742 int next_eip_addend)
2743{
2744 int gate_cs, type;
2745 uint32_t e1, e2, cpl, dpl, rpl, limit;
2746 target_ulong next_eip;
2747
2748#ifdef VBOX /** @todo Why do we do this? */
2749 e1 = e2 = 0;
2750#endif
2751 if ((new_cs & 0xfffc) == 0)
2752 raise_exception_err(EXCP0D_GPF, 0);
2753 if (load_segment(&e1, &e2, new_cs) != 0)
2754 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2755 cpl = env->hflags & HF_CPL_MASK;
2756 if (e2 & DESC_S_MASK) {
2757 if (!(e2 & DESC_CS_MASK))
2758 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2759 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2760 if (e2 & DESC_C_MASK) {
2761 /* conforming code segment */
2762 if (dpl > cpl)
2763 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2764 } else {
2765 /* non conforming code segment */
2766 rpl = new_cs & 3;
2767 if (rpl > cpl)
2768 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2769 if (dpl != cpl)
2770 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2771 }
2772 if (!(e2 & DESC_P_MASK))
2773 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2774 limit = get_seg_limit(e1, e2);
2775 if (new_eip > limit &&
2776 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2777 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2778#ifdef VBOX
2779 if (!(e2 & DESC_A_MASK))
2780 e2 = set_segment_accessed(new_cs, e2);
2781#endif
2782 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2783 get_seg_base(e1, e2), limit, e2);
2784 EIP = new_eip;
2785 } else {
2786 /* jump to call or task gate */
2787 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2788 rpl = new_cs & 3;
2789 cpl = env->hflags & HF_CPL_MASK;
2790 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2791 switch(type) {
2792 case 1: /* 286 TSS */
2793 case 9: /* 386 TSS */
2794 case 5: /* task gate */
2795 if (dpl < cpl || dpl < rpl)
2796 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2797 next_eip = env->eip + next_eip_addend;
2798 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2799 CC_OP = CC_OP_EFLAGS;
2800 break;
2801 case 4: /* 286 call gate */
2802 case 12: /* 386 call gate */
2803 if ((dpl < cpl) || (dpl < rpl))
2804 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2805 if (!(e2 & DESC_P_MASK))
2806 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2807 gate_cs = e1 >> 16;
2808 new_eip = (e1 & 0xffff);
2809 if (type == 12)
2810 new_eip |= (e2 & 0xffff0000);
2811 if (load_segment(&e1, &e2, gate_cs) != 0)
2812 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2813 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2814 /* must be code segment */
2815 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2816 (DESC_S_MASK | DESC_CS_MASK)))
2817 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2818 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2819 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2820 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2821 if (!(e2 & DESC_P_MASK))
2822#ifdef VBOX /* See page 3-514 of 253666.pdf */
2823 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2824#else
2825 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2826#endif
2827 limit = get_seg_limit(e1, e2);
2828 if (new_eip > limit)
2829 raise_exception_err(EXCP0D_GPF, 0);
2830 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2831 get_seg_base(e1, e2), limit, e2);
2832 EIP = new_eip;
2833 break;
2834 default:
2835 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2836 break;
2837 }
2838 }
2839}
2840
2841/* real mode call */
2842void helper_lcall_real(int new_cs, target_ulong new_eip1,
2843 int shift, int next_eip)
2844{
2845 int new_eip;
2846 uint32_t esp, esp_mask;
2847 target_ulong ssp;
2848
2849 new_eip = new_eip1;
2850 esp = ESP;
2851 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2852 ssp = env->segs[R_SS].base;
2853 if (shift) {
2854 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2855 PUSHL(ssp, esp, esp_mask, next_eip);
2856 } else {
2857 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2858 PUSHW(ssp, esp, esp_mask, next_eip);
2859 }
2860
2861 SET_ESP(esp, esp_mask);
2862 env->eip = new_eip;
2863 env->segs[R_CS].selector = new_cs;
2864 env->segs[R_CS].base = (new_cs << 4);
2865}
2866
2867/* protected mode call */
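/*
 * Besides the plain code-segment case this handles call gates: a transfer to
 * a more privileged level fetches the new SS:ESP for the target DPL from the
 * TSS, pushes the caller's SS:ESP plus param_count parameters copied from the
 * old stack, and finally pushes the return CS:EIP before loading CS at the
 * gate's DPL.
 */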
2868void helper_lcall_protected(int new_cs, target_ulong new_eip,
2869 int shift, int next_eip_addend)
2870{
2871 int new_stack, i;
2872 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2873 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2874 uint32_t val, limit, old_sp_mask;
2875 target_ulong ssp, old_ssp, next_eip;
2876
2877#ifdef VBOX /** @todo Why do we do this? */
2878 e1 = e2 = 0;
2879#endif
2880 next_eip = env->eip + next_eip_addend;
2881 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2882 LOG_PCALL_STATE(env);
2883 if ((new_cs & 0xfffc) == 0)
2884 raise_exception_err(EXCP0D_GPF, 0);
2885 if (load_segment(&e1, &e2, new_cs) != 0)
2886 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2887 cpl = env->hflags & HF_CPL_MASK;
2888 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2889 if (e2 & DESC_S_MASK) {
2890 if (!(e2 & DESC_CS_MASK))
2891 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2893 if (e2 & DESC_C_MASK) {
2894 /* conforming code segment */
2895 if (dpl > cpl)
2896 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2897 } else {
2898 /* non conforming code segment */
2899 rpl = new_cs & 3;
2900 if (rpl > cpl)
2901 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2902 if (dpl != cpl)
2903 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2904 }
2905 if (!(e2 & DESC_P_MASK))
2906 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2907#ifdef VBOX
2908 if (!(e2 & DESC_A_MASK))
2909 e2 = set_segment_accessed(new_cs, e2);
2910#endif
2911
2912#ifdef TARGET_X86_64
2913 /* XXX: check 16/32 bit cases in long mode */
2914 if (shift == 2) {
2915 target_ulong rsp;
2916 /* 64 bit case */
2917 rsp = ESP;
2918 PUSHQ(rsp, env->segs[R_CS].selector);
2919 PUSHQ(rsp, next_eip);
2920 /* from this point, not restartable */
2921 ESP = rsp;
2922 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2923 get_seg_base(e1, e2),
2924 get_seg_limit(e1, e2), e2);
2925 EIP = new_eip;
2926 } else
2927#endif
2928 {
2929 sp = ESP;
2930 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2931 ssp = env->segs[R_SS].base;
2932 if (shift) {
2933 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2934 PUSHL(ssp, sp, sp_mask, next_eip);
2935 } else {
2936 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2937 PUSHW(ssp, sp, sp_mask, next_eip);
2938 }
2939
2940 limit = get_seg_limit(e1, e2);
2941 if (new_eip > limit)
2942 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2943 /* from this point, not restartable */
2944 SET_ESP(sp, sp_mask);
2945 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2946 get_seg_base(e1, e2), limit, e2);
2947 EIP = new_eip;
2948 }
2949 } else {
2950 /* check gate type */
2951 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2952 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2953 rpl = new_cs & 3;
2954 switch(type) {
2955 case 1: /* available 286 TSS */
2956 case 9: /* available 386 TSS */
2957 case 5: /* task gate */
2958 if (dpl < cpl || dpl < rpl)
2959 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2960 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2961 CC_OP = CC_OP_EFLAGS;
2962 return;
2963 case 4: /* 286 call gate */
2964 case 12: /* 386 call gate */
2965 break;
2966 default:
2967 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2968 break;
2969 }
2970 shift = type >> 3;
2971
2972 if (dpl < cpl || dpl < rpl)
2973 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2974 /* check valid bit */
2975 if (!(e2 & DESC_P_MASK))
2976 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2977 selector = e1 >> 16;
2978 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2979 param_count = e2 & 0x1f;
2980 if ((selector & 0xfffc) == 0)
2981 raise_exception_err(EXCP0D_GPF, 0);
2982
2983 if (load_segment(&e1, &e2, selector) != 0)
2984 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2985 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2986 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2987 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2988 if (dpl > cpl)
2989 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2990 if (!(e2 & DESC_P_MASK))
2991 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2992
2993 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2994 /* to inner privilege */
2995 get_ss_esp_from_tss(&ss, &sp, dpl);
2996 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2997 ss, sp, param_count, ESP);
2998 if ((ss & 0xfffc) == 0)
2999 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3000 if ((ss & 3) != dpl)
3001 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3002 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3003 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3004 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3005 if (ss_dpl != dpl)
3006 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3007 if (!(ss_e2 & DESC_S_MASK) ||
3008 (ss_e2 & DESC_CS_MASK) ||
3009 !(ss_e2 & DESC_W_MASK))
3010 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3011 if (!(ss_e2 & DESC_P_MASK))
3012#ifdef VBOX /* See page 3-99 of 253666.pdf */
3013 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3014#else
3015 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3016#endif
3017
3018 // push_size = ((param_count * 2) + 8) << shift;
3019
3020 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3021 old_ssp = env->segs[R_SS].base;
3022
3023 sp_mask = get_sp_mask(ss_e2);
3024 ssp = get_seg_base(ss_e1, ss_e2);
3025 if (shift) {
3026 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3027 PUSHL(ssp, sp, sp_mask, ESP);
3028 for(i = param_count - 1; i >= 0; i--) {
3029 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3030 PUSHL(ssp, sp, sp_mask, val);
3031 }
3032 } else {
3033 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3034 PUSHW(ssp, sp, sp_mask, ESP);
3035 for(i = param_count - 1; i >= 0; i--) {
3036 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3037 PUSHW(ssp, sp, sp_mask, val);
3038 }
3039 }
3040 new_stack = 1;
3041 } else {
3042 /* to same privilege */
3043 sp = ESP;
3044 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3045 ssp = env->segs[R_SS].base;
3046 // push_size = (4 << shift);
3047 new_stack = 0;
3048 }
3049
3050 if (shift) {
3051 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3052 PUSHL(ssp, sp, sp_mask, next_eip);
3053 } else {
3054 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3055 PUSHW(ssp, sp, sp_mask, next_eip);
3056 }
3057
3058 /* from this point, not restartable */
3059
3060 if (new_stack) {
3061 ss = (ss & ~3) | dpl;
3062 cpu_x86_load_seg_cache(env, R_SS, ss,
3063 ssp,
3064 get_seg_limit(ss_e1, ss_e2),
3065 ss_e2);
3066 }
3067
3068 selector = (selector & ~3) | dpl;
3069 cpu_x86_load_seg_cache(env, R_CS, selector,
3070 get_seg_base(e1, e2),
3071 get_seg_limit(e1, e2),
3072 e2);
3073 cpu_x86_set_cpl(env, dpl);
3074 SET_ESP(sp, sp_mask);
3075 EIP = offset;
3076 }
3077}
3078
3079/* real and vm86 mode iret */
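/*
 * Pops EIP, CS and EFLAGS as a 16- or 32-bit frame.  The VBox-specific path
 * emulates VME: with VM=1, IOPL < 3 and CR4.VME set, a frame that would
 * enable interrupts while VIP is pending (or that sets TF) raises #GP, and
 * the popped IF bit is reflected into VIF instead of IF.
 */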
3080void helper_iret_real(int shift)
3081{
3082 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3083 target_ulong ssp;
3084 int eflags_mask;
3085#ifdef VBOX
3086 bool fVME = false;
3087
3088 remR3TrapClear(env->pVM);
3089#endif /* VBOX */
3090
3091 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3092 sp = ESP;
3093 ssp = env->segs[R_SS].base;
3094 if (shift == 1) {
3095 /* 32 bits */
3096 POPL(ssp, sp, sp_mask, new_eip);
3097 POPL(ssp, sp, sp_mask, new_cs);
3098 new_cs &= 0xffff;
3099 POPL(ssp, sp, sp_mask, new_eflags);
3100 } else {
3101 /* 16 bits */
3102 POPW(ssp, sp, sp_mask, new_eip);
3103 POPW(ssp, sp, sp_mask, new_cs);
3104 POPW(ssp, sp, sp_mask, new_eflags);
3105 }
3106#ifdef VBOX
3107 if ( (env->eflags & VM_MASK)
3108 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3109 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3110 {
3111 fVME = true;
3112 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3113 /* if TF will be set -> #GP */
3114 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3115 || (new_eflags & TF_MASK))
3116 raise_exception(EXCP0D_GPF);
3117 }
3118#endif /* VBOX */
3119 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3120 env->segs[R_CS].selector = new_cs;
3121 env->segs[R_CS].base = (new_cs << 4);
3122 env->eip = new_eip;
3123#ifdef VBOX
3124 if (fVME)
3125 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3126 else
3127#endif
3128 if (env->eflags & VM_MASK)
3129 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3130 else
3131 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3132 if (shift == 0)
3133 eflags_mask &= 0xffff;
3134 load_eflags(new_eflags, eflags_mask);
3135 env->hflags2 &= ~HF2_NMI_MASK;
3136#ifdef VBOX
3137 if (fVME)
3138 {
3139 if (new_eflags & IF_MASK)
3140 env->eflags |= VIF_MASK;
3141 else
3142 env->eflags &= ~VIF_MASK;
3143 }
3144#endif /* VBOX */
3145}
3146
3147static inline void validate_seg(int seg_reg, int cpl)
3148{
3149 int dpl;
3150 uint32_t e2;
3151
3152 /* XXX: on x86_64, we do not want to nullify FS and GS because
3153 they may still contain a valid base. I would be interested to
3154 know how a real x86_64 CPU behaves */
3155 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3156 (env->segs[seg_reg].selector & 0xfffc) == 0)
3157 return;
3158
3159 e2 = env->segs[seg_reg].flags;
3160 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3161 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3162 /* data or non conforming code segment */
3163 if (dpl < cpl) {
3164 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3165 }
3166 }
3167}
3168
3169/* protected mode return: common worker for LRET and IRET */
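/*
 * Pops CS:EIP (plus EFLAGS for IRET), validates the new code segment and, on
 * a return to an outer privilege level, also pops SS:ESP, reloads the stack
 * and clears any data segment register whose DPL is lower than the new CPL
 * (see validate_seg above).
 */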
3170static inline void helper_ret_protected(int shift, int is_iret, int addend)
3171{
3172 uint32_t new_cs, new_eflags, new_ss;
3173 uint32_t new_es, new_ds, new_fs, new_gs;
3174 uint32_t e1, e2, ss_e1, ss_e2;
3175 int cpl, dpl, rpl, eflags_mask, iopl;
3176 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3177
3178#ifdef VBOX /** @todo Why do we do this? */
3179 ss_e1 = ss_e2 = e1 = e2 = 0;
3180#endif
3181
3182#ifdef TARGET_X86_64
3183 if (shift == 2)
3184 sp_mask = -1;
3185 else
3186#endif
3187 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3188 sp = ESP;
3189 ssp = env->segs[R_SS].base;
3190 new_eflags = 0; /* avoid warning */
3191#ifdef TARGET_X86_64
3192 if (shift == 2) {
3193 POPQ(sp, new_eip);
3194 POPQ(sp, new_cs);
3195 new_cs &= 0xffff;
3196 if (is_iret) {
3197 POPQ(sp, new_eflags);
3198 }
3199 } else
3200#endif
3201 if (shift == 1) {
3202 /* 32 bits */
3203 POPL(ssp, sp, sp_mask, new_eip);
3204 POPL(ssp, sp, sp_mask, new_cs);
3205 new_cs &= 0xffff;
3206 if (is_iret) {
3207 POPL(ssp, sp, sp_mask, new_eflags);
3208#define LOG_GROUP LOG_GROUP_REM
3209#if defined(VBOX) && defined(DEBUG)
3210 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3211 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3212 Log(("iret: new EFLAGS %08X\n", new_eflags));
3213 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3214#endif
3215 if (new_eflags & VM_MASK)
3216 goto return_to_vm86;
3217 }
3218#ifdef VBOX
3219 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3220 {
3221 if ( !EMIsRawRing1Enabled(env->pVM)
3222 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3223 {
3224 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3225 new_cs = new_cs & 0xfffc;
3226 }
3227 else
3228 {
3229 /* Ugly assumption: assume a genuine switch to ring-1. */
3230 Log(("Genuine switch to ring-1 (iret)\n"));
3231 }
3232 }
3233 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3234 {
3235 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3236 new_cs = (new_cs & 0xfffc) | 1;
3237 }
3238#endif
3239 } else {
3240 /* 16 bits */
3241 POPW(ssp, sp, sp_mask, new_eip);
3242 POPW(ssp, sp, sp_mask, new_cs);
3243 if (is_iret)
3244 POPW(ssp, sp, sp_mask, new_eflags);
3245 }
3246 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3247 new_cs, new_eip, shift, addend);
3248 LOG_PCALL_STATE(env);
3249 if ((new_cs & 0xfffc) == 0)
3250 {
3251#if defined(VBOX) && defined(DEBUG)
3252        Log(("(new_cs & 0xfffc) == 0\n"));
3253#endif
3254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3255 }
3256 if (load_segment(&e1, &e2, new_cs) != 0)
3257 {
3258#if defined(VBOX) && defined(DEBUG)
3259 Log(("load_segment failed\n"));
3260#endif
3261 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3262 }
3263 if (!(e2 & DESC_S_MASK) ||
3264 !(e2 & DESC_CS_MASK))
3265 {
3266#if defined(VBOX) && defined(DEBUG)
3267 Log(("e2 mask %08x\n", e2));
3268#endif
3269 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3270 }
3271 cpl = env->hflags & HF_CPL_MASK;
3272 rpl = new_cs & 3;
3273 if (rpl < cpl)
3274 {
3275#if defined(VBOX) && defined(DEBUG)
3276 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3277#endif
3278 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3279 }
3280 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3281
3282 if (e2 & DESC_C_MASK) {
3283 if (dpl > rpl)
3284 {
3285#if defined(VBOX) && defined(DEBUG)
3286 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3287#endif
3288 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3289 }
3290 } else {
3291 if (dpl != rpl)
3292 {
3293#if defined(VBOX) && defined(DEBUG)
3294 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3295#endif
3296 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3297 }
3298 }
3299 if (!(e2 & DESC_P_MASK))
3300 {
3301#if defined(VBOX) && defined(DEBUG)
3302 Log(("DESC_P_MASK e2=%08x\n", e2));
3303#endif
3304 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3305 }
3306
3307 sp += addend;
3308 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3309 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3310 /* return to same privilege level */
3311#ifdef VBOX
3312 if (!(e2 & DESC_A_MASK))
3313 e2 = set_segment_accessed(new_cs, e2);
3314#endif
3315 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3316 get_seg_base(e1, e2),
3317 get_seg_limit(e1, e2),
3318 e2);
3319 } else {
3320 /* return to different privilege level */
3321#ifdef TARGET_X86_64
3322 if (shift == 2) {
3323 POPQ(sp, new_esp);
3324 POPQ(sp, new_ss);
3325 new_ss &= 0xffff;
3326 } else
3327#endif
3328 if (shift == 1) {
3329 /* 32 bits */
3330 POPL(ssp, sp, sp_mask, new_esp);
3331 POPL(ssp, sp, sp_mask, new_ss);
3332 new_ss &= 0xffff;
3333 } else {
3334 /* 16 bits */
3335 POPW(ssp, sp, sp_mask, new_esp);
3336 POPW(ssp, sp, sp_mask, new_ss);
3337 }
3338 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3339 new_ss, new_esp);
3340 if ((new_ss & 0xfffc) == 0) {
3341#ifdef TARGET_X86_64
3342            /* NULL SS is allowed in long mode if cpl != 3 */
3343# ifndef VBOX
3344 /* XXX: test CS64 ? */
3345 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3346 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3347 0, 0xffffffff,
3348 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3349 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3350 DESC_W_MASK | DESC_A_MASK);
3351 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3352 } else
3353# else /* VBOX */
3354 if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
3355 if (!(e2 & DESC_A_MASK))
3356 e2 = set_segment_accessed(new_cs, e2);
3357 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3358 0, 0xffffffff,
3359 DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
3360 ss_e2 = DESC_B_MASK; /* not really used */
3361 } else
3362# endif
3363#endif
3364 {
3365#if defined(VBOX) && defined(DEBUG)
3366 Log(("NULL ss, rpl=%d\n", rpl));
3367#endif
3368 raise_exception_err(EXCP0D_GPF, 0);
3369 }
3370 } else {
3371 if ((new_ss & 3) != rpl)
3372 {
3373#if defined(VBOX) && defined(DEBUG)
3374 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3375#endif
3376 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3377 }
3378 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3379 {
3380#if defined(VBOX) && defined(DEBUG)
3381 Log(("new_ss=%x load error\n", new_ss));
3382#endif
3383 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3384 }
3385 if (!(ss_e2 & DESC_S_MASK) ||
3386 (ss_e2 & DESC_CS_MASK) ||
3387 !(ss_e2 & DESC_W_MASK))
3388 {
3389#if defined(VBOX) && defined(DEBUG)
3390 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3391#endif
3392 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3393 }
3394 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3395 if (dpl != rpl)
3396 {
3397#if defined(VBOX) && defined(DEBUG)
3398 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3399#endif
3400 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3401 }
3402 if (!(ss_e2 & DESC_P_MASK))
3403 {
3404#if defined(VBOX) && defined(DEBUG)
3405 Log(("new_ss=%#x #NP\n", new_ss));
3406#endif
3407 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3408 }
3409#ifdef VBOX
3410 if (!(e2 & DESC_A_MASK))
3411 e2 = set_segment_accessed(new_cs, e2);
3412 if (!(ss_e2 & DESC_A_MASK))
3413 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3414#endif
3415 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3416 get_seg_base(ss_e1, ss_e2),
3417 get_seg_limit(ss_e1, ss_e2),
3418 ss_e2);
3419 }
3420
3421 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3422 get_seg_base(e1, e2),
3423 get_seg_limit(e1, e2),
3424 e2);
3425 cpu_x86_set_cpl(env, rpl);
3426 sp = new_esp;
3427#ifdef TARGET_X86_64
3428 if (env->hflags & HF_CS64_MASK)
3429 sp_mask = -1;
3430 else
3431#endif
3432 sp_mask = get_sp_mask(ss_e2);
3433
3434 /* validate data segments */
3435 validate_seg(R_ES, rpl);
3436 validate_seg(R_DS, rpl);
3437 validate_seg(R_FS, rpl);
3438 validate_seg(R_GS, rpl);
3439
3440 sp += addend;
3441 }
3442 SET_ESP(sp, sp_mask);
3443 env->eip = new_eip;
3444 if (is_iret) {
3445 /* NOTE: 'cpl' is the _old_ CPL */
3446 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3447 if (cpl == 0)
3448#ifdef VBOX
3449 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3450#else
3451 eflags_mask |= IOPL_MASK;
3452#endif
3453 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3454 if (cpl <= iopl)
3455 eflags_mask |= IF_MASK;
3456 if (shift == 0)
3457 eflags_mask &= 0xffff;
3458 load_eflags(new_eflags, eflags_mask);
3459 }
3460 return;
3461
3462 return_to_vm86:
3463 POPL(ssp, sp, sp_mask, new_esp);
3464 POPL(ssp, sp, sp_mask, new_ss);
3465 POPL(ssp, sp, sp_mask, new_es);
3466 POPL(ssp, sp, sp_mask, new_ds);
3467 POPL(ssp, sp, sp_mask, new_fs);
3468 POPL(ssp, sp, sp_mask, new_gs);
3469
3470 /* modify processor state */
3471 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3472 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3473 load_seg_vm(R_CS, new_cs & 0xffff);
3474 cpu_x86_set_cpl(env, 3);
3475 load_seg_vm(R_SS, new_ss & 0xffff);
3476 load_seg_vm(R_ES, new_es & 0xffff);
3477 load_seg_vm(R_DS, new_ds & 0xffff);
3478 load_seg_vm(R_FS, new_fs & 0xffff);
3479 load_seg_vm(R_GS, new_gs & 0xffff);
3480
3481 env->eip = new_eip & 0xffff;
3482 ESP = new_esp;
3483}
3484
3485void helper_iret_protected(int shift, int next_eip)
3486{
3487 int tss_selector, type;
3488 uint32_t e1, e2;
3489
3490#ifdef VBOX
3491 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3492 e1 = e2 = 0; /** @todo Why do we do this? */
3493 remR3TrapClear(env->pVM);
3494#endif
3495
3496 /* specific case for TSS */
3497 if (env->eflags & NT_MASK) {
3498#ifdef TARGET_X86_64
3499 if (env->hflags & HF_LMA_MASK)
3500 {
3501#if defined(VBOX) && defined(DEBUG)
3502 Log(("eflags.NT=1 on iret in long mode\n"));
3503#endif
3504 raise_exception_err(EXCP0D_GPF, 0);
3505 }
3506#endif
3507 tss_selector = lduw_kernel(env->tr.base + 0);
3508 if (tss_selector & 4)
3509 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3510 if (load_segment(&e1, &e2, tss_selector) != 0)
3511 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3512 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3513 /* NOTE: we check both segment and busy TSS */
3514 if (type != 3)
3515 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3516 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3517 } else {
3518 helper_ret_protected(shift, 1, 0);
3519 }
3520 env->hflags2 &= ~HF2_NMI_MASK;
3521}
3522
3523void helper_lret_protected(int shift, int addend)
3524{
3525 helper_ret_protected(shift, 0, addend);
3526}
3527
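/*
 * SYSENTER: #GP(0) if IA32_SYSENTER_CS is zero; otherwise clear VM/IF/RF,
 * switch to CPL 0 and load flat CS/SS selectors derived from
 * IA32_SYSENTER_CS together with ESP/EIP taken from the SYSENTER_ESP/EIP
 * MSRs.
 */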
3528void helper_sysenter(void)
3529{
3530 if (env->sysenter_cs == 0) {
3531 raise_exception_err(EXCP0D_GPF, 0);
3532 }
3533 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3534 cpu_x86_set_cpl(env, 0);
3535
3536#ifdef TARGET_X86_64
3537 if (env->hflags & HF_LMA_MASK) {
3538 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3539 0, 0xffffffff,
3540 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3541 DESC_S_MASK |
3542 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3543 } else
3544#endif
3545 {
3546 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3547 0, 0xffffffff,
3548 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3549 DESC_S_MASK |
3550 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3551 }
3552 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3553 0, 0xffffffff,
3554 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3555 DESC_S_MASK |
3556 DESC_W_MASK | DESC_A_MASK);
3557 ESP = env->sysenter_esp;
3558 EIP = env->sysenter_eip;
3559}
3560
3561void helper_sysexit(int dflag)
3562{
3563 int cpl;
3564
3565 cpl = env->hflags & HF_CPL_MASK;
3566 if (env->sysenter_cs == 0 || cpl != 0) {
3567 raise_exception_err(EXCP0D_GPF, 0);
3568 }
3569 cpu_x86_set_cpl(env, 3);
3570#ifdef TARGET_X86_64
3571 if (dflag == 2) {
3572 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3573 0, 0xffffffff,
3574 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3575 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3576 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3577 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3578 0, 0xffffffff,
3579 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3580 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3581 DESC_W_MASK | DESC_A_MASK);
3582 } else
3583#endif
3584 {
3585 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3586 0, 0xffffffff,
3587 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3588 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3589 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3590 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3591 0, 0xffffffff,
3592 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3593 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3594 DESC_W_MASK | DESC_A_MASK);
3595 }
3596 ESP = ECX;
3597 EIP = EDX;
3598}
3599
3600#if defined(CONFIG_USER_ONLY)
3601target_ulong helper_read_crN(int reg)
3602{
3603 return 0;
3604}
3605
3606void helper_write_crN(int reg, target_ulong t0)
3607{
3608}
3609
3610void helper_movl_drN_T0(int reg, target_ulong t0)
3611{
3612}
3613#else
3614target_ulong helper_read_crN(int reg)
3615{
3616 target_ulong val;
3617
3618 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3619 switch(reg) {
3620 default:
3621 val = env->cr[reg];
3622 break;
3623 case 8:
3624 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3625#ifndef VBOX
3626 val = cpu_get_apic_tpr(env->apic_state);
3627#else /* VBOX */
3628 val = cpu_get_apic_tpr(env);
3629#endif /* VBOX */
3630 } else {
3631 val = env->v_tpr;
3632 }
3633 break;
3634 }
3635 return val;
3636}
3637
3638void helper_write_crN(int reg, target_ulong t0)
3639{
3640 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3641 switch(reg) {
3642 case 0:
3643 cpu_x86_update_cr0(env, t0);
3644 break;
3645 case 3:
3646 cpu_x86_update_cr3(env, t0);
3647 break;
3648 case 4:
3649 cpu_x86_update_cr4(env, t0);
3650 break;
3651 case 8:
3652 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3653#ifndef VBOX
3654 cpu_set_apic_tpr(env->apic_state, t0);
3655#else /* VBOX */
3656 cpu_set_apic_tpr(env, t0);
3657#endif /* VBOX */
3658 }
3659 env->v_tpr = t0 & 0x0f;
3660 break;
3661 default:
3662 env->cr[reg] = t0;
3663 break;
3664 }
3665}
3666
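/*
 * MOV to debug register: writes to DR0-DR3 re-arm the corresponding hardware
 * breakpoint; a DR7 write re-inserts all four.  The VBox build additionally
 * treats DR4/DR5 as aliases of DR6/DR7 and enforces the architectural
 * must-be-zero / read-as-one / read-as-zero bit masks, raising #GP(0) when a
 * reserved bit is set.
 */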
3667void helper_movl_drN_T0(int reg, target_ulong t0)
3668{
3669 int i;
3670
3671 if (reg < 4) {
3672 hw_breakpoint_remove(env, reg);
3673 env->dr[reg] = t0;
3674 hw_breakpoint_insert(env, reg);
3675# ifndef VBOX
3676 } else if (reg == 7) {
3677# else
3678 } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
3679 if (t0 & X86_DR7_MBZ_MASK)
3680 raise_exception_err(EXCP0D_GPF, 0);
3681 t0 |= X86_DR7_RA1_MASK;
3682 t0 &= ~X86_DR7_RAZ_MASK;
3683# endif
3684 for (i = 0; i < 4; i++)
3685 hw_breakpoint_remove(env, i);
3686 env->dr[7] = t0;
3687 for (i = 0; i < 4; i++)
3688 hw_breakpoint_insert(env, i);
3689 } else {
3690# ifndef VBOX
3691 env->dr[reg] = t0;
3692# else
3693 if (t0 & X86_DR6_MBZ_MASK)
3694 raise_exception_err(EXCP0D_GPF, 0);
3695 t0 |= X86_DR6_RA1_MASK;
3696 t0 &= ~X86_DR6_RAZ_MASK;
3697 env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
3698# endif
3699 }
3700}
3701#endif
3702
3703void helper_lmsw(target_ulong t0)
3704{
3705    /* Only the 4 lower bits of CR0 are modified.  PE cannot be cleared
3706       once it has been set to one. */
3707 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3708 helper_write_crN(0, t0);
3709}
3710
3711void helper_clts(void)
3712{
3713 env->cr[0] &= ~CR0_TS_MASK;
3714 env->hflags &= ~HF_TS_MASK;
3715}
3716
3717void helper_invlpg(target_ulong addr)
3718{
3719 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3720 tlb_flush_page(env, addr);
3721}
3722
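/*
 * RDTSC: #GP(0) if CR4.TSD is set and CPL != 0; otherwise return the
 * offset-adjusted TSC in EDX:EAX.  RDTSCP additionally returns TSC_AUX in
 * ECX, which the VBox build fetches through the MSR interface.
 */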
3723void helper_rdtsc(void)
3724{
3725 uint64_t val;
3726
3727 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3728 raise_exception(EXCP0D_GPF);
3729 }
3730 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3731
3732 val = cpu_get_tsc(env) + env->tsc_offset;
3733 EAX = (uint32_t)(val);
3734 EDX = (uint32_t)(val >> 32);
3735}
3736
3737void helper_rdtscp(void)
3738{
3739 helper_rdtsc();
3740#ifndef VBOX
3741 ECX = (uint32_t)(env->tsc_aux);
3742#else /* VBOX */
3743 uint64_t val;
3744 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3745 ECX = (uint32_t)(val);
3746 else
3747 ECX = 0;
3748#endif /* VBOX */
3749}
3750
3751void helper_rdpmc(void)
3752{
3753#ifdef VBOX
3754 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3755 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3756 raise_exception(EXCP0D_GPF);
3757 }
3758 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3759 EAX = 0;
3760 EDX = 0;
3761#else /* !VBOX */
3762 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3763 raise_exception(EXCP0D_GPF);
3764 }
3765 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3766
3767 /* currently unimplemented */
3768 raise_exception_err(EXCP06_ILLOP, 0);
3769#endif /* !VBOX */
3770}
3771
3772#if defined(CONFIG_USER_ONLY)
3773void helper_wrmsr(void)
3774{
3775}
3776
3777void helper_rdmsr(void)
3778{
3779}
3780#else
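/*
 * WRMSR: ECX selects the MSR, EDX:EAX supplies the 64-bit value.  EFER writes
 * are masked so that only bits backed by advertised CPUID features
 * (SCE/LME/NXE/SVME/FFXSR) can change; the VBox build also forwards every
 * write to CPUM via cpu_wrmsr().
 */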
3781void helper_wrmsr(void)
3782{
3783 uint64_t val;
3784
3785 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3786
3787 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3788
3789 switch((uint32_t)ECX) {
3790 case MSR_IA32_SYSENTER_CS:
3791 env->sysenter_cs = val & 0xffff;
3792 break;
3793 case MSR_IA32_SYSENTER_ESP:
3794 env->sysenter_esp = val;
3795 break;
3796 case MSR_IA32_SYSENTER_EIP:
3797 env->sysenter_eip = val;
3798 break;
3799 case MSR_IA32_APICBASE:
3800# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3801 cpu_set_apic_base(env->apic_state, val);
3802# endif
3803 break;
3804 case MSR_EFER:
3805 {
3806 uint64_t update_mask;
3807 update_mask = 0;
3808 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3809 update_mask |= MSR_EFER_SCE;
3810 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3811 update_mask |= MSR_EFER_LME;
3812 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3813 update_mask |= MSR_EFER_FFXSR;
3814 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3815 update_mask |= MSR_EFER_NXE;
3816 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3817 update_mask |= MSR_EFER_SVME;
3818 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3819 update_mask |= MSR_EFER_FFXSR;
3820 cpu_load_efer(env, (env->efer & ~update_mask) |
3821 (val & update_mask));
3822 }
3823 break;
3824 case MSR_STAR:
3825 env->star = val;
3826 break;
3827 case MSR_PAT:
3828 env->pat = val;
3829 break;
3830 case MSR_VM_HSAVE_PA:
3831 env->vm_hsave = val;
3832 break;
3833#ifdef TARGET_X86_64
3834 case MSR_LSTAR:
3835 env->lstar = val;
3836 break;
3837 case MSR_CSTAR:
3838 env->cstar = val;
3839 break;
3840 case MSR_FMASK:
3841 env->fmask = val;
3842 break;
3843 case MSR_FSBASE:
3844 env->segs[R_FS].base = val;
3845 break;
3846 case MSR_GSBASE:
3847 env->segs[R_GS].base = val;
3848 break;
3849 case MSR_KERNELGSBASE:
3850 env->kernelgsbase = val;
3851 break;
3852#endif
3853# ifndef VBOX
3854 case MSR_MTRRphysBase(0):
3855 case MSR_MTRRphysBase(1):
3856 case MSR_MTRRphysBase(2):
3857 case MSR_MTRRphysBase(3):
3858 case MSR_MTRRphysBase(4):
3859 case MSR_MTRRphysBase(5):
3860 case MSR_MTRRphysBase(6):
3861 case MSR_MTRRphysBase(7):
3862 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3863 break;
3864 case MSR_MTRRphysMask(0):
3865 case MSR_MTRRphysMask(1):
3866 case MSR_MTRRphysMask(2):
3867 case MSR_MTRRphysMask(3):
3868 case MSR_MTRRphysMask(4):
3869 case MSR_MTRRphysMask(5):
3870 case MSR_MTRRphysMask(6):
3871 case MSR_MTRRphysMask(7):
3872 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3873 break;
3874 case MSR_MTRRfix64K_00000:
3875 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3876 break;
3877 case MSR_MTRRfix16K_80000:
3878 case MSR_MTRRfix16K_A0000:
3879 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3880 break;
3881 case MSR_MTRRfix4K_C0000:
3882 case MSR_MTRRfix4K_C8000:
3883 case MSR_MTRRfix4K_D0000:
3884 case MSR_MTRRfix4K_D8000:
3885 case MSR_MTRRfix4K_E0000:
3886 case MSR_MTRRfix4K_E8000:
3887 case MSR_MTRRfix4K_F0000:
3888 case MSR_MTRRfix4K_F8000:
3889 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3890 break;
3891 case MSR_MTRRdefType:
3892 env->mtrr_deftype = val;
3893 break;
3894 case MSR_MCG_STATUS:
3895 env->mcg_status = val;
3896 break;
3897 case MSR_MCG_CTL:
3898 if ((env->mcg_cap & MCG_CTL_P)
3899 && (val == 0 || val == ~(uint64_t)0))
3900 env->mcg_ctl = val;
3901 break;
3902 case MSR_TSC_AUX:
3903 env->tsc_aux = val;
3904 break;
3905# endif /* !VBOX */
3906 default:
3907# ifndef VBOX
3908 if ((uint32_t)ECX >= MSR_MC0_CTL
3909 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3910 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3911 if ((offset & 0x3) != 0
3912 || (val == 0 || val == ~(uint64_t)0))
3913 env->mce_banks[offset] = val;
3914 break;
3915 }
3916 /* XXX: exception ? */
3917# endif
3918 break;
3919 }
3920
3921# ifdef VBOX
3922 /* call CPUM. */
3923 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3924 {
3925 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3926 }
3927# endif
3928}
3929
3930void helper_rdmsr(void)
3931{
3932 uint64_t val;
3933
3934 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3935
3936 switch((uint32_t)ECX) {
3937 case MSR_IA32_SYSENTER_CS:
3938 val = env->sysenter_cs;
3939 break;
3940 case MSR_IA32_SYSENTER_ESP:
3941 val = env->sysenter_esp;
3942 break;
3943 case MSR_IA32_SYSENTER_EIP:
3944 val = env->sysenter_eip;
3945 break;
3946 case MSR_IA32_APICBASE:
3947#ifndef VBOX
3948 val = cpu_get_apic_base(env->apic_state);
3949#else /* VBOX */
3950 val = cpu_get_apic_base(env);
3951#endif /* VBOX */
3952 break;
3953 case MSR_EFER:
3954 val = env->efer;
3955 break;
3956 case MSR_STAR:
3957 val = env->star;
3958 break;
3959 case MSR_PAT:
3960 val = env->pat;
3961 break;
3962 case MSR_VM_HSAVE_PA:
3963 val = env->vm_hsave;
3964 break;
3965# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3966 case MSR_IA32_PERF_STATUS:
3967 /* tsc_increment_by_tick */
3968 val = 1000ULL;
3969 /* CPU multiplier */
3970 val |= (((uint64_t)4ULL) << 40);
3971 break;
3972# endif /* !VBOX */
3973#ifdef TARGET_X86_64
3974 case MSR_LSTAR:
3975 val = env->lstar;
3976 break;
3977 case MSR_CSTAR:
3978 val = env->cstar;
3979 break;
3980 case MSR_FMASK:
3981 val = env->fmask;
3982 break;
3983 case MSR_FSBASE:
3984 val = env->segs[R_FS].base;
3985 break;
3986 case MSR_GSBASE:
3987 val = env->segs[R_GS].base;
3988 break;
3989 case MSR_KERNELGSBASE:
3990 val = env->kernelgsbase;
3991 break;
3992# ifndef VBOX
3993 case MSR_TSC_AUX:
3994 val = env->tsc_aux;
3995 break;
3996# endif /*!VBOX*/
3997#endif
3998# ifndef VBOX
3999 case MSR_MTRRphysBase(0):
4000 case MSR_MTRRphysBase(1):
4001 case MSR_MTRRphysBase(2):
4002 case MSR_MTRRphysBase(3):
4003 case MSR_MTRRphysBase(4):
4004 case MSR_MTRRphysBase(5):
4005 case MSR_MTRRphysBase(6):
4006 case MSR_MTRRphysBase(7):
4007 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
4008 break;
4009 case MSR_MTRRphysMask(0):
4010 case MSR_MTRRphysMask(1):
4011 case MSR_MTRRphysMask(2):
4012 case MSR_MTRRphysMask(3):
4013 case MSR_MTRRphysMask(4):
4014 case MSR_MTRRphysMask(5):
4015 case MSR_MTRRphysMask(6):
4016 case MSR_MTRRphysMask(7):
4017 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
4018 break;
4019 case MSR_MTRRfix64K_00000:
4020 val = env->mtrr_fixed[0];
4021 break;
4022 case MSR_MTRRfix16K_80000:
4023 case MSR_MTRRfix16K_A0000:
4024 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
4025 break;
4026 case MSR_MTRRfix4K_C0000:
4027 case MSR_MTRRfix4K_C8000:
4028 case MSR_MTRRfix4K_D0000:
4029 case MSR_MTRRfix4K_D8000:
4030 case MSR_MTRRfix4K_E0000:
4031 case MSR_MTRRfix4K_E8000:
4032 case MSR_MTRRfix4K_F0000:
4033 case MSR_MTRRfix4K_F8000:
4034 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4035 break;
4036 case MSR_MTRRdefType:
4037 val = env->mtrr_deftype;
4038 break;
4039 case MSR_MTRRcap:
4040 if (env->cpuid_features & CPUID_MTRR)
4041 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4042 else
4043 /* XXX: exception ? */
4044 val = 0;
4045 break;
4046 case MSR_MCG_CAP:
4047 val = env->mcg_cap;
4048 break;
4049 case MSR_MCG_CTL:
4050 if (env->mcg_cap & MCG_CTL_P)
4051 val = env->mcg_ctl;
4052 else
4053 val = 0;
4054 break;
4055 case MSR_MCG_STATUS:
4056 val = env->mcg_status;
4057 break;
4058# endif /* !VBOX */
4059 default:
4060# ifndef VBOX
4061 if ((uint32_t)ECX >= MSR_MC0_CTL
4062 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
4063 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4064 val = env->mce_banks[offset];
4065 break;
4066 }
4067 /* XXX: exception ? */
4068 val = 0;
4069# else /* VBOX */
4070 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4071 {
4072 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4073 val = 0;
4074 }
4075# endif /* VBOX */
4076 break;
4077 }
4078 EAX = (uint32_t)(val);
4079 EDX = (uint32_t)(val >> 32);
4080
4081# ifdef VBOX_STRICT
4082 if ((uint32_t)ECX != MSR_IA32_TSC) {
4083 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4084 val = 0;
4085 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4086 }
4087# endif
4088}
4089#endif
4090
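/* Descriptor-inspection helpers (LSL/LAR/VERR/VERW). Each one loads the
   descriptor for the given selector, performs the DPL vs. CPL/RPL privilege
   checks (conforming code segments are exempt) and a type check, and reports
   the outcome by setting or clearing ZF in the lazy flags (CC_SRC). LSL
   returns the segment limit and LAR the access-rights bits on success. */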
4091target_ulong helper_lsl(target_ulong selector1)
4092{
4093 unsigned int limit;
4094 uint32_t e1, e2, eflags, selector;
4095 int rpl, dpl, cpl, type;
4096
4097 selector = selector1 & 0xffff;
4098 eflags = helper_cc_compute_all(CC_OP);
4099 if ((selector & 0xfffc) == 0)
4100 goto fail;
4101 if (load_segment(&e1, &e2, selector) != 0)
4102 goto fail;
4103 rpl = selector & 3;
4104 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4105 cpl = env->hflags & HF_CPL_MASK;
4106 if (e2 & DESC_S_MASK) {
4107 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4108 /* conforming */
4109 } else {
4110 if (dpl < cpl || dpl < rpl)
4111 goto fail;
4112 }
4113 } else {
4114 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4115 switch(type) {
4116 case 1:
4117 case 2:
4118 case 3:
4119 case 9:
4120 case 11:
4121 break;
4122 default:
4123 goto fail;
4124 }
4125 if (dpl < cpl || dpl < rpl) {
4126 fail:
4127 CC_SRC = eflags & ~CC_Z;
4128 return 0;
4129 }
4130 }
4131 limit = get_seg_limit(e1, e2);
4132 CC_SRC = eflags | CC_Z;
4133 return limit;
4134}
4135
4136target_ulong helper_lar(target_ulong selector1)
4137{
4138 uint32_t e1, e2, eflags, selector;
4139 int rpl, dpl, cpl, type;
4140
4141 selector = selector1 & 0xffff;
4142 eflags = helper_cc_compute_all(CC_OP);
4143 if ((selector & 0xfffc) == 0)
4144 goto fail;
4145 if (load_segment(&e1, &e2, selector) != 0)
4146 goto fail;
4147 rpl = selector & 3;
4148 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4149 cpl = env->hflags & HF_CPL_MASK;
4150 if (e2 & DESC_S_MASK) {
4151 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4152 /* conforming */
4153 } else {
4154 if (dpl < cpl || dpl < rpl)
4155 goto fail;
4156 }
4157 } else {
4158 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4159 switch(type) {
4160 case 1:
4161 case 2:
4162 case 3:
4163 case 4:
4164 case 5:
4165 case 9:
4166 case 11:
4167 case 12:
4168 break;
4169 default:
4170 goto fail;
4171 }
4172 if (dpl < cpl || dpl < rpl) {
4173 fail:
4174 CC_SRC = eflags & ~CC_Z;
4175 return 0;
4176 }
4177 }
4178 CC_SRC = eflags | CC_Z;
4179 #ifdef VBOX /* AMD says 0x00ffff00, while Intel says 0x00fxff00. Bochs and IEM do as AMD says (x=f). */
4180 return e2 & 0x00ffff00;
4181#else
4182 return e2 & 0x00f0ff00;
4183#endif
4184}
4185
4186void helper_verr(target_ulong selector1)
4187{
4188 uint32_t e1, e2, eflags, selector;
4189 int rpl, dpl, cpl;
4190
4191 selector = selector1 & 0xffff;
4192 eflags = helper_cc_compute_all(CC_OP);
4193 if ((selector & 0xfffc) == 0)
4194 goto fail;
4195 if (load_segment(&e1, &e2, selector) != 0)
4196 goto fail;
4197 if (!(e2 & DESC_S_MASK))
4198 goto fail;
4199 rpl = selector & 3;
4200 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4201 cpl = env->hflags & HF_CPL_MASK;
4202 if (e2 & DESC_CS_MASK) {
4203 if (!(e2 & DESC_R_MASK))
4204 goto fail;
4205 if (!(e2 & DESC_C_MASK)) {
4206 if (dpl < cpl || dpl < rpl)
4207 goto fail;
4208 }
4209 } else {
4210 if (dpl < cpl || dpl < rpl) {
4211 fail:
4212 CC_SRC = eflags & ~CC_Z;
4213 return;
4214 }
4215 }
4216 CC_SRC = eflags | CC_Z;
4217}
4218
4219void helper_verw(target_ulong selector1)
4220{
4221 uint32_t e1, e2, eflags, selector;
4222 int rpl, dpl, cpl;
4223
4224 selector = selector1 & 0xffff;
4225 eflags = helper_cc_compute_all(CC_OP);
4226 if ((selector & 0xfffc) == 0)
4227 goto fail;
4228 if (load_segment(&e1, &e2, selector) != 0)
4229 goto fail;
4230 if (!(e2 & DESC_S_MASK))
4231 goto fail;
4232 rpl = selector & 3;
4233 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4234 cpl = env->hflags & HF_CPL_MASK;
4235 if (e2 & DESC_CS_MASK) {
4236 goto fail;
4237 } else {
4238 if (dpl < cpl || dpl < rpl)
4239 goto fail;
4240 if (!(e2 & DESC_W_MASK)) {
4241 fail:
4242 CC_SRC = eflags & ~CC_Z;
4243 return;
4244 }
4245 }
4246 CC_SRC = eflags | CC_Z;
4247}
4248
4249/* x87 FPU helpers */
4250
4251static void fpu_set_exception(int mask)
4252{
4253 env->fpus |= mask;
4254 if (env->fpus & (~env->fpuc & FPUC_EM))
4255 env->fpus |= FPUS_SE | FPUS_B;
4256}
4257
4258static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4259{
4260 if (b == 0.0)
4261 fpu_set_exception(FPUS_ZE);
4262 return a / b;
4263}
4264
4265static void fpu_raise_exception(void)
4266{
4267 if (env->cr[0] & CR0_NE_MASK) {
4268 raise_exception(EXCP10_COPR);
4269 }
4270#if !defined(CONFIG_USER_ONLY)
4271 else {
4272 cpu_set_ferr(env);
4273 }
4274#endif
4275}
4276
4277void helper_flds_FT0(uint32_t val)
4278{
4279 union {
4280 float32 f;
4281 uint32_t i;
4282 } u;
4283 u.i = val;
4284 FT0 = float32_to_floatx(u.f, &env->fp_status);
4285}
4286
4287void helper_fldl_FT0(uint64_t val)
4288{
4289 union {
4290 float64 f;
4291 uint64_t i;
4292 } u;
4293 u.i = val;
4294 FT0 = float64_to_floatx(u.f, &env->fp_status);
4295}
4296
4297void helper_fildl_FT0(int32_t val)
4298{
4299 FT0 = int32_to_floatx(val, &env->fp_status);
4300}
4301
4302void helper_flds_ST0(uint32_t val)
4303{
4304 int new_fpstt;
4305 union {
4306 float32 f;
4307 uint32_t i;
4308 } u;
4309 new_fpstt = (env->fpstt - 1) & 7;
4310 u.i = val;
4311 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4312 env->fpstt = new_fpstt;
4313 env->fptags[new_fpstt] = 0; /* validate stack entry */
4314}
4315
4316void helper_fldl_ST0(uint64_t val)
4317{
4318 int new_fpstt;
4319 union {
4320 float64 f;
4321 uint64_t i;
4322 } u;
4323 new_fpstt = (env->fpstt - 1) & 7;
4324 u.i = val;
4325 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4326 env->fpstt = new_fpstt;
4327 env->fptags[new_fpstt] = 0; /* validate stack entry */
4328}
4329
4330void helper_fildl_ST0(int32_t val)
4331{
4332 int new_fpstt;
4333 new_fpstt = (env->fpstt - 1) & 7;
4334 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4335 env->fpstt = new_fpstt;
4336 env->fptags[new_fpstt] = 0; /* validate stack entry */
4337}
4338
4339void helper_fildll_ST0(int64_t val)
4340{
4341 int new_fpstt;
4342 new_fpstt = (env->fpstt - 1) & 7;
4343 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4344 env->fpstt = new_fpstt;
4345 env->fptags[new_fpstt] = 0; /* validate stack entry */
4346}
4347
4348#ifndef VBOX
4349uint32_t helper_fsts_ST0(void)
4350#else
4351RTCCUINTREG helper_fsts_ST0(void)
4352#endif
4353{
4354 union {
4355 float32 f;
4356 uint32_t i;
4357 } u;
4358 u.f = floatx_to_float32(ST0, &env->fp_status);
4359 return u.i;
4360}
4361
4362uint64_t helper_fstl_ST0(void)
4363{
4364 union {
4365 float64 f;
4366 uint64_t i;
4367 } u;
4368 u.f = floatx_to_float64(ST0, &env->fp_status);
4369 return u.i;
4370}
4371
4372#ifndef VBOX
4373int32_t helper_fist_ST0(void)
4374#else
4375RTCCINTREG helper_fist_ST0(void)
4376#endif
4377{
4378 int32_t val;
4379 val = floatx_to_int32(ST0, &env->fp_status);
4380 if (val != (int16_t)val)
4381 val = -32768;
4382 return val;
4383}
4384
4385#ifndef VBOX
4386int32_t helper_fistl_ST0(void)
4387#else
4388RTCCINTREG helper_fistl_ST0(void)
4389#endif
4390{
4391 int32_t val;
4392 val = floatx_to_int32(ST0, &env->fp_status);
4393 return val;
4394}
4395
4396int64_t helper_fistll_ST0(void)
4397{
4398 int64_t val;
4399 val = floatx_to_int64(ST0, &env->fp_status);
4400 return val;
4401}
4402
4403#ifndef VBOX
4404int32_t helper_fistt_ST0(void)
4405#else
4406RTCCINTREG helper_fistt_ST0(void)
4407#endif
4408{
4409 int32_t val;
4410 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4411 if (val != (int16_t)val)
4412 val = -32768;
4413 return val;
4414}
4415
4416#ifndef VBOX
4417int32_t helper_fisttl_ST0(void)
4418#else
4419RTCCINTREG helper_fisttl_ST0(void)
4420#endif
4421{
4422 int32_t val;
4423 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4424 return val;
4425}
4426
4427int64_t helper_fisttll_ST0(void)
4428{
4429 int64_t val;
4430 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4431 return val;
4432}
4433
4434void helper_fldt_ST0(target_ulong ptr)
4435{
4436 int new_fpstt;
4437 new_fpstt = (env->fpstt - 1) & 7;
4438 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4439 env->fpstt = new_fpstt;
4440 env->fptags[new_fpstt] = 0; /* validate stack entry */
4441}
4442
4443void helper_fstt_ST0(target_ulong ptr)
4444{
4445 helper_fstt(ST0, ptr);
4446}
4447
4448void helper_fpush(void)
4449{
4450 fpush();
4451}
4452
4453void helper_fpop(void)
4454{
4455 fpop();
4456}
4457
4458void helper_fdecstp(void)
4459{
4460 env->fpstt = (env->fpstt - 1) & 7;
4461 env->fpus &= (~0x4700);
4462}
4463
4464void helper_fincstp(void)
4465{
4466 env->fpstt = (env->fpstt + 1) & 7;
4467 env->fpus &= (~0x4700);
4468}
4469
4470/* FPU move */
4471
4472void helper_ffree_STN(int st_index)
4473{
4474 env->fptags[(env->fpstt + st_index) & 7] = 1;
4475}
4476
4477void helper_fmov_ST0_FT0(void)
4478{
4479 ST0 = FT0;
4480}
4481
4482void helper_fmov_FT0_STN(int st_index)
4483{
4484 FT0 = ST(st_index);
4485}
4486
4487void helper_fmov_ST0_STN(int st_index)
4488{
4489 ST0 = ST(st_index);
4490}
4491
4492void helper_fmov_STN_ST0(int st_index)
4493{
4494 ST(st_index) = ST0;
4495}
4496
4497void helper_fxchg_ST0_STN(int st_index)
4498{
4499 CPU86_LDouble tmp;
4500 tmp = ST(st_index);
4501 ST(st_index) = ST0;
4502 ST0 = tmp;
4503}
4504
4505/* FPU operations */
4506
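/* floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2 (unordered);
   indexing this table with ret + 1 yields the corresponding x87 condition
   codes: C0 = 0x0100, C3 = 0x4000, none, and C0|C2|C3 = 0x4500 for unordered. */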
4507static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4508
4509void helper_fcom_ST0_FT0(void)
4510{
4511 int ret;
4512
4513 ret = floatx_compare(ST0, FT0, &env->fp_status);
4514 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4515}
4516
4517void helper_fucom_ST0_FT0(void)
4518{
4519 int ret;
4520
4521 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4522 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4523}
4524
4525static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4526
4527void helper_fcomi_ST0_FT0(void)
4528{
4529 int eflags;
4530 int ret;
4531
4532 ret = floatx_compare(ST0, FT0, &env->fp_status);
4533 eflags = helper_cc_compute_all(CC_OP);
4534 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4535 CC_SRC = eflags;
4536}
4537
4538void helper_fucomi_ST0_FT0(void)
4539{
4540 int eflags;
4541 int ret;
4542
4543 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4544 eflags = helper_cc_compute_all(CC_OP);
4545 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4546 CC_SRC = eflags;
4547}
4548
4549void helper_fadd_ST0_FT0(void)
4550{
4551 ST0 += FT0;
4552}
4553
4554void helper_fmul_ST0_FT0(void)
4555{
4556 ST0 *= FT0;
4557}
4558
4559void helper_fsub_ST0_FT0(void)
4560{
4561 ST0 -= FT0;
4562}
4563
4564void helper_fsubr_ST0_FT0(void)
4565{
4566 ST0 = FT0 - ST0;
4567}
4568
4569void helper_fdiv_ST0_FT0(void)
4570{
4571 ST0 = helper_fdiv(ST0, FT0);
4572}
4573
4574void helper_fdivr_ST0_FT0(void)
4575{
4576 ST0 = helper_fdiv(FT0, ST0);
4577}
4578
4579/* fp operations between STN and ST0 */
4580
4581void helper_fadd_STN_ST0(int st_index)
4582{
4583 ST(st_index) += ST0;
4584}
4585
4586void helper_fmul_STN_ST0(int st_index)
4587{
4588 ST(st_index) *= ST0;
4589}
4590
4591void helper_fsub_STN_ST0(int st_index)
4592{
4593 ST(st_index) -= ST0;
4594}
4595
4596void helper_fsubr_STN_ST0(int st_index)
4597{
4598 CPU86_LDouble *p;
4599 p = &ST(st_index);
4600 *p = ST0 - *p;
4601}
4602
4603void helper_fdiv_STN_ST0(int st_index)
4604{
4605 CPU86_LDouble *p;
4606 p = &ST(st_index);
4607 *p = helper_fdiv(*p, ST0);
4608}
4609
4610void helper_fdivr_STN_ST0(int st_index)
4611{
4612 CPU86_LDouble *p;
4613 p = &ST(st_index);
4614 *p = helper_fdiv(ST0, *p);
4615}
4616
4617/* misc FPU operations */
4618void helper_fchs_ST0(void)
4619{
4620 ST0 = floatx_chs(ST0);
4621}
4622
4623void helper_fabs_ST0(void)
4624{
4625 ST0 = floatx_abs(ST0);
4626}
4627
4628void helper_fld1_ST0(void)
4629{
4630 ST0 = f15rk[1];
4631}
4632
4633void helper_fldl2t_ST0(void)
4634{
4635 ST0 = f15rk[6];
4636}
4637
4638void helper_fldl2e_ST0(void)
4639{
4640 ST0 = f15rk[5];
4641}
4642
4643void helper_fldpi_ST0(void)
4644{
4645 ST0 = f15rk[2];
4646}
4647
4648void helper_fldlg2_ST0(void)
4649{
4650 ST0 = f15rk[3];
4651}
4652
4653void helper_fldln2_ST0(void)
4654{
4655 ST0 = f15rk[4];
4656}
4657
4658void helper_fldz_ST0(void)
4659{
4660 ST0 = f15rk[0];
4661}
4662
4663void helper_fldz_FT0(void)
4664{
4665 FT0 = f15rk[0];
4666}
4667
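/* FNSTSW: the status word keeps the top-of-stack pointer in bits 11-13, which
   qemu tracks separately in env->fpstt, so it is merged back in here. */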
4668#ifndef VBOX
4669uint32_t helper_fnstsw(void)
4670#else
4671RTCCUINTREG helper_fnstsw(void)
4672#endif
4673{
4674 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4675}
4676
4677#ifndef VBOX
4678uint32_t helper_fnstcw(void)
4679#else
4680RTCCUINTREG helper_fnstcw(void)
4681#endif
4682{
4683 return env->fpuc;
4684}
4685
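/* Propagate the x87 control word to the softfloat status: the RC field
   (bits 10-11) selects the rounding mode and, when FLOATX80 is in use, the PC
   field (bits 8-9) selects 32-, 64- or 80-bit rounding precision (the reserved
   value 1 is treated as 80-bit here). */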
4686static void update_fp_status(void)
4687{
4688 int rnd_type;
4689
4690 /* set rounding mode */
4691 switch(env->fpuc & RC_MASK) {
4692 default:
4693 case RC_NEAR:
4694 rnd_type = float_round_nearest_even;
4695 break;
4696 case RC_DOWN:
4697 rnd_type = float_round_down;
4698 break;
4699 case RC_UP:
4700 rnd_type = float_round_up;
4701 break;
4702 case RC_CHOP:
4703 rnd_type = float_round_to_zero;
4704 break;
4705 }
4706 set_float_rounding_mode(rnd_type, &env->fp_status);
4707#ifdef FLOATX80
4708 switch((env->fpuc >> 8) & 3) {
4709 case 0:
4710 rnd_type = 32;
4711 break;
4712 case 2:
4713 rnd_type = 64;
4714 break;
4715 case 3:
4716 default:
4717 rnd_type = 80;
4718 break;
4719 }
4720 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4721#endif
4722}
4723
4724void helper_fldcw(uint32_t val)
4725{
4726 env->fpuc = val;
4727 update_fp_status();
4728}
4729
4730void helper_fclex(void)
4731{
4732 env->fpus &= 0x7f00;
4733}
4734
4735void helper_fwait(void)
4736{
4737 if (env->fpus & FPUS_SE)
4738 fpu_raise_exception();
4739}
4740
4741void helper_fninit(void)
4742{
4743 env->fpus = 0;
4744 env->fpstt = 0;
4745 env->fpuc = 0x37f;
4746 env->fptags[0] = 1;
4747 env->fptags[1] = 1;
4748 env->fptags[2] = 1;
4749 env->fptags[3] = 1;
4750 env->fptags[4] = 1;
4751 env->fptags[5] = 1;
4752 env->fptags[6] = 1;
4753 env->fptags[7] = 1;
4754}
4755
4756/* BCD ops */
4757
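/* FBLD/FBSTP operate on a ten-byte packed-BCD operand: bytes 0-8 hold 18
   decimal digits, two per byte with the least significant pair first, and
   bit 7 of byte 9 carries the sign. */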
4758void helper_fbld_ST0(target_ulong ptr)
4759{
4760 CPU86_LDouble tmp;
4761 uint64_t val;
4762 unsigned int v;
4763 int i;
4764
4765 val = 0;
4766 for(i = 8; i >= 0; i--) {
4767 v = ldub(ptr + i);
4768 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4769 }
4770 tmp = val;
4771 if (ldub(ptr + 9) & 0x80)
4772 tmp = -tmp;
4773 fpush();
4774 ST0 = tmp;
4775}
4776
4777void helper_fbst_ST0(target_ulong ptr)
4778{
4779 int v;
4780 target_ulong mem_ref, mem_end;
4781 int64_t val;
4782
4783 val = floatx_to_int64(ST0, &env->fp_status);
4784 mem_ref = ptr;
4785 mem_end = mem_ref + 9;
4786 if (val < 0) {
4787 stb(mem_end, 0x80);
4788 val = -val;
4789 } else {
4790 stb(mem_end, 0x00);
4791 }
4792 while (mem_ref < mem_end) {
4793 if (val == 0)
4794 break;
4795 v = val % 100;
4796 val = val / 100;
4797 v = ((v / 10) << 4) | (v % 10);
4798 stb(mem_ref++, v);
4799 }
4800 while (mem_ref < mem_end) {
4801 stb(mem_ref++, 0);
4802 }
4803}
4804
4805void helper_f2xm1(void)
4806{
4807 ST0 = pow(2.0,ST0) - 1.0;
4808}
4809
4810void helper_fyl2x(void)
4811{
4812 CPU86_LDouble fptemp;
4813
4814 fptemp = ST0;
4815 if (fptemp>0.0){
4816 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4817 ST1 *= fptemp;
4818 fpop();
4819 } else {
4820 env->fpus &= (~0x4700);
4821 env->fpus |= 0x400;
4822 }
4823}
4824
4825void helper_fptan(void)
4826{
4827 CPU86_LDouble fptemp;
4828
4829 fptemp = ST0;
4830 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4831 env->fpus |= 0x400;
4832 } else {
4833 ST0 = tan(fptemp);
4834 fpush();
4835 ST0 = 1.0;
4836 env->fpus &= (~0x400); /* C2 <-- 0 */
4837 /* the above code is for |arg| < 2**52 only */
4838 }
4839}
4840
4841void helper_fpatan(void)
4842{
4843 CPU86_LDouble fptemp, fpsrcop;
4844
4845 fpsrcop = ST1;
4846 fptemp = ST0;
4847 ST1 = atan2(fpsrcop,fptemp);
4848 fpop();
4849}
4850
4851void helper_fxtract(void)
4852{
4853 CPU86_LDoubleU temp;
4854 unsigned int expdif;
4855
4856 temp.d = ST0;
4857 expdif = EXPD(temp) - EXPBIAS;
4858 /*DP exponent bias*/
4859 ST0 = expdif;
4860 fpush();
4861 BIASEXPONENT(temp);
4862 ST0 = temp.d;
4863}
4864
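/* Partial remainder (FPREM1/FPREM): when the exponent difference is below 53
   the exact remainder is produced, C2 is cleared and the three low quotient
   bits are reported in C0, C3 and C1; otherwise C2 is set to signal an
   incomplete reduction and only a partial reduction is performed. FPREM1
   rounds the quotient to nearest (IEEE remainder semantics) while FPREM chops
   it towards zero. */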
4865void helper_fprem1(void)
4866{
4867 CPU86_LDouble dblq, fpsrcop, fptemp;
4868 CPU86_LDoubleU fpsrcop1, fptemp1;
4869 int expdif;
4870 signed long long int q;
4871
4872#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4873 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4874#else
4875 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4876#endif
4877 ST0 = 0.0 / 0.0; /* NaN */
4878 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4879 return;
4880 }
4881
4882 fpsrcop = ST0;
4883 fptemp = ST1;
4884 fpsrcop1.d = fpsrcop;
4885 fptemp1.d = fptemp;
4886 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4887
4888 if (expdif < 0) {
4889 /* optimisation? taken from the AMD docs */
4890 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4891 /* ST0 is unchanged */
4892 return;
4893 }
4894
4895 if (expdif < 53) {
4896 dblq = fpsrcop / fptemp;
4897 /* round dblq towards nearest integer */
4898 dblq = rint(dblq);
4899 ST0 = fpsrcop - fptemp * dblq;
4900
4901 /* convert dblq to q by truncating towards zero */
4902 if (dblq < 0.0)
4903 q = (signed long long int)(-dblq);
4904 else
4905 q = (signed long long int)dblq;
4906
4907 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4908 /* (C0,C3,C1) <-- (q2,q1,q0) */
4909 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4910 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4911 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4912 } else {
4913 env->fpus |= 0x400; /* C2 <-- 1 */
4914 fptemp = pow(2.0, expdif - 50);
4915 fpsrcop = (ST0 / ST1) / fptemp;
4916 /* fpsrcop = integer obtained by chopping */
4917 fpsrcop = (fpsrcop < 0.0) ?
4918 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4919 ST0 -= (ST1 * fpsrcop * fptemp);
4920 }
4921}
4922
4923void helper_fprem(void)
4924{
4925 CPU86_LDouble dblq, fpsrcop, fptemp;
4926 CPU86_LDoubleU fpsrcop1, fptemp1;
4927 int expdif;
4928 signed long long int q;
4929
4930#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4931 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4932#else
4933 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4934#endif
4935 ST0 = 0.0 / 0.0; /* NaN */
4936 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4937 return;
4938 }
4939
4940 fpsrcop = (CPU86_LDouble)ST0;
4941 fptemp = (CPU86_LDouble)ST1;
4942 fpsrcop1.d = fpsrcop;
4943 fptemp1.d = fptemp;
4944 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4945
4946 if (expdif < 0) {
4947 /* optimisation? taken from the AMD docs */
4948 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4949 /* ST0 is unchanged */
4950 return;
4951 }
4952
4953 if ( expdif < 53 ) {
4954 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4955 /* round dblq towards zero */
4956 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4957 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4958
4959 /* convert dblq to q by truncating towards zero */
4960 if (dblq < 0.0)
4961 q = (signed long long int)(-dblq);
4962 else
4963 q = (signed long long int)dblq;
4964
4965 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4966 /* (C0,C3,C1) <-- (q2,q1,q0) */
4967 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4968 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4969 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4970 } else {
4971 int N = 32 + (expdif % 32); /* as per AMD docs */
4972 env->fpus |= 0x400; /* C2 <-- 1 */
4973 fptemp = pow(2.0, (double)(expdif - N));
4974 fpsrcop = (ST0 / ST1) / fptemp;
4975 /* fpsrcop = integer obtained by chopping */
4976 fpsrcop = (fpsrcop < 0.0) ?
4977 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4978 ST0 -= (ST1 * fpsrcop * fptemp);
4979 }
4980}
4981
4982void helper_fyl2xp1(void)
4983{
4984 CPU86_LDouble fptemp;
4985
4986 fptemp = ST0;
4987 if ((fptemp+1.0)>0.0) {
4988 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4989 ST1 *= fptemp;
4990 fpop();
4991 } else {
4992 env->fpus &= (~0x4700);
4993 env->fpus |= 0x400;
4994 }
4995}
4996
4997void helper_fsqrt(void)
4998{
4999 CPU86_LDouble fptemp;
5000
5001 fptemp = ST0;
5002 if (fptemp<0.0) {
5003 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5004 env->fpus |= 0x400;
5005 }
5006 ST0 = sqrt(fptemp);
5007}
5008
5009void helper_fsincos(void)
5010{
5011 CPU86_LDouble fptemp;
5012
5013 fptemp = ST0;
5014 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5015 env->fpus |= 0x400;
5016 } else {
5017 ST0 = sin(fptemp);
5018 fpush();
5019 ST0 = cos(fptemp);
5020 env->fpus &= (~0x400); /* C2 <-- 0 */
5021 /* the above code is for |arg| < 2**63 only */
5022 }
5023}
5024
5025void helper_frndint(void)
5026{
5027 ST0 = floatx_round_to_int(ST0, &env->fp_status);
5028}
5029
5030void helper_fscale(void)
5031{
5032 ST0 = ldexp (ST0, (int)(ST1));
5033}
5034
5035void helper_fsin(void)
5036{
5037 CPU86_LDouble fptemp;
5038
5039 fptemp = ST0;
5040 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5041 env->fpus |= 0x400;
5042 } else {
5043 ST0 = sin(fptemp);
5044 env->fpus &= (~0x400); /* C2 <-- 0 */
5045 /* the above code is for |arg| < 2**53 only */
5046 }
5047}
5048
5049void helper_fcos(void)
5050{
5051 CPU86_LDouble fptemp;
5052
5053 fptemp = ST0;
5054 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5055 env->fpus |= 0x400;
5056 } else {
5057 ST0 = cos(fptemp);
5058 env->fpus &= (~0x400); /* C2 <-- 0 */
5059 /* the above code is for |arg| < 2**63 only */
5060 }
5061}
5062
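/* FXAM classification: C1 receives the sign; the (C3,C2,C0) pattern encodes
   NaN (0x100), infinity (0x500), zero (0x4000), denormal (0x4400) or a normal
   finite number (0x400). */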
5063void helper_fxam_ST0(void)
5064{
5065 CPU86_LDoubleU temp;
5066 int expdif;
5067
5068 temp.d = ST0;
5069
5070 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5071 if (SIGND(temp))
5072 env->fpus |= 0x200; /* C1 <-- 1 */
5073
5074 /* XXX: test fptags too */
5075 expdif = EXPD(temp);
5076 if (expdif == MAXEXPD) {
5077#ifdef USE_X86LDOUBLE
5078 if (MANTD(temp) == 0x8000000000000000ULL)
5079#else
5080 if (MANTD(temp) == 0)
5081#endif
5082 env->fpus |= 0x500 /*Infinity*/;
5083 else
5084 env->fpus |= 0x100 /*NaN*/;
5085 } else if (expdif == 0) {
5086 if (MANTD(temp) == 0)
5087 env->fpus |= 0x4000 /*Zero*/;
5088 else
5089 env->fpus |= 0x4400 /*Denormal*/;
5090 } else {
5091 env->fpus |= 0x400;
5092 }
5093}
5094
5095void helper_fstenv(target_ulong ptr, int data32)
5096{
5097 int fpus, fptag, exp, i;
5098 uint64_t mant;
5099 CPU86_LDoubleU tmp;
5100
5101 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5102 fptag = 0;
5103 for (i=7; i>=0; i--) {
5104 fptag <<= 2;
5105 if (env->fptags[i]) {
5106 fptag |= 3;
5107 } else {
5108 tmp.d = env->fpregs[i].d;
5109 exp = EXPD(tmp);
5110 mant = MANTD(tmp);
5111 if (exp == 0 && mant == 0) {
5112 /* zero */
5113 fptag |= 1;
5114 } else if (exp == 0 || exp == MAXEXPD
5115#ifdef USE_X86LDOUBLE
5116 || (mant & (1LL << 63)) == 0
5117#endif
5118 ) {
5119 /* NaNs, infinity, denormal */
5120 fptag |= 2;
5121 }
5122 }
5123 }
5124 if (data32) {
5125 /* 32 bit */
5126 stl(ptr, env->fpuc);
5127 stl(ptr + 4, fpus);
5128 stl(ptr + 8, fptag);
5129 stl(ptr + 12, 0); /* fpip */
5130 stl(ptr + 16, 0); /* fpcs */
5131 stl(ptr + 20, 0); /* fpoo */
5132 stl(ptr + 24, 0); /* fpos */
5133 } else {
5134 /* 16 bit */
5135 stw(ptr, env->fpuc);
5136 stw(ptr + 2, fpus);
5137 stw(ptr + 4, fptag);
5138 stw(ptr + 6, 0);
5139 stw(ptr + 8, 0);
5140 stw(ptr + 10, 0);
5141 stw(ptr + 12, 0);
5142 }
5143}
5144
5145void helper_fldenv(target_ulong ptr, int data32)
5146{
5147 int i, fpus, fptag;
5148
5149 if (data32) {
5150 env->fpuc = lduw(ptr);
5151 fpus = lduw(ptr + 4);
5152 fptag = lduw(ptr + 8);
5153 }
5154 else {
5155 env->fpuc = lduw(ptr);
5156 fpus = lduw(ptr + 2);
5157 fptag = lduw(ptr + 4);
5158 }
5159 env->fpstt = (fpus >> 11) & 7;
5160 env->fpus = fpus & ~0x3800;
5161 for(i = 0;i < 8; i++) {
5162 env->fptags[i] = ((fptag & 3) == 3);
5163 fptag >>= 2;
5164 }
5165}
5166
5167void helper_fsave(target_ulong ptr, int data32)
5168{
5169 CPU86_LDouble tmp;
5170 int i;
5171
5172 helper_fstenv(ptr, data32);
5173
5174 ptr += (14 << data32);
5175 for(i = 0;i < 8; i++) {
5176 tmp = ST(i);
5177 helper_fstt(tmp, ptr);
5178 ptr += 10;
5179 }
5180
5181 /* fninit */
5182 env->fpus = 0;
5183 env->fpstt = 0;
5184 env->fpuc = 0x37f;
5185 env->fptags[0] = 1;
5186 env->fptags[1] = 1;
5187 env->fptags[2] = 1;
5188 env->fptags[3] = 1;
5189 env->fptags[4] = 1;
5190 env->fptags[5] = 1;
5191 env->fptags[6] = 1;
5192 env->fptags[7] = 1;
5193}
5194
5195void helper_frstor(target_ulong ptr, int data32)
5196{
5197 CPU86_LDouble tmp;
5198 int i;
5199
5200 helper_fldenv(ptr, data32);
5201 ptr += (14 << data32);
5202
5203 for(i = 0;i < 8; i++) {
5204 tmp = helper_fldt(ptr);
5205 ST(i) = tmp;
5206 ptr += 10;
5207 }
5208}
5209
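/* FXSAVE image layout used below: FCW at +0, FSW at +2, abridged tag word at
   +4 (one bit per register, set = valid), instruction/data pointers at
   +8..+0x14, MXCSR at +0x18, MXCSR mask at +0x1c, the eight FPU registers at
   +0x20 (16 bytes apart) and the XMM registers at +0xa0. With FFXSR enabled,
   CPL 0 and long mode active the XMM area is skipped (fast FXSAVE/FXRSTOR). */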
5210void helper_fxsave(target_ulong ptr, int data64)
5211{
5212 int fpus, fptag, i, nb_xmm_regs;
5213 CPU86_LDouble tmp;
5214 target_ulong addr;
5215
5216 /* The operand must be 16 byte aligned */
5217 if (ptr & 0xf) {
5218 raise_exception(EXCP0D_GPF);
5219 }
5220
5221 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5222 fptag = 0;
5223 for(i = 0; i < 8; i++) {
5224 fptag |= (env->fptags[i] << i);
5225 }
5226 stw(ptr, env->fpuc);
5227 stw(ptr + 2, fpus);
5228 stw(ptr + 4, fptag ^ 0xff);
5229#ifdef TARGET_X86_64
5230 if (data64) {
5231 stq(ptr + 0x08, 0); /* rip */
5232 stq(ptr + 0x10, 0); /* rdp */
5233 } else
5234#endif
5235 {
5236 stl(ptr + 0x08, 0); /* eip */
5237 stl(ptr + 0x0c, 0); /* sel */
5238 stl(ptr + 0x10, 0); /* dp */
5239 stl(ptr + 0x14, 0); /* sel */
5240 }
5241
5242 addr = ptr + 0x20;
5243 for(i = 0;i < 8; i++) {
5244 tmp = ST(i);
5245 helper_fstt(tmp, addr);
5246 addr += 16;
5247 }
5248
5249 if (env->cr[4] & CR4_OSFXSR_MASK) {
5250 /* XXX: finish it */
5251 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5252 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5253 if (env->hflags & HF_CS64_MASK)
5254 nb_xmm_regs = 16;
5255 else
5256 nb_xmm_regs = 8;
5257 addr = ptr + 0xa0;
5258 /* Fast FXSAVE leaves out the XMM registers */
5259 if (!(env->efer & MSR_EFER_FFXSR)
5260 || (env->hflags & HF_CPL_MASK)
5261 || !(env->hflags & HF_LMA_MASK)) {
5262 for(i = 0; i < nb_xmm_regs; i++) {
5263 stq(addr, env->xmm_regs[i].XMM_Q(0));
5264 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5265 addr += 16;
5266 }
5267 }
5268 }
5269}
5270
5271void helper_fxrstor(target_ulong ptr, int data64)
5272{
5273 int i, fpus, fptag, nb_xmm_regs;
5274 CPU86_LDouble tmp;
5275 target_ulong addr;
5276
5277 /* The operand must be 16 byte aligned */
5278 if (ptr & 0xf) {
5279 raise_exception(EXCP0D_GPF);
5280 }
5281
5282 env->fpuc = lduw(ptr);
5283 fpus = lduw(ptr + 2);
5284 fptag = lduw(ptr + 4);
5285 env->fpstt = (fpus >> 11) & 7;
5286 env->fpus = fpus & ~0x3800;
5287 fptag ^= 0xff;
5288 for(i = 0;i < 8; i++) {
5289 env->fptags[i] = ((fptag >> i) & 1);
5290 }
5291
5292 addr = ptr + 0x20;
5293 for(i = 0;i < 8; i++) {
5294 tmp = helper_fldt(addr);
5295 ST(i) = tmp;
5296 addr += 16;
5297 }
5298
5299 if (env->cr[4] & CR4_OSFXSR_MASK) {
5300 /* XXX: finish it */
5301 env->mxcsr = ldl(ptr + 0x18);
5302 //ldl(ptr + 0x1c);
5303 if (env->hflags & HF_CS64_MASK)
5304 nb_xmm_regs = 16;
5305 else
5306 nb_xmm_regs = 8;
5307 addr = ptr + 0xa0;
5308 /* Fast FXRSTOR leaves out the XMM registers */
5309 if (!(env->efer & MSR_EFER_FFXSR)
5310 || (env->hflags & HF_CPL_MASK)
5311 || !(env->hflags & HF_LMA_MASK)) {
5312 for(i = 0; i < nb_xmm_regs; i++) {
5313#if !defined(VBOX) || __GNUC__ < 4
5314 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5315 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5316#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5317# if 1
5318 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5319 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5320 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5321 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5322# else
5323 /* this works fine on Mac OS X, gcc 4.0.1 */
5324 uint64_t u64 = ldq(addr);
5325 env->xmm_regs[i].XMM_Q(0) = u64;
5326 u64 = ldq(addr + 8);
5327 env->xmm_regs[i].XMM_Q(1) = u64;
5328# endif
5329#endif
5330 addr += 16;
5331 }
5332 }
5333 }
5334}
5335
5336#ifndef USE_X86LDOUBLE
5337
5338void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5339{
5340 CPU86_LDoubleU temp;
5341 int e;
5342
5343 temp.d = f;
5344 /* mantissa */
5345 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5346 /* exponent + sign */
5347 e = EXPD(temp) - EXPBIAS + 16383;
5348 e |= SIGND(temp) >> 16;
5349 *pexp = e;
5350}
5351
5352CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5353{
5354 CPU86_LDoubleU temp;
5355 int e;
5356 uint64_t ll;
5357
5358 /* XXX: handle overflow ? */
5359 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5360 e |= (upper >> 4) & 0x800; /* sign */
5361 ll = (mant >> 11) & ((1LL << 52) - 1);
5362#ifdef __arm__
5363 temp.l.upper = (e << 20) | (ll >> 32);
5364 temp.l.lower = ll;
5365#else
5366 temp.ll = ll | ((uint64_t)e << 52);
5367#endif
5368 return temp.d;
5369}
5370
5371#else
5372
5373void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5374{
5375 CPU86_LDoubleU temp;
5376
5377 temp.d = f;
5378 *pmant = temp.l.lower;
5379 *pexp = temp.l.upper;
5380}
5381
5382CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5383{
5384 CPU86_LDoubleU temp;
5385
5386 temp.l.upper = upper;
5387 temp.l.lower = mant;
5388 return temp.d;
5389}
5390#endif
5391
5392#ifdef TARGET_X86_64
5393
5394//#define DEBUG_MULDIV
5395
5396static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5397{
5398 *plow += a;
5399 /* carry test */
5400 if (*plow < a)
5401 (*phigh)++;
5402 *phigh += b;
5403}
5404
5405static void neg128(uint64_t *plow, uint64_t *phigh)
5406{
5407 *plow = ~ *plow;
5408 *phigh = ~ *phigh;
5409 add128(plow, phigh, 1, 0);
5410}
5411
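/* 128-by-64 unsigned division: on entry *phigh:*plow holds the dividend; on
   success *plow receives the quotient and *phigh the remainder. When the high
   half is non-zero a simple 64-step restoring shift-and-subtract loop is used. */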
5412/* return TRUE if overflow */
5413static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5414{
5415 uint64_t q, r, a1, a0;
5416 int i, qb, ab;
5417
5418 a0 = *plow;
5419 a1 = *phigh;
5420 if (a1 == 0) {
5421 q = a0 / b;
5422 r = a0 % b;
5423 *plow = q;
5424 *phigh = r;
5425 } else {
5426 if (a1 >= b)
5427 return 1;
5428 /* XXX: use a better algorithm */
5429 for(i = 0; i < 64; i++) {
5430 ab = a1 >> 63;
5431 a1 = (a1 << 1) | (a0 >> 63);
5432 if (ab || a1 >= b) {
5433 a1 -= b;
5434 qb = 1;
5435 } else {
5436 qb = 0;
5437 }
5438 a0 = (a0 << 1) | qb;
5439 }
5440#if defined(DEBUG_MULDIV)
5441 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5442 *phigh, *plow, b, a0, a1);
5443#endif
5444 *plow = a0;
5445 *phigh = a1;
5446 }
5447 return 0;
5448}
5449
5450/* return TRUE if overflow */
5451static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5452{
5453 int sa, sb;
5454 sa = ((int64_t)*phigh < 0);
5455 if (sa)
5456 neg128(plow, phigh);
5457 sb = (b < 0);
5458 if (sb)
5459 b = -b;
5460 if (div64(plow, phigh, b) != 0)
5461 return 1;
5462 if (sa ^ sb) {
5463 if (*plow > (1ULL << 63))
5464 return 1;
5465 *plow = - *plow;
5466 } else {
5467 if (*plow >= (1ULL << 63))
5468 return 1;
5469 }
5470 if (sa)
5471 *phigh = - *phigh;
5472 return 0;
5473}
5474
5475void helper_mulq_EAX_T0(target_ulong t0)
5476{
5477 uint64_t r0, r1;
5478
5479 mulu64(&r0, &r1, EAX, t0);
5480 EAX = r0;
5481 EDX = r1;
5482 CC_DST = r0;
5483 CC_SRC = r1;
5484}
5485
5486void helper_imulq_EAX_T0(target_ulong t0)
5487{
5488 uint64_t r0, r1;
5489
5490 muls64(&r0, &r1, EAX, t0);
5491 EAX = r0;
5492 EDX = r1;
5493 CC_DST = r0;
5494 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5495}
5496
5497target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5498{
5499 uint64_t r0, r1;
5500
5501 muls64(&r0, &r1, t0, t1);
5502 CC_DST = r0;
5503 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5504 return r0;
5505}
5506
5507void helper_divq_EAX(target_ulong t0)
5508{
5509 uint64_t r0, r1;
5510 if (t0 == 0) {
5511 raise_exception(EXCP00_DIVZ);
5512 }
5513 r0 = EAX;
5514 r1 = EDX;
5515 if (div64(&r0, &r1, t0))
5516 raise_exception(EXCP00_DIVZ);
5517 EAX = r0;
5518 EDX = r1;
5519}
5520
5521void helper_idivq_EAX(target_ulong t0)
5522{
5523 uint64_t r0, r1;
5524 if (t0 == 0) {
5525 raise_exception(EXCP00_DIVZ);
5526 }
5527 r0 = EAX;
5528 r1 = EDX;
5529 if (idiv64(&r0, &r1, t0))
5530 raise_exception(EXCP00_DIVZ);
5531 EAX = r0;
5532 EDX = r1;
5533}
5534#endif
5535
5536static void do_hlt(void)
5537{
5538 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5539 env->halted = 1;
5540 env->exception_index = EXCP_HLT;
5541 cpu_loop_exit();
5542}
5543
5544void helper_hlt(int next_eip_addend)
5545{
5546 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5547 EIP += next_eip_addend;
5548
5549 do_hlt();
5550}
5551
5552void helper_monitor(target_ulong ptr)
5553{
5554#ifdef VBOX
5555 if ((uint32_t)ECX > 1)
5556 raise_exception(EXCP0D_GPF);
5557#else /* !VBOX */
5558 if ((uint32_t)ECX != 0)
5559 raise_exception(EXCP0D_GPF);
5560#endif /* !VBOX */
5561 /* XXX: store address ? */
5562 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5563}
5564
5565void helper_mwait(int next_eip_addend)
5566{
5567 if ((uint32_t)ECX != 0)
5568 raise_exception(EXCP0D_GPF);
5569#ifdef VBOX
5570 helper_hlt(next_eip_addend);
5571#else /* !VBOX */
5572 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5573 EIP += next_eip_addend;
5574
5575 /* XXX: not complete but not completely erroneous */
5576 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5577 /* more than one CPU: do not sleep because another CPU may
5578 wake this one */
5579 } else {
5580 do_hlt();
5581 }
5582#endif /* !VBOX */
5583}
5584
5585void helper_debug(void)
5586{
5587 env->exception_index = EXCP_DEBUG;
5588 cpu_loop_exit();
5589}
5590
5591void helper_reset_rf(void)
5592{
5593 env->eflags &= ~RF_MASK;
5594}
5595
5596void helper_raise_interrupt(int intno, int next_eip_addend)
5597{
5598 raise_interrupt(intno, 1, 0, next_eip_addend);
5599}
5600
5601void helper_raise_exception(int exception_index)
5602{
5603 raise_exception(exception_index);
5604}
5605
5606void helper_cli(void)
5607{
5608 env->eflags &= ~IF_MASK;
5609}
5610
5611void helper_sti(void)
5612{
5613 env->eflags |= IF_MASK;
5614}
5615
5616#ifdef VBOX
5617void helper_cli_vme(void)
5618{
5619 env->eflags &= ~VIF_MASK;
5620}
5621
5622void helper_sti_vme(void)
5623{
5624 /* First check, then change eflags according to the AMD manual */
5625 if (env->eflags & VIP_MASK) {
5626 raise_exception(EXCP0D_GPF);
5627 }
5628 env->eflags |= VIF_MASK;
5629}
5630#endif /* VBOX */
5631
5632#if 0
5633/* vm86plus instructions */
5634void helper_cli_vm(void)
5635{
5636 env->eflags &= ~VIF_MASK;
5637}
5638
5639void helper_sti_vm(void)
5640{
5641 env->eflags |= VIF_MASK;
5642 if (env->eflags & VIP_MASK) {
5643 raise_exception(EXCP0D_GPF);
5644 }
5645}
5646#endif
5647
5648void helper_set_inhibit_irq(void)
5649{
5650 env->hflags |= HF_INHIBIT_IRQ_MASK;
5651}
5652
5653void helper_reset_inhibit_irq(void)
5654{
5655 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5656}
5657
5658void helper_boundw(target_ulong a0, int v)
5659{
5660 int low, high;
5661 low = ldsw(a0);
5662 high = ldsw(a0 + 2);
5663 v = (int16_t)v;
5664 if (v < low || v > high) {
5665 raise_exception(EXCP05_BOUND);
5666 }
5667}
5668
5669void helper_boundl(target_ulong a0, int v)
5670{
5671 int low, high;
5672 low = ldl(a0);
5673 high = ldl(a0 + 4);
5674 if (v < low || v > high) {
5675 raise_exception(EXCP05_BOUND);
5676 }
5677}
5678
5679static float approx_rsqrt(float a)
5680{
5681 return 1.0 / sqrt(a);
5682}
5683
5684static float approx_rcp(float a)
5685{
5686 return 1.0 / a;
5687}
5688
5689#if !defined(CONFIG_USER_ONLY)
5690
5691#define MMUSUFFIX _mmu
5692
5693#define SHIFT 0
5694#include "softmmu_template.h"
5695
5696#define SHIFT 1
5697#include "softmmu_template.h"
5698
5699#define SHIFT 2
5700#include "softmmu_template.h"
5701
5702#define SHIFT 3
5703#include "softmmu_template.h"
5704
5705#endif
5706
5707#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5708 /* This code assumes that a real physical address always fits into a host CPU register,
5709 which is wrong in general, but true for our current use cases. */
5710RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5711{
5712 return remR3PhysReadS8(addr);
5713}
5714RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5715{
5716 return remR3PhysReadU8(addr);
5717}
5718void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5719{
5720 remR3PhysWriteU8(addr, val);
5721}
5722RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5723{
5724 return remR3PhysReadS16(addr);
5725}
5726RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5727{
5728 return remR3PhysReadU16(addr);
5729}
5730void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5731{
5732 remR3PhysWriteU16(addr, val);
5733}
5734RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5735{
5736 return remR3PhysReadS32(addr);
5737}
5738RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5739{
5740 return remR3PhysReadU32(addr);
5741}
5742void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5743{
5744 remR3PhysWriteU32(addr, val);
5745}
5746uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5747{
5748 return remR3PhysReadU64(addr);
5749}
5750void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5751{
5752 remR3PhysWriteU64(addr, val);
5753}
5754#endif /* VBOX */
5755
5756#if !defined(CONFIG_USER_ONLY)
5757/* try to fill the TLB and return an exception if error. If retaddr is
5758 NULL, it means that the function was called in C code (i.e. not
5759 from generated code or from helper.c) */
5760/* XXX: fix it to restore all registers */
5761void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5762{
5763 TranslationBlock *tb;
5764 int ret;
5765 uintptr_t pc;
5766 CPUX86State *saved_env;
5767
5768 /* XXX: hack to restore env in all cases, even if not called from
5769 generated code */
5770 saved_env = env;
5771 env = cpu_single_env;
5772
5773 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5774 if (ret) {
5775 if (retaddr) {
5776 /* now we have a real cpu fault */
5777 pc = (uintptr_t)retaddr;
5778 tb = tb_find_pc(pc);
5779 if (tb) {
5780 /* the PC is inside the translated code. It means that we have
5781 a virtual CPU fault */
5782 cpu_restore_state(tb, env, pc, NULL);
5783 }
5784 }
5785 raise_exception_err(env->exception_index, env->error_code);
5786 }
5787 env = saved_env;
5788}
5789#endif
5790
5791#ifdef VBOX
5792
5793/**
5794 * Correctly computes the eflags.
5795 * @returns eflags.
5796 * @param env1 CPU environment.
5797 */
5798uint32_t raw_compute_eflags(CPUX86State *env1)
5799{
5800 CPUX86State *savedenv = env;
5801 uint32_t efl;
5802 env = env1;
5803 efl = compute_eflags();
5804 env = savedenv;
5805 return efl;
5806}
5807
5808/**
5809 * Reads byte from virtual address in guest memory area.
5810 * XXX: is it working for any addresses? swapped out pages?
5811 * @returns read data byte.
5812 * @param env1 CPU environment.
5813 * @param pvAddr GC Virtual address.
5814 */
5815uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5816{
5817 CPUX86State *savedenv = env;
5818 uint8_t u8;
5819 env = env1;
5820 u8 = ldub_kernel(addr);
5821 env = savedenv;
5822 return u8;
5823}
5824
5825/**
5826 * Reads a 16-bit word from a virtual address in the guest memory area.
5827 * XXX: is it working for any addresses? swapped out pages?
5828 * @returns read data word.
5829 * @param env1 CPU environment.
5830 * @param pvAddr GC Virtual address.
5831 */
5832uint16_t read_word(CPUX86State *env1, target_ulong addr)
5833{
5834 CPUX86State *savedenv = env;
5835 uint16_t u16;
5836 env = env1;
5837 u16 = lduw_kernel(addr);
5838 env = savedenv;
5839 return u16;
5840}
5841
5842/**
5843 * Reads a 32-bit dword from a virtual address in the guest memory area.
5844 * XXX: is it working for any addresses? swapped out pages?
5845 * @returns read data dword.
5846 * @param env1 CPU environment.
5847 * @param pvAddr GC Virtual address.
5848 */
5849uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5850{
5851 CPUX86State *savedenv = env;
5852 uint32_t u32;
5853 env = env1;
5854 u32 = ldl_kernel(addr);
5855 env = savedenv;
5856 return u32;
5857}
5858
5859/**
5860 * Writes a byte to a virtual address in the guest memory area.
5861 * XXX: is it working for any addresses? swapped out pages?
5863 * @param env1 CPU environment.
5864 * @param pvAddr GC Virtual address.
5865 * @param val byte value
5866 */
5867void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5868{
5869 CPUX86State *savedenv = env;
5870 env = env1;
5871 stb(addr, val);
5872 env = savedenv;
5873}
5874
5875void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5876{
5877 CPUX86State *savedenv = env;
5878 env = env1;
5879 stw(addr, val);
5880 env = savedenv;
5881}
5882
5883void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5884{
5885 CPUX86State *savedenv = env;
5886 env = env1;
5887 stl(addr, val);
5888 env = savedenv;
5889}
5890
5891/**
5892 * Correctly loads selector into segment register with updating internal
5893 * qemu data/caches.
5894 * @param env1 CPU environment.
5895 * @param seg_reg Segment register.
5896 * @param selector Selector to load.
5897 */
5898void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5899{
5900 CPUX86State *savedenv = env;
5901#ifdef FORCE_SEGMENT_SYNC
5902 jmp_buf old_buf;
5903#endif
5904
5905 env = env1;
5906
5907 if ( env->eflags & X86_EFL_VM
5908 || !(env->cr[0] & X86_CR0_PE))
5909 {
5910 load_seg_vm(seg_reg, selector);
5911
5912 env = savedenv;
5913
5914 /* Successful sync. */
5915 Assert(env1->segs[seg_reg].newselector == 0);
5916 }
5917 else
5918 {
5919 /* For some reason it works even without save/restore of the jump buffer, and since this code is
5920 time critical - let's not do that */
5921#ifdef FORCE_SEGMENT_SYNC
5922 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5923#endif
5924 if (setjmp(env1->jmp_env) == 0)
5925 {
5926 if (seg_reg == R_CS)
5927 {
5928 uint32_t e1, e2;
5929 e1 = e2 = 0;
5930 load_segment(&e1, &e2, selector);
5931 cpu_x86_load_seg_cache(env, R_CS, selector,
5932 get_seg_base(e1, e2),
5933 get_seg_limit(e1, e2),
5934 e2);
5935 }
5936 else
5937 helper_load_seg(seg_reg, selector);
5938 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5939 loading 0 selectors, which in turn led to subtle problems like #3588 */
5940
5941 env = savedenv;
5942
5943 /* Successful sync. */
5944 Assert(env1->segs[seg_reg].newselector == 0);
5945 }
5946 else
5947 {
5948 env = savedenv;
5949
5950 /* Postpone sync until the guest uses the selector. */
5951 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5952 env1->segs[seg_reg].newselector = selector;
5953 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5954 env1->exception_index = -1;
5955 env1->error_code = 0;
5956 env1->old_exception = -1;
5957 }
5958#ifdef FORCE_SEGMENT_SYNC
5959 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5960#endif
5961 }
5962
5963}
5964
5965DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5966{
5967 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5968}
5969
5970
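/* Translate and execute exactly one guest instruction: a single-instruction
   TB is generated (CPU_EMULATE_SINGLE_INSTR), executed until EIP changes or an
   external interrupt is pending, then invalidated and freed. If the
   instruction left HF_INHIBIT_IRQ_MASK set (e.g. STI or MOV SS), the next
   instruction is emulated as well. */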
5971int emulate_single_instr(CPUX86State *env1)
5972{
5973 TranslationBlock *tb;
5974 TranslationBlock *current;
5975 int flags;
5976 uint8_t *tc_ptr;
5977 target_ulong old_eip;
5978
5979 /* ensures env is loaded! */
5980 CPUX86State *savedenv = env;
5981 env = env1;
5982
5983 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5984
5985 current = env->current_tb;
5986 env->current_tb = NULL;
5987 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5988
5989 /*
5990 * Translate only one instruction.
5991 */
5992 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5993 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5994 env->segs[R_CS].base, flags, 0);
5995
5996 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5997
5998
5999 /* tb_link_phys: */
6000 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
6001 tb->jmp_next[0] = NULL;
6002 tb->jmp_next[1] = NULL;
6003 Assert(tb->jmp_next[0] == NULL);
6004 Assert(tb->jmp_next[1] == NULL);
6005 if (tb->tb_next_offset[0] != 0xffff)
6006 tb_reset_jump(tb, 0);
6007 if (tb->tb_next_offset[1] != 0xffff)
6008 tb_reset_jump(tb, 1);
6009
6010 /*
6011 * Execute it using emulation
6012 */
6013 old_eip = env->eip;
6014 env->current_tb = tb;
6015
6016 /*
6017 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
6018 * Perhaps not a very safe hack.
6019 */
6020 while (old_eip == env->eip)
6021 {
6022 tc_ptr = tb->tc_ptr;
6023
6024#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
6025 int fake_ret;
6026 tcg_qemu_tb_exec(tc_ptr, fake_ret);
6027#else
6028 tcg_qemu_tb_exec(tc_ptr);
6029#endif
6030
6031 /*
6032 * Exit once we detect an external interrupt and interrupts are enabled
6033 */
6034 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
6035 || ( (env->eflags & IF_MASK)
6036 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6037 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6038 )
6039 {
6040 break;
6041 }
6042 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6043 tlb_flush(env, true);
6044 }
6045 }
6046 env->current_tb = current;
6047
6048 tb_phys_invalidate(tb, -1);
6049 tb_free(tb);
6050/*
6051 Assert(tb->tb_next_offset[0] == 0xffff);
6052 Assert(tb->tb_next_offset[1] == 0xffff);
6053 Assert(tb->tb_next[0] == 0xffff);
6054 Assert(tb->tb_next[1] == 0xffff);
6055 Assert(tb->jmp_next[0] == NULL);
6056 Assert(tb->jmp_next[1] == NULL);
6057 Assert(tb->jmp_first == NULL); */
6058
6059 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6060
6061 /*
6062 * Execute the next instruction when we encounter instruction fusing.
6063 */
6064 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6065 {
6066 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6067 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6068 emulate_single_instr(env);
6069 }
6070
6071 env = savedenv;
6072 return 0;
6073}
6074
6075/**
6076 * Correctly loads a new ldtr selector.
6077 *
6078 * @param env1 CPU environment.
6079 * @param selector Selector to load.
6080 */
6081void sync_ldtr(CPUX86State *env1, int selector)
6082{
6083 CPUX86State *saved_env = env;
6084 if (setjmp(env1->jmp_env) == 0)
6085 {
6086 env = env1;
6087 helper_lldt(selector);
6088 env = saved_env;
6089 }
6090 else
6091 {
6092 env = saved_env;
6093#ifdef VBOX_STRICT
6094 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6095#endif
6096 }
6097}
6098
6099int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6100 uint32_t *esp_ptr, int dpl)
6101{
6102 int type, index, shift;
6103
6104 CPUX86State *savedenv = env;
6105 env = env1;
6106
6107 if (!(env->tr.flags & DESC_P_MASK))
6108 cpu_abort(env, "invalid tss");
6109 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6110 if ((type & 7) != 1)
6111 cpu_abort(env, "invalid tss type %d", type);
6112 shift = type >> 3;
6113 index = (dpl * 4 + 2) << shift;
6114 if (index + (4 << shift) - 1 > env->tr.limit)
6115 {
6116 env = savedenv;
6117 return 0;
6118 }
6119 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6120
6121 if (shift == 0) {
6122 *esp_ptr = lduw_kernel(env->tr.base + index);
6123 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6124 } else {
6125 *esp_ptr = ldl_kernel(env->tr.base + index);
6126 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6127 }
6128
6129 env = savedenv;
6130 return 1;
6131}
6132
6133//*****************************************************************************
6134// Needs to be at the bottom of the file (overriding macros)
6135
6136static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6137{
6138#ifdef USE_X86LDOUBLE
6139 CPU86_LDoubleU tmp;
6140 tmp.l.lower = *(uint64_t const *)ptr;
6141 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6142 return tmp.d;
6143#else
6144# error "Busted FPU saving/restoring!"
6145 return *(CPU86_LDouble *)ptr;
6146#endif
6147}
6148
6149static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6150{
6151#ifdef USE_X86LDOUBLE
6152 CPU86_LDoubleU tmp;
6153 tmp.d = f;
6154 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6155 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6156 *(uint16_t *)(ptr + 10) = 0;
6157 *(uint32_t *)(ptr + 12) = 0;
6158 AssertCompile(sizeof(long double) > 8);
6159#else
6160# error "Busted FPU saving/restoring!"
6161 *(CPU86_LDouble *)ptr = f;
6162#endif
6163}
6164
6165#undef stw
6166#undef stl
6167#undef stq
6168#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6169#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6170#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6171
6172//*****************************************************************************
6173void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6174{
6175 int fpus, fptag, i, nb_xmm_regs;
6176 CPU86_LDouble tmp;
6177 uint8_t *addr;
6178 int data64 = !!(env->hflags & HF_LMA_MASK);
6179
6180 if (env->cpuid_features & CPUID_FXSR)
6181 {
6182 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6183 fptag = 0;
6184 for(i = 0; i < 8; i++) {
6185 fptag |= (env->fptags[i] << i);
6186 }
6187 stw(ptr, env->fpuc);
6188 stw(ptr + 2, fpus);
6189 stw(ptr + 4, fptag ^ 0xff);
6190
6191 addr = ptr + 0x20;
6192 for(i = 0;i < 8; i++) {
6193 tmp = ST(i);
6194 helper_fstt_raw(tmp, addr);
6195 addr += 16;
6196 }
6197
6198 if (env->cr[4] & CR4_OSFXSR_MASK) {
6199 /* XXX: finish it */
6200 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6201 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6202 nb_xmm_regs = 8 << data64;
6203 addr = ptr + 0xa0;
6204 for(i = 0; i < nb_xmm_regs; i++) {
6205#if __GNUC__ < 4
6206 stq(addr, env->xmm_regs[i].XMM_Q(0));
6207 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6208#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6209 stl(addr, env->xmm_regs[i].XMM_L(0));
6210 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6211 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6212 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6213#endif
6214 addr += 16;
6215 }
6216 }
6217 }
6218 else
6219 {
6220 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6221 int fptag;
6222
6223 fp->FCW = env->fpuc;
6224 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6225 fptag = 0;
6226 for (i=7; i>=0; i--) {
6227 fptag <<= 2;
6228 if (env->fptags[i]) {
6229 fptag |= 3;
6230 } else {
6231 /* the FPU automatically computes it */
6232 }
6233 }
6234 fp->FTW = fptag;
6235
6236 for(i = 0;i < 8; i++) {
6237 tmp = ST(i);
6238 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6239 }
6240 }
6241}
6242
6243//*****************************************************************************
6244#undef lduw
6245#undef ldl
6246#undef ldq
6247#define lduw(a) *(uint16_t *)(a)
6248#define ldl(a) *(uint32_t *)(a)
6249#define ldq(a) *(uint64_t *)(a)
6250//*****************************************************************************
6251void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6252{
6253 int i, fpus, fptag, nb_xmm_regs;
6254 CPU86_LDouble tmp;
6255 uint8_t *addr;
6256 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6257
6258 if (env->cpuid_features & CPUID_FXSR)
6259 {
6260 env->fpuc = lduw(ptr);
6261 fpus = lduw(ptr + 2);
6262 fptag = lduw(ptr + 4);
6263 env->fpstt = (fpus >> 11) & 7;
6264 env->fpus = fpus & ~0x3800;
6265 fptag ^= 0xff;
6266 for(i = 0;i < 8; i++) {
6267 env->fptags[i] = ((fptag >> i) & 1);
6268 }
6269
6270 addr = ptr + 0x20;
6271 for(i = 0;i < 8; i++) {
6272 tmp = helper_fldt_raw(addr);
6273 ST(i) = tmp;
6274 addr += 16;
6275 }
6276
6277 if (env->cr[4] & CR4_OSFXSR_MASK) {
6278 /* XXX: finish it, endianness */
6279 env->mxcsr = ldl(ptr + 0x18);
6280 //ldl(ptr + 0x1c);
6281 nb_xmm_regs = 8 << data64;
6282 addr = ptr + 0xa0;
6283 for(i = 0; i < nb_xmm_regs; i++) {
6284#if HC_ARCH_BITS == 32
6285 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6286 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6287 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6288 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6289 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6290#else
6291 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6292 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6293#endif
6294 addr += 16;
6295 }
6296 }
6297 }
6298 else
6299 {
6300 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6301 int fptag, j;
6302
6303 env->fpuc = fp->FCW;
6304 env->fpstt = (fp->FSW >> 11) & 7;
6305 env->fpus = fp->FSW & ~0x3800;
6306 fptag = fp->FTW;
6307 for(i = 0;i < 8; i++) {
6308 env->fptags[i] = ((fptag & 3) == 3);
6309 fptag >>= 2;
6310 }
6311 j = env->fpstt;
6312 for(i = 0;i < 8; i++) {
6313 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6314 ST(i) = tmp;
6315 }
6316 }
6317}
6318//*****************************************************************************
6319//*****************************************************************************
6320
6321#endif /* VBOX */
6322
6323/* Secure Virtual Machine helpers */
6324
6325#if defined(CONFIG_USER_ONLY)
6326
6327void helper_vmrun(int aflag, int next_eip_addend)
6328{
6329}
6330void helper_vmmcall(void)
6331{
6332}
6333void helper_vmload(int aflag)
6334{
6335}
6336void helper_vmsave(int aflag)
6337{
6338}
6339void helper_stgi(void)
6340{
6341}
6342void helper_clgi(void)
6343{
6344}
6345void helper_skinit(void)
6346{
6347}
6348void helper_invlpga(int aflag)
6349{
6350}
6351void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6352{
6353}
6354void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6355{
6356}
6357
6358void helper_svm_check_io(uint32_t port, uint32_t param,
6359 uint32_t next_eip_addend)
6360{
6361}
6362#else
6363
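/* The VMCB keeps segment attributes in a packed 12-bit form: descriptor
   attribute bits 8..15 of the cached 'flags' end up in attrib bits 0..7 and
   bits 20..23 in attrib bits 8..11 (e.g. flags 0x00c09300 becomes attrib
   0x0c93); svm_load_seg below undoes the packing. */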
6364static inline void svm_save_seg(target_phys_addr_t addr,
6365 const SegmentCache *sc)
6366{
6367 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6368 sc->selector);
6369 stq_phys(addr + offsetof(struct vmcb_seg, base),
6370 sc->base);
6371 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6372 sc->limit);
6373 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6374 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6375}
6376
6377static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6378{
6379 unsigned int flags;
6380
6381 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6382 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6383 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6384 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6385 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6386}
6387
6388static inline void svm_load_seg_cache(target_phys_addr_t addr,
6389 CPUState *env, int seg_reg)
6390{
6391 SegmentCache sc1, *sc = &sc1;
6392 svm_load_seg(addr, sc);
6393 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6394 sc->base, sc->limit, sc->flags);
6395}
6396
6397void helper_vmrun(int aflag, int next_eip_addend)
6398{
6399 target_ulong addr;
6400 uint32_t event_inj;
6401 uint32_t int_ctl;
6402
6403 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6404
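/* rAX holds the physical address of the guest VMCB; aflag == 2 corresponds
   to a 64-bit address size, otherwise only the low 32 bits of RAX are used. */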
6405 if (aflag == 2)
6406 addr = EAX;
6407 else
6408 addr = (uint32_t)EAX;
6409
6410 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6411
6412 env->vm_vmcb = addr;
6413
6414 /* save the current CPU state in the hsave page */
6415 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6416 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6417
6418 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6419 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6420
6421 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6422 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6423 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6424 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6425 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6426 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6427
6428 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6429 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6430
6431 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6432 &env->segs[R_ES]);
6433 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6434 &env->segs[R_CS]);
6435 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6436 &env->segs[R_SS]);
6437 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6438 &env->segs[R_DS]);
6439
6440 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6441 EIP + next_eip_addend);
6442 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6443 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6444
6445 /* load the interception bitmaps so we do not need to access the
6446 vmcb in svm mode */
6447 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6448 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6449 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6450 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6451 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6452 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6453
6454 /* enable intercepts */
6455 env->hflags |= HF_SVMI_MASK;
6456
6457 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6458
6459 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6460 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6461
6462 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6463 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6464
6465 /* clear exit_info_2 so we behave like the real hardware */
6466 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6467
6468 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6469 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6470 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6471 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6472 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6473 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6474 if (int_ctl & V_INTR_MASKING_MASK) {
6475 env->v_tpr = int_ctl & V_TPR_MASK;
6476 env->hflags2 |= HF2_VINTR_MASK;
6477 if (env->eflags & IF_MASK)
6478 env->hflags2 |= HF2_HIF_MASK;
6479 }
6480
6481 cpu_load_efer(env,
6482 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6483 env->eflags = 0;
6484 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6485 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6486 CC_OP = CC_OP_EFLAGS;
6487
6488 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6489 env, R_ES);
6490 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6491 env, R_CS);
6492 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6493 env, R_SS);
6494 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6495 env, R_DS);
6496
6497 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6498 env->eip = EIP;
6499 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6500 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6501 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6502 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6503 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6504
6505 /* FIXME: guest state consistency checks */
6506
6507 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6508 case TLB_CONTROL_DO_NOTHING:
6509 break;
6510 case TLB_CONTROL_FLUSH_ALL_ASID:
6511 /* FIXME: this is not 100% correct but should work for now */
6512 tlb_flush(env, 1);
6513 break;
6514 }
6515
6516 env->hflags2 |= HF2_GIF_MASK;
6517
6518 if (int_ctl & V_IRQ_MASK) {
6519 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6520 }
6521
6522 /* maybe we need to inject an event */
6523 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6524 if (event_inj & SVM_EVTINJ_VALID) {
6525 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6526 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6527 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6528
6529 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6530 /* FIXME: need to implement valid_err */
6531 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6532 case SVM_EVTINJ_TYPE_INTR:
6533 env->exception_index = vector;
6534 env->error_code = event_inj_err;
6535 env->exception_is_int = 0;
6536 env->exception_next_eip = -1;
6537 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6538 /* XXX: is it always correct? */
6539 do_interrupt(vector, 0, 0, 0, 1);
6540 break;
6541 case SVM_EVTINJ_TYPE_NMI:
6542 env->exception_index = EXCP02_NMI;
6543 env->error_code = event_inj_err;
6544 env->exception_is_int = 0;
6545 env->exception_next_eip = EIP;
6546 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6547 cpu_loop_exit();
6548 break;
6549 case SVM_EVTINJ_TYPE_EXEPT:
6550 env->exception_index = vector;
6551 env->error_code = event_inj_err;
6552 env->exception_is_int = 0;
6553 env->exception_next_eip = -1;
6554 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6555 cpu_loop_exit();
6556 break;
6557 case SVM_EVTINJ_TYPE_SOFT:
6558 env->exception_index = vector;
6559 env->error_code = event_inj_err;
6560 env->exception_is_int = 1;
6561 env->exception_next_eip = EIP;
6562 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6563 cpu_loop_exit();
6564 break;
6565 }
6566 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6567 }
6568}
6569
6570void helper_vmmcall(void)
6571{
6572 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6573 raise_exception(EXCP06_ILLOP);
6574}
6575
6576void helper_vmload(int aflag)
6577{
6578 target_ulong addr;
6579 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6580
6581 if (aflag == 2)
6582 addr = EAX;
6583 else
6584 addr = (uint32_t)EAX;
6585
6586 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6587 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6588 env->segs[R_FS].base);
6589
6590 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6591 env, R_FS);
6592 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6593 env, R_GS);
6594 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6595 &env->tr);
6596 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6597 &env->ldt);
6598
6599#ifdef TARGET_X86_64
6600 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6601 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6602 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6603 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6604#endif
6605 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6606 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6607 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6608 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6609}
6610
6611void helper_vmsave(int aflag)
6612{
6613 target_ulong addr;
6614 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6615
6616 if (aflag == 2)
6617 addr = EAX;
6618 else
6619 addr = (uint32_t)EAX;
6620
6621 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6622 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6623 env->segs[R_FS].base);
6624
6625 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6626 &env->segs[R_FS]);
6627 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6628 &env->segs[R_GS]);
6629 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6630 &env->tr);
6631 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6632 &env->ldt);
6633
6634#ifdef TARGET_X86_64
6635 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6636 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6637 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6638 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6639#endif
6640 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6641 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6642 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6643 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6644}
6645
6646void helper_stgi(void)
6647{
6648 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6649 env->hflags2 |= HF2_GIF_MASK;
6650}
6651
6652void helper_clgi(void)
6653{
6654 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6655 env->hflags2 &= ~HF2_GIF_MASK;
6656}
6657
6658void helper_skinit(void)
6659{
6660 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6661 /* XXX: not implemented */
6662 raise_exception(EXCP06_ILLOP);
6663}
6664
6665void helper_invlpga(int aflag)
6666{
6667 target_ulong addr;
6668 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6669
6670 if (aflag == 2)
6671 addr = EAX;
6672 else
6673 addr = (uint32_t)EAX;
6674
6675 /* XXX: could use the ASID to decide whether the flush is
6676 actually needed */
6677 tlb_flush_page(env, addr);
6678}
6679
6680void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6681{
6682 if (likely(!(env->hflags & HF_SVMI_MASK)))
6683 return;
6684#ifndef VBOX
6685 switch(type) {
6686 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6687 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6688 helper_vmexit(type, param);
6689 }
6690 break;
6691 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6692 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6693 helper_vmexit(type, param);
6694 }
6695 break;
6696 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6697 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6698 helper_vmexit(type, param);
6699 }
6700 break;
6701 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6702 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6703 helper_vmexit(type, param);
6704 }
6705 break;
6706 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6707 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6708 helper_vmexit(type, param);
6709 }
6710 break;
6711 case SVM_EXIT_MSR:
6712 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6713 /* FIXME: this should be read in at vmrun (faster this way?) */
6714 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6715 uint32_t t0, t1;
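/* MSR permission bitmap lookup: each MSR owns two consecutive bits (read
   intercept, then write intercept), and the three architectural MSR ranges
   map to consecutive 2 KB chunks of the map.  t1 is the byte offset, t0 the
   bit offset within that byte, and 'param' (0 at the rdmsr call site, 1 for
   wrmsr) selects the final bit; e.g. ECX = 0x10 yields byte 4, bit 0. */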
6716 switch((uint32_t)ECX) {
6717 case 0 ... 0x1fff:
6718 t0 = (ECX * 2) % 8;
6719 t1 = (ECX * 2) / 8;
6720 break;
6721 case 0xc0000000 ... 0xc0001fff:
6722 t0 = (8192 + ECX - 0xc0000000) * 2;
6723 t1 = (t0 / 8);
6724 t0 %= 8;
6725 break;
6726 case 0xc0010000 ... 0xc0011fff:
6727 t0 = (16384 + ECX - 0xc0010000) * 2;
6728 t1 = (t0 / 8);
6729 t0 %= 8;
6730 break;
6731 default:
6732 helper_vmexit(type, param);
6733 t0 = 0;
6734 t1 = 0;
6735 break;
6736 }
6737 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6738 helper_vmexit(type, param);
6739 }
6740 break;
6741 default:
6742 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6743 helper_vmexit(type, param);
6744 }
6745 break;
6746 }
6747#else /* VBOX */
6748 AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
6749#endif /* VBOX */
6750}
6751
6752void helper_svm_check_io(uint32_t port, uint32_t param,
6753 uint32_t next_eip_addend)
6754{
6755 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6756 /* FIXME: this should be read in at vmrun (faster this way?) */
6757 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6758 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
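/* The IOPM has one intercept bit per I/O port.  Bits 4..6 of 'param' carry
   the access size in bytes, so 'mask' covers every port touched by the
   access; the 16-bit load below handles accesses crossing a byte boundary. */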
6759 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6760 /* next EIP */
6761 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6762 env->eip + next_eip_addend);
6763 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6764 }
6765 }
6766}
6767
6768/* Note: currently only 32 bits of exit_code are used */
6769void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6770{
6771 uint32_t int_ctl;
6772
6773 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6774 exit_code, exit_info_1,
6775 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6776 EIP);
6777
6778 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6779 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6780 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6781 } else {
6782 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6783 }
6784
6785 /* Save the VM state in the vmcb */
6786 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6787 &env->segs[R_ES]);
6788 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6789 &env->segs[R_CS]);
6790 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6791 &env->segs[R_SS]);
6792 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6793 &env->segs[R_DS]);
6794
6795 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6796 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6797
6798 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6799 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6800
6801 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6802 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6803 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6804 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6805 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6806
6807 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6808 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6809 int_ctl |= env->v_tpr & V_TPR_MASK;
6810 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6811 int_ctl |= V_IRQ_MASK;
6812 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6813
6814 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6815 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6816 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6817 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6818 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6819 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6820 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6821
6822 /* Reload the host state from vm_hsave */
6823 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6824 env->hflags &= ~HF_SVMI_MASK;
6825 env->intercept = 0;
6826 env->intercept_exceptions = 0;
6827 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6828 env->tsc_offset = 0;
6829
6830 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6831 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6832
6833 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6834 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6835
6836 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6837 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6838 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6839 /* we need to set the efer after the crs so the hidden flags get
6840 set properly */
6841 cpu_load_efer(env,
6842 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6843 env->eflags = 0;
6844 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6845 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6846 CC_OP = CC_OP_EFLAGS;
6847
6848 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6849 env, R_ES);
6850 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6851 env, R_CS);
6852 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6853 env, R_SS);
6854 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6855 env, R_DS);
6856
6857 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6858 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6859 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6860
6861 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6862 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6863
6864 /* other setups */
6865 cpu_x86_set_cpl(env, 0);
6866 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6867 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6868
6869 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6870 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6871 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6872 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6873 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6874
6875 env->hflags2 &= ~HF2_GIF_MASK;
6876 /* FIXME: Resets the current ASID register to zero (host ASID). */
6877
6878 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6879
6880 /* Clears the TSC_OFFSET inside the processor. */
6881
6882 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6883 from the page table indicated by the host's CR3. If the PDPEs contain
6884 illegal state, the processor causes a shutdown. */
6885
6886 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6887 env->cr[0] |= CR0_PE_MASK;
6888 env->eflags &= ~VM_MASK;
6889
6890 /* Disables all breakpoints in the host DR7 register. */
6891
6892 /* Checks the reloaded host state for consistency. */
6893
6894 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6895 host's code segment or non-canonical (in the case of long mode), a
6896 #GP fault is delivered inside the host. */
6897
6898 /* remove any pending exception */
6899 env->exception_index = -1;
6900 env->error_code = 0;
6901 env->old_exception = -1;
6902
6903 cpu_loop_exit();
6904}
6905
6906#endif
6907
6908/* MMX/SSE */
6909/* XXX: optimize by storing fptt and fptags in the static cpu state */
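/* env->fptags[] is a byte array with one byte per x87 register, so the two
   32-bit stores in each helper below touch all eight tags at once: 0 marks
   every register as valid for MMX use, 0x01010101 marks them all empty. */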
6910void helper_enter_mmx(void)
6911{
6912 env->fpstt = 0;
6913 *(uint32_t *)(env->fptags) = 0;
6914 *(uint32_t *)(env->fptags + 4) = 0;
6915}
6916
6917void helper_emms(void)
6918{
6919 /* set to empty state */
6920 *(uint32_t *)(env->fptags) = 0x01010101;
6921 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6922}
6923
6924/* XXX: suppress */
6925void helper_movq(void *d, void *s)
6926{
6927 *(uint64_t *)d = *(uint64_t *)s;
6928}
6929
6930#define SHIFT 0
6931#include "ops_sse.h"
6932
6933#define SHIFT 1
6934#include "ops_sse.h"
6935
6936#define SHIFT 0
6937#include "helper_template.h"
6938#undef SHIFT
6939
6940#define SHIFT 1
6941#include "helper_template.h"
6942#undef SHIFT
6943
6944#define SHIFT 2
6945#include "helper_template.h"
6946#undef SHIFT
6947
6948#ifdef TARGET_X86_64
6949
6950#define SHIFT 3
6951#include "helper_template.h"
6952#undef SHIFT
6953
6954#endif
6955
6956/* bit operations */
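/* The loops below do not handle a zero source; the translated code is
   expected to test for zero first, since BSF/BSR leave their destination
   undefined in that case.  helper_lzcnt serves both as the LZCNT
   implementation (wordsize != 0, returning the leading-zero count and
   'wordsize' for a zero input) and as the core of helper_bsr (wordsize == 0,
   returning the index of the highest set bit). */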
6957target_ulong helper_bsf(target_ulong t0)
6958{
6959 int count;
6960 target_ulong res;
6961
6962 res = t0;
6963 count = 0;
6964 while ((res & 1) == 0) {
6965 count++;
6966 res >>= 1;
6967 }
6968 return count;
6969}
6970
6971target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6972{
6973 int count;
6974 target_ulong res, mask;
6975
6976 if (wordsize > 0 && t0 == 0) {
6977 return wordsize;
6978 }
6979 res = t0;
6980 count = TARGET_LONG_BITS - 1;
6981 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6982 while ((res & mask) == 0) {
6983 count--;
6984 res <<= 1;
6985 }
6986 if (wordsize > 0) {
6987 return wordsize - 1 - count;
6988 }
6989 return count;
6990}
6991
6992target_ulong helper_bsr(target_ulong t0)
6993{
6994 return helper_lzcnt(t0, 0);
6995}
6996
6997static int compute_all_eflags(void)
6998{
6999 return CC_SRC;
7000}
7001
7002static int compute_c_eflags(void)
7003{
7004 return CC_SRC & CC_C;
7005}
7006
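/* Lazy condition-code evaluation: CC_OP records which operation last set the
   flags, with its operands/result kept in CC_SRC/CC_DST.  helper_cc_compute_all
   reconstructs all arithmetic EFLAGS bits from that state, while
   helper_cc_compute_c recovers only CF; the per-operation compute_* routines
   come from the helper_template.h expansions above. */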
7007uint32_t helper_cc_compute_all(int op)
7008{
7009 switch (op) {
7010 default: /* should never happen */ return 0;
7011
7012 case CC_OP_EFLAGS: return compute_all_eflags();
7013
7014 case CC_OP_MULB: return compute_all_mulb();
7015 case CC_OP_MULW: return compute_all_mulw();
7016 case CC_OP_MULL: return compute_all_mull();
7017
7018 case CC_OP_ADDB: return compute_all_addb();
7019 case CC_OP_ADDW: return compute_all_addw();
7020 case CC_OP_ADDL: return compute_all_addl();
7021
7022 case CC_OP_ADCB: return compute_all_adcb();
7023 case CC_OP_ADCW: return compute_all_adcw();
7024 case CC_OP_ADCL: return compute_all_adcl();
7025
7026 case CC_OP_SUBB: return compute_all_subb();
7027 case CC_OP_SUBW: return compute_all_subw();
7028 case CC_OP_SUBL: return compute_all_subl();
7029
7030 case CC_OP_SBBB: return compute_all_sbbb();
7031 case CC_OP_SBBW: return compute_all_sbbw();
7032 case CC_OP_SBBL: return compute_all_sbbl();
7033
7034 case CC_OP_LOGICB: return compute_all_logicb();
7035 case CC_OP_LOGICW: return compute_all_logicw();
7036 case CC_OP_LOGICL: return compute_all_logicl();
7037
7038 case CC_OP_INCB: return compute_all_incb();
7039 case CC_OP_INCW: return compute_all_incw();
7040 case CC_OP_INCL: return compute_all_incl();
7041
7042 case CC_OP_DECB: return compute_all_decb();
7043 case CC_OP_DECW: return compute_all_decw();
7044 case CC_OP_DECL: return compute_all_decl();
7045
7046 case CC_OP_SHLB: return compute_all_shlb();
7047 case CC_OP_SHLW: return compute_all_shlw();
7048 case CC_OP_SHLL: return compute_all_shll();
7049
7050 case CC_OP_SARB: return compute_all_sarb();
7051 case CC_OP_SARW: return compute_all_sarw();
7052 case CC_OP_SARL: return compute_all_sarl();
7053
7054#ifdef TARGET_X86_64
7055 case CC_OP_MULQ: return compute_all_mulq();
7056
7057 case CC_OP_ADDQ: return compute_all_addq();
7058
7059 case CC_OP_ADCQ: return compute_all_adcq();
7060
7061 case CC_OP_SUBQ: return compute_all_subq();
7062
7063 case CC_OP_SBBQ: return compute_all_sbbq();
7064
7065 case CC_OP_LOGICQ: return compute_all_logicq();
7066
7067 case CC_OP_INCQ: return compute_all_incq();
7068
7069 case CC_OP_DECQ: return compute_all_decq();
7070
7071 case CC_OP_SHLQ: return compute_all_shlq();
7072
7073 case CC_OP_SARQ: return compute_all_sarq();
7074#endif
7075 }
7076}
7077
7078uint32_t helper_cc_compute_c(int op)
7079{
7080 switch (op) {
7081 default: /* should never happen */ return 0;
7082
7083 case CC_OP_EFLAGS: return compute_c_eflags();
7084
7085 case CC_OP_MULB: return compute_c_mull();
7086 case CC_OP_MULW: return compute_c_mull();
7087 case CC_OP_MULL: return compute_c_mull();
7088
7089 case CC_OP_ADDB: return compute_c_addb();
7090 case CC_OP_ADDW: return compute_c_addw();
7091 case CC_OP_ADDL: return compute_c_addl();
7092
7093 case CC_OP_ADCB: return compute_c_adcb();
7094 case CC_OP_ADCW: return compute_c_adcw();
7095 case CC_OP_ADCL: return compute_c_adcl();
7096
7097 case CC_OP_SUBB: return compute_c_subb();
7098 case CC_OP_SUBW: return compute_c_subw();
7099 case CC_OP_SUBL: return compute_c_subl();
7100
7101 case CC_OP_SBBB: return compute_c_sbbb();
7102 case CC_OP_SBBW: return compute_c_sbbw();
7103 case CC_OP_SBBL: return compute_c_sbbl();
7104
7105 case CC_OP_LOGICB: return compute_c_logicb();
7106 case CC_OP_LOGICW: return compute_c_logicw();
7107 case CC_OP_LOGICL: return compute_c_logicl();
7108
7109 case CC_OP_INCB: return compute_c_incl();
7110 case CC_OP_INCW: return compute_c_incl();
7111 case CC_OP_INCL: return compute_c_incl();
7112
7113 case CC_OP_DECB: return compute_c_incl();
7114 case CC_OP_DECW: return compute_c_incl();
7115 case CC_OP_DECL: return compute_c_incl();
7116
7117 case CC_OP_SHLB: return compute_c_shlb();
7118 case CC_OP_SHLW: return compute_c_shlw();
7119 case CC_OP_SHLL: return compute_c_shll();
7120
7121 case CC_OP_SARB: return compute_c_sarl();
7122 case CC_OP_SARW: return compute_c_sarl();
7123 case CC_OP_SARL: return compute_c_sarl();
7124
7125#ifdef TARGET_X86_64
7126 case CC_OP_MULQ: return compute_c_mull();
7127
7128 case CC_OP_ADDQ: return compute_c_addq();
7129
7130 case CC_OP_ADCQ: return compute_c_adcq();
7131
7132 case CC_OP_SUBQ: return compute_c_subq();
7133
7134 case CC_OP_SBBQ: return compute_c_sbbq();
7135
7136 case CC_OP_LOGICQ: return compute_c_logicq();
7137
7138 case CC_OP_INCQ: return compute_c_incl();
7139
7140 case CC_OP_DECQ: return compute_c_incl();
7141
7142 case CC_OP_SHLQ: return compute_c_shlq();
7143
7144 case CC_OP_SARQ: return compute_c_sarl();
7145#endif
7146 }
7147}