VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c@ 16340

Last change on this file since 16340 was 16340, checked in by vboxsync, 16 years ago

target-i386/op_helper.c: fixed stack alignment check in helper_external_event so it works on 64-bit darwin as well.

File size: 201.7 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
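/* parity_table[i] is CC_P when byte value i contains an even number of set
   bits, 0 otherwise; used to derive the PF flag from a result's low byte. */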
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table (rotate-through-carry count for 16-bit operands) */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table (rotate-through-carry count for 8-bit operands) */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
102
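/* x87 constants loaded by FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T */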
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to AMD manual, should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
196/* return non-zero on error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with RPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
273
274 if (seg == R_CS)
275 flags |= DESC_CS_MASK;
276
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, flags);
279#else
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
282#endif
283}
284
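/* Fetch the ring-'dpl' stack pointer (SS:ESP) from the current TSS.
   Handles both 16-bit and 32-bit TSS formats and raises #TS if the
   entry lies outside the TSS limit. */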
285#ifndef VBOX
286static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
287#else /* VBOX */
288DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
289#endif /* VBOX */
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
327
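/* Load a segment register as part of a task switch; descriptor checks that
   fail here raise #TS (or #NP for a non-present segment) rather than #GP. */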
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0;
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with RPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* if data or non-conforming code, check the access rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386 }
387}
388
389#define SWITCH_TSS_JMP 0
390#define SWITCH_TSS_IRET 1
391#define SWITCH_TSS_CALL 2
392
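/* Perform a hardware task switch: validate the new TSS, save the outgoing
   context into the current TSS, then load registers, segments and the LDT
   from the new one. 'source' tells whether the switch comes from a JMP,
   CALL or IRET so that the TSS busy bit and the NT/back-link fields are
   updated accordingly. */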
393/* XXX: restore CPU state in registers (PowerPC case) */
394static void switch_tss(int tss_selector,
395 uint32_t e1, uint32_t e2, int source,
396 uint32_t next_eip)
397{
398 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
399 target_ulong tss_base;
400 uint32_t new_regs[8], new_segs[6];
401 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
402 uint32_t old_eflags, eflags_mask;
403 SegmentCache *dt;
404#ifndef VBOX
405 int index;
406#else
407 unsigned int index;
408#endif
409 target_ulong ptr;
410
411 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
412#ifdef DEBUG_PCALL
413 if (loglevel & CPU_LOG_PCALL)
414 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
415#endif
416
417#if defined(VBOX) && defined(DEBUG)
418 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
419#endif
420
421 /* if task gate, we read the TSS segment and we load it */
422 if (type == 5) {
423 if (!(e2 & DESC_P_MASK))
424 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
425 tss_selector = e1 >> 16;
426 if (tss_selector & 4)
427 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
428 if (load_segment(&e1, &e2, tss_selector) != 0)
429 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
430 if (e2 & DESC_S_MASK)
431 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
432 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
433 if ((type & 7) != 1)
434 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
435 }
436
437 if (!(e2 & DESC_P_MASK))
438 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
439
440 if (type & 8)
441 tss_limit_max = 103;
442 else
443 tss_limit_max = 43;
444 tss_limit = get_seg_limit(e1, e2);
445 tss_base = get_seg_base(e1, e2);
446 if ((tss_selector & 4) != 0 ||
447 tss_limit < tss_limit_max)
448 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
449 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
450 if (old_type & 8)
451 old_tss_limit_max = 103;
452 else
453 old_tss_limit_max = 43;
454
455 /* read all the registers from the new TSS */
456 if (type & 8) {
457 /* 32 bit */
458 new_cr3 = ldl_kernel(tss_base + 0x1c);
459 new_eip = ldl_kernel(tss_base + 0x20);
460 new_eflags = ldl_kernel(tss_base + 0x24);
461 for(i = 0; i < 8; i++)
462 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
463 for(i = 0; i < 6; i++)
464 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
465 new_ldt = lduw_kernel(tss_base + 0x60);
466 new_trap = ldl_kernel(tss_base + 0x64);
467 } else {
468 /* 16 bit */
469 new_cr3 = 0;
470 new_eip = lduw_kernel(tss_base + 0x0e);
471 new_eflags = lduw_kernel(tss_base + 0x10);
472 for(i = 0; i < 8; i++)
473 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
474 for(i = 0; i < 4; i++)
475 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
476 new_ldt = lduw_kernel(tss_base + 0x2a);
477 new_segs[R_FS] = 0;
478 new_segs[R_GS] = 0;
479 new_trap = 0;
480 }
481
482 /* NOTE: we must avoid memory exceptions during the task switch,
483 so we make dummy accesses before */
484 /* XXX: it can still fail in some cases, so a bigger hack is
485 necessary to validate the TLB after having done the accesses */
486
487 v1 = ldub_kernel(env->tr.base);
488 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
489 stb_kernel(env->tr.base, v1);
490 stb_kernel(env->tr.base + old_tss_limit_max, v2);
491
492 /* clear busy bit (it is restartable) */
493 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
494 target_ulong ptr;
495 uint32_t e2;
496 ptr = env->gdt.base + (env->tr.selector & ~7);
497 e2 = ldl_kernel(ptr + 4);
498 e2 &= ~DESC_TSS_BUSY_MASK;
499 stl_kernel(ptr + 4, e2);
500 }
501 old_eflags = compute_eflags();
502 if (source == SWITCH_TSS_IRET)
503 old_eflags &= ~NT_MASK;
504
505 /* save the current state in the old TSS */
506 if (type & 8) {
507 /* 32 bit */
508 stl_kernel(env->tr.base + 0x20, next_eip);
509 stl_kernel(env->tr.base + 0x24, old_eflags);
510 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
511 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
512 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
513 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
514 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
515 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
516 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
517 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
518 for(i = 0; i < 6; i++)
519 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
520#if defined(VBOX) && defined(DEBUG)
521 printf("TSS 32 bits switch\n");
522 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
523#endif
524 } else {
525 /* 16 bit */
526 stw_kernel(env->tr.base + 0x0e, next_eip);
527 stw_kernel(env->tr.base + 0x10, old_eflags);
528 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
529 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
530 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
531 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
532 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
533 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
534 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
535 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
536 for(i = 0; i < 4; i++)
537 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
538 }
539
540 /* now if an exception occurs, it will occur in the next task
541 context */
542
543 if (source == SWITCH_TSS_CALL) {
544 stw_kernel(tss_base, env->tr.selector);
545 new_eflags |= NT_MASK;
546 }
547
548 /* set busy bit */
549 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
550 target_ulong ptr;
551 uint32_t e2;
552 ptr = env->gdt.base + (tss_selector & ~7);
553 e2 = ldl_kernel(ptr + 4);
554 e2 |= DESC_TSS_BUSY_MASK;
555 stl_kernel(ptr + 4, e2);
556 }
557
558 /* set the new CPU state */
559 /* from this point, any exception which occurs can give problems */
560 env->cr[0] |= CR0_TS_MASK;
561 env->hflags |= HF_TS_MASK;
562 env->tr.selector = tss_selector;
563 env->tr.base = tss_base;
564 env->tr.limit = tss_limit;
565 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
566
567 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
568 cpu_x86_update_cr3(env, new_cr3);
569 }
570
571 /* load all registers without an exception, then reload them with
572 possible exception */
573 env->eip = new_eip;
574 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
575 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
576 if (!(type & 8))
577 eflags_mask &= 0xffff;
578 load_eflags(new_eflags, eflags_mask);
579 /* XXX: what to do in 16 bit case ? */
580 EAX = new_regs[0];
581 ECX = new_regs[1];
582 EDX = new_regs[2];
583 EBX = new_regs[3];
584 ESP = new_regs[4];
585 EBP = new_regs[5];
586 ESI = new_regs[6];
587 EDI = new_regs[7];
588 if (new_eflags & VM_MASK) {
589 for(i = 0; i < 6; i++)
590 load_seg_vm(i, new_segs[i]);
591 /* in vm86, CPL is always 3 */
592 cpu_x86_set_cpl(env, 3);
593 } else {
594 /* CPL is set to the RPL of CS */
595 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
596 /* first just selectors as the rest may trigger exceptions */
597 for(i = 0; i < 6; i++)
598 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
599 }
600
601 env->ldt.selector = new_ldt & ~4;
602 env->ldt.base = 0;
603 env->ldt.limit = 0;
604 env->ldt.flags = 0;
605
606 /* load the LDT */
607 if (new_ldt & 4)
608 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
609
610 if ((new_ldt & 0xfffc) != 0) {
611 dt = &env->gdt;
612 index = new_ldt & ~7;
613 if ((index + 7) > dt->limit)
614 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
615 ptr = dt->base + index;
616 e1 = ldl_kernel(ptr);
617 e2 = ldl_kernel(ptr + 4);
618 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
619 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
620 if (!(e2 & DESC_P_MASK))
621 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
622 load_seg_cache_raw_dt(&env->ldt, e1, e2);
623 }
624
625 /* load the segments */
626 if (!(new_eflags & VM_MASK)) {
627 tss_load_seg(R_CS, new_segs[R_CS]);
628 tss_load_seg(R_SS, new_segs[R_SS]);
629 tss_load_seg(R_ES, new_segs[R_ES]);
630 tss_load_seg(R_DS, new_segs[R_DS]);
631 tss_load_seg(R_FS, new_segs[R_FS]);
632 tss_load_seg(R_GS, new_segs[R_GS]);
633 }
634
635 /* check that EIP is in the CS segment limits */
636 if (new_eip > env->segs[R_CS].limit) {
637 /* XXX: different exception if CALL ? */
638 raise_exception_err(EXCP0D_GPF, 0);
639 }
640}
641
642/* check if Port I/O is allowed in TSS */
643#ifndef VBOX
644static inline void check_io(int addr, int size)
645{
646 int io_offset, val, mask;
647
648#else /* VBOX */
649DECLINLINE(void) check_io(int addr, int size)
650{
651 int val, mask;
652 unsigned int io_offset;
653#endif /* VBOX */
654 /* TSS must be a valid 32 bit one */
655 if (!(env->tr.flags & DESC_P_MASK) ||
656 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
657 env->tr.limit < 103)
658 goto fail;
659 io_offset = lduw_kernel(env->tr.base + 0x66);
660 io_offset += (addr >> 3);
661 /* Note: the check needs two bytes */
662 if ((io_offset + 1) > env->tr.limit)
663 goto fail;
664 val = lduw_kernel(env->tr.base + io_offset);
665 val >>= (addr & 7);
666 mask = (1 << size) - 1;
667 /* all bits must be zero to allow the I/O */
668 if ((val & mask) != 0) {
669 fail:
670 raise_exception_err(EXCP0D_GPF, 0);
671 }
672}
673
674#ifdef VBOX
675/* Keep in sync with gen_check_external_event() */
676void helper_check_external_event()
677{
678 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
679 | CPU_INTERRUPT_EXTERNAL_TIMER
680 | CPU_INTERRUPT_EXTERNAL_DMA))
681 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
682 && (env->eflags & IF_MASK)
683 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
684 {
685 helper_external_event();
686 }
687
688}
689
690void helper_sync_seg(uint32_t reg)
691{
692 assert(env->segs[reg].newselector != 0);
693 sync_seg(env, reg, env->segs[reg].newselector);
694}
695#endif
696
697void helper_check_iob(uint32_t t0)
698{
699 check_io(t0, 1);
700}
701
702void helper_check_iow(uint32_t t0)
703{
704 check_io(t0, 2);
705}
706
707void helper_check_iol(uint32_t t0)
708{
709 check_io(t0, 4);
710}
711
712void helper_outb(uint32_t port, uint32_t data)
713{
714 cpu_outb(env, port, data & 0xff);
715}
716
717target_ulong helper_inb(uint32_t port)
718{
719 return cpu_inb(env, port);
720}
721
722void helper_outw(uint32_t port, uint32_t data)
723{
724 cpu_outw(env, port, data & 0xffff);
725}
726
727target_ulong helper_inw(uint32_t port)
728{
729 return cpu_inw(env, port);
730}
731
732void helper_outl(uint32_t port, uint32_t data)
733{
734 cpu_outl(env, port, data);
735}
736
737target_ulong helper_inl(uint32_t port)
738{
739 return cpu_inl(env, port);
740}
741
742#ifndef VBOX
743static inline unsigned int get_sp_mask(unsigned int e2)
744#else /* VBOX */
745DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
746#endif /* VBOX */
747{
748 if (e2 & DESC_B_MASK)
749 return 0xffffffff;
750 else
751 return 0xffff;
752}
753
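/* Update the stack pointer according to the stack segment size: only the
   bits selected by sp_mask are replaced for 16/32-bit stacks, while the
   full value is written for 64-bit stacks. */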
754#ifdef TARGET_X86_64
755#define SET_ESP(val, sp_mask)\
756do {\
757 if ((sp_mask) == 0xffff)\
758 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
759 else if ((sp_mask) == 0xffffffffLL)\
760 ESP = (uint32_t)(val);\
761 else\
762 ESP = (val);\
763} while (0)
764#else
765#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
766#endif
767
768/* On 64-bit machines this addition can overflow, so this segment addition macro
769 * is used to trim the value to 32 bits whenever needed */
770#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
771
772/* XXX: add a is_user flag to have proper security support */
773#define PUSHW(ssp, sp, sp_mask, val)\
774{\
775 sp -= 2;\
776 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
777}
778
779#define PUSHL(ssp, sp, sp_mask, val)\
780{\
781 sp -= 4;\
782 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
783}
784
785#define POPW(ssp, sp, sp_mask, val)\
786{\
787 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
788 sp += 2;\
789}
790
791#define POPL(ssp, sp, sp_mask, val)\
792{\
793 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
794 sp += 4;\
795}
796
797/* protected mode interrupt */
798static void do_interrupt_protected(int intno, int is_int, int error_code,
799 unsigned int next_eip, int is_hw)
800{
801 SegmentCache *dt;
802 target_ulong ptr, ssp;
803 int type, dpl, selector, ss_dpl, cpl;
804 int has_error_code, new_stack, shift;
805 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
806 uint32_t old_eip, sp_mask;
807
808#ifdef VBOX
809 ss = ss_e1 = ss_e2 = 0;
810# ifdef VBOX_WITH_VMI
811 if ( intno == 6
812 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
813 {
814 env->exception_index = EXCP_PARAV_CALL;
815 cpu_loop_exit();
816 }
817# endif
818 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
819 cpu_loop_exit();
820#endif
821
822 has_error_code = 0;
823 if (!is_int && !is_hw) {
824 switch(intno) {
825 case 8:
826 case 10:
827 case 11:
828 case 12:
829 case 13:
830 case 14:
831 case 17:
832 has_error_code = 1;
833 break;
834 }
835 }
836 if (is_int)
837 old_eip = next_eip;
838 else
839 old_eip = env->eip;
840
841 dt = &env->idt;
842#ifndef VBOX
843 if (intno * 8 + 7 > dt->limit)
844#else
845 if ((unsigned)intno * 8 + 7 > dt->limit)
846#endif
847 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
848 ptr = dt->base + intno * 8;
849 e1 = ldl_kernel(ptr);
850 e2 = ldl_kernel(ptr + 4);
851 /* check gate type */
852 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
853 switch(type) {
854 case 5: /* task gate */
855 /* must do that check here to return the correct error code */
856 if (!(e2 & DESC_P_MASK))
857 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
858 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
859 if (has_error_code) {
860 int type;
861 uint32_t mask;
862 /* push the error code */
863 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
864 shift = type >> 3;
865 if (env->segs[R_SS].flags & DESC_B_MASK)
866 mask = 0xffffffff;
867 else
868 mask = 0xffff;
869 esp = (ESP - (2 << shift)) & mask;
870 ssp = env->segs[R_SS].base + esp;
871 if (shift)
872 stl_kernel(ssp, error_code);
873 else
874 stw_kernel(ssp, error_code);
875 SET_ESP(esp, mask);
876 }
877 return;
878 case 6: /* 286 interrupt gate */
879 case 7: /* 286 trap gate */
880 case 14: /* 386 interrupt gate */
881 case 15: /* 386 trap gate */
882 break;
883 default:
884 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
885 break;
886 }
887 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
888 cpl = env->hflags & HF_CPL_MASK;
889 /* check privilege if software int */
890 if (is_int && dpl < cpl)
891 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
892 /* check valid bit */
893 if (!(e2 & DESC_P_MASK))
894 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
895 selector = e1 >> 16;
896 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
897 if ((selector & 0xfffc) == 0)
898 raise_exception_err(EXCP0D_GPF, 0);
899
900 if (load_segment(&e1, &e2, selector) != 0)
901 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
902 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
903 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
904 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
905 if (dpl > cpl)
906 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
907 if (!(e2 & DESC_P_MASK))
908 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
909 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
910 /* to inner privilege */
911 get_ss_esp_from_tss(&ss, &esp, dpl);
912 if ((ss & 0xfffc) == 0)
913 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
914 if ((ss & 3) != dpl)
915 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
916 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
917 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
918 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
919 if (ss_dpl != dpl)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 if (!(ss_e2 & DESC_S_MASK) ||
922 (ss_e2 & DESC_CS_MASK) ||
923 !(ss_e2 & DESC_W_MASK))
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 if (!(ss_e2 & DESC_P_MASK))
926#ifdef VBOX /* See page 3-477 of 253666.pdf */
927 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
928#else
929 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
930#endif
931 new_stack = 1;
932 sp_mask = get_sp_mask(ss_e2);
933 ssp = get_seg_base(ss_e1, ss_e2);
934#if defined(VBOX) && defined(DEBUG)
935 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
936#endif
937 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
938 /* to same privilege */
939 if (env->eflags & VM_MASK)
940 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
941 new_stack = 0;
942 sp_mask = get_sp_mask(env->segs[R_SS].flags);
943 ssp = env->segs[R_SS].base;
944 esp = ESP;
945 dpl = cpl;
946 } else {
947 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948 new_stack = 0; /* avoid warning */
949 sp_mask = 0; /* avoid warning */
950 ssp = 0; /* avoid warning */
951 esp = 0; /* avoid warning */
952 }
953
954 shift = type >> 3;
955
956#if 0
957 /* XXX: check that enough room is available */
958 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
959 if (env->eflags & VM_MASK)
960 push_size += 8;
961 push_size <<= shift;
962#endif
963 if (shift == 1) {
964 if (new_stack) {
965 if (env->eflags & VM_MASK) {
966 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
967 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
968 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
969 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
970 }
971 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
972 PUSHL(ssp, esp, sp_mask, ESP);
973 }
974 PUSHL(ssp, esp, sp_mask, compute_eflags());
975 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
976 PUSHL(ssp, esp, sp_mask, old_eip);
977 if (has_error_code) {
978 PUSHL(ssp, esp, sp_mask, error_code);
979 }
980 } else {
981 if (new_stack) {
982 if (env->eflags & VM_MASK) {
983 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
984 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
985 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
986 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
987 }
988 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
989 PUSHW(ssp, esp, sp_mask, ESP);
990 }
991 PUSHW(ssp, esp, sp_mask, compute_eflags());
992 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
993 PUSHW(ssp, esp, sp_mask, old_eip);
994 if (has_error_code) {
995 PUSHW(ssp, esp, sp_mask, error_code);
996 }
997 }
998
999 if (new_stack) {
1000 if (env->eflags & VM_MASK) {
1001 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1002 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1003 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1004 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1005 }
1006 ss = (ss & ~3) | dpl;
1007 cpu_x86_load_seg_cache(env, R_SS, ss,
1008 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1009 }
1010 SET_ESP(esp, sp_mask);
1011
1012 selector = (selector & ~3) | dpl;
1013 cpu_x86_load_seg_cache(env, R_CS, selector,
1014 get_seg_base(e1, e2),
1015 get_seg_limit(e1, e2),
1016 e2);
1017 cpu_x86_set_cpl(env, dpl);
1018 env->eip = offset;
1019
1020 /* interrupt gate clear IF mask */
1021 if ((type & 1) == 0) {
1022 env->eflags &= ~IF_MASK;
1023 }
1024 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1025}
1026#ifdef VBOX
1027
1028/* check if VME interrupt redirection is enabled in TSS */
1029DECLINLINE(bool) is_vme_irq_redirected(int intno)
1030{
1031 unsigned int io_offset, intredir_offset;
1032 unsigned char val, mask;
1033
1034 /* TSS must be a valid 32 bit one */
1035 if (!(env->tr.flags & DESC_P_MASK) ||
1036 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1037 env->tr.limit < 103)
1038 goto fail;
1039 io_offset = lduw_kernel(env->tr.base + 0x66);
1040 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1041 if (io_offset < 0x68 + 0x20)
1042 io_offset = 0x68 + 0x20;
1043 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1044 intredir_offset = io_offset - 0x20;
1045
1046 intredir_offset += (intno >> 3);
1047 if ((intredir_offset) > env->tr.limit)
1048 goto fail;
1049
1050 val = ldub_kernel(env->tr.base + intredir_offset);
1051 mask = 1 << (unsigned char)(intno & 7);
1052
1053 /* bit set means no redirection. */
1054 if ((val & mask) != 0) {
1055 return false;
1056 }
1057 return true;
1058
1059fail:
1060 raise_exception_err(EXCP0D_GPF, 0);
1061 return true;
1062}
1063
1064/* V86 mode software interrupt with CR4.VME=1 */
1065static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1066{
1067 target_ulong ptr, ssp;
1068 int selector;
1069 uint32_t offset, esp;
1070 uint32_t old_cs, old_eflags;
1071 uint32_t iopl;
1072
1073 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1074
1075 if (!is_vme_irq_redirected(intno))
1076 {
1077 if (iopl == 3)
1078 {
1079 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1080 return;
1081 }
1082 else
1083 raise_exception_err(EXCP0D_GPF, 0);
1084 }
1085
1086 /* virtual mode idt is at linear address 0 */
1087 ptr = 0 + intno * 4;
1088 offset = lduw_kernel(ptr);
1089 selector = lduw_kernel(ptr + 2);
1090 esp = ESP;
1091 ssp = env->segs[R_SS].base;
1092 old_cs = env->segs[R_CS].selector;
1093
1094 old_eflags = compute_eflags();
1095 if (iopl < 3)
1096 {
1097 /* copy VIF into IF and set IOPL to 3 */
1098 if (env->eflags & VIF_MASK)
1099 old_eflags |= IF_MASK;
1100 else
1101 old_eflags &= ~IF_MASK;
1102
1103 old_eflags |= (3 << IOPL_SHIFT);
1104 }
1105
1106 /* XXX: use SS segment size ? */
1107 PUSHW(ssp, esp, 0xffff, old_eflags);
1108 PUSHW(ssp, esp, 0xffff, old_cs);
1109 PUSHW(ssp, esp, 0xffff, next_eip);
1110
1111 /* update processor state */
1112 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1113 env->eip = offset;
1114 env->segs[R_CS].selector = selector;
1115 env->segs[R_CS].base = (selector << 4);
1116 env->eflags &= ~(TF_MASK | RF_MASK);
1117
1118 if (iopl < 3)
1119 env->eflags &= ~VIF_MASK;
1120 else
1121 env->eflags &= ~IF_MASK;
1122}
1123#endif /* VBOX */
1124
1125#ifdef TARGET_X86_64
1126
1127#define PUSHQ(sp, val)\
1128{\
1129 sp -= 8;\
1130 stq_kernel(sp, (val));\
1131}
1132
1133#define POPQ(sp, val)\
1134{\
1135 val = ldq_kernel(sp);\
1136 sp += 8;\
1137}
1138
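/* Read a stack pointer from the 64-bit TSS: levels 0-2 select RSP0-RSP2,
   level ist+3 selects the corresponding IST entry. */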
1139#ifndef VBOX
1140static inline target_ulong get_rsp_from_tss(int level)
1141#else /* VBOX */
1142DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1143#endif /* VBOX */
1144{
1145 int index;
1146
1147#if 0
1148 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1149 env->tr.base, env->tr.limit);
1150#endif
1151
1152 if (!(env->tr.flags & DESC_P_MASK))
1153 cpu_abort(env, "invalid tss");
1154 index = 8 * level + 4;
1155 if ((index + 7) > env->tr.limit)
1156 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1157 return ldq_kernel(env->tr.base + index);
1158}
1159
1160/* 64 bit interrupt */
1161static void do_interrupt64(int intno, int is_int, int error_code,
1162 target_ulong next_eip, int is_hw)
1163{
1164 SegmentCache *dt;
1165 target_ulong ptr;
1166 int type, dpl, selector, cpl, ist;
1167 int has_error_code, new_stack;
1168 uint32_t e1, e2, e3, ss;
1169 target_ulong old_eip, esp, offset;
1170
1171#ifdef VBOX
1172 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1173 cpu_loop_exit();
1174#endif
1175
1176 has_error_code = 0;
1177 if (!is_int && !is_hw) {
1178 switch(intno) {
1179 case 8:
1180 case 10:
1181 case 11:
1182 case 12:
1183 case 13:
1184 case 14:
1185 case 17:
1186 has_error_code = 1;
1187 break;
1188 }
1189 }
1190 if (is_int)
1191 old_eip = next_eip;
1192 else
1193 old_eip = env->eip;
1194
1195 dt = &env->idt;
1196 if (intno * 16 + 15 > dt->limit)
1197 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1198 ptr = dt->base + intno * 16;
1199 e1 = ldl_kernel(ptr);
1200 e2 = ldl_kernel(ptr + 4);
1201 e3 = ldl_kernel(ptr + 8);
1202 /* check gate type */
1203 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1204 switch(type) {
1205 case 14: /* 386 interrupt gate */
1206 case 15: /* 386 trap gate */
1207 break;
1208 default:
1209 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1210 break;
1211 }
1212 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1213 cpl = env->hflags & HF_CPL_MASK;
1214 /* check privilege if software int */
1215 if (is_int && dpl < cpl)
1216 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1217 /* check valid bit */
1218 if (!(e2 & DESC_P_MASK))
1219 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1220 selector = e1 >> 16;
1221 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1222 ist = e2 & 7;
1223 if ((selector & 0xfffc) == 0)
1224 raise_exception_err(EXCP0D_GPF, 0);
1225
1226 if (load_segment(&e1, &e2, selector) != 0)
1227 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1228 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1229 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1230 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1231 if (dpl > cpl)
1232 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1233 if (!(e2 & DESC_P_MASK))
1234 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1235 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1237 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1238 /* to inner privilege */
1239 if (ist != 0)
1240 esp = get_rsp_from_tss(ist + 3);
1241 else
1242 esp = get_rsp_from_tss(dpl);
1243 esp &= ~0xfLL; /* align stack */
1244 ss = 0;
1245 new_stack = 1;
1246 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1247 /* to same privilege */
1248 if (env->eflags & VM_MASK)
1249 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1250 new_stack = 0;
1251 if (ist != 0)
1252 esp = get_rsp_from_tss(ist + 3);
1253 else
1254 esp = ESP;
1255 esp &= ~0xfLL; /* align stack */
1256 dpl = cpl;
1257 } else {
1258 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1259 new_stack = 0; /* avoid warning */
1260 esp = 0; /* avoid warning */
1261 }
1262
1263 PUSHQ(esp, env->segs[R_SS].selector);
1264 PUSHQ(esp, ESP);
1265 PUSHQ(esp, compute_eflags());
1266 PUSHQ(esp, env->segs[R_CS].selector);
1267 PUSHQ(esp, old_eip);
1268 if (has_error_code) {
1269 PUSHQ(esp, error_code);
1270 }
1271
1272 if (new_stack) {
1273 ss = 0 | dpl;
1274 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1275 }
1276 ESP = esp;
1277
1278 selector = (selector & ~3) | dpl;
1279 cpu_x86_load_seg_cache(env, R_CS, selector,
1280 get_seg_base(e1, e2),
1281 get_seg_limit(e1, e2),
1282 e2);
1283 cpu_x86_set_cpl(env, dpl);
1284 env->eip = offset;
1285
1286 /* interrupt gate clear IF mask */
1287 if ((type & 1) == 0) {
1288 env->eflags &= ~IF_MASK;
1289 }
1290 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1291}
1292#endif
1293
1294#if defined(CONFIG_USER_ONLY)
1295void helper_syscall(int next_eip_addend)
1296{
1297 env->exception_index = EXCP_SYSCALL;
1298 env->exception_next_eip = env->eip + next_eip_addend;
1299 cpu_loop_exit();
1300}
1301#else
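/* SYSCALL: save the return RIP in RCX (and RFLAGS in R11 in long mode),
   load flat CS/SS descriptors from the selector in MSR_STAR[47:32] and
   branch to LSTAR/CSTAR (long mode) or to the EIP in STAR[31:0] (legacy
   mode). Raises #UD if EFER.SCE is clear. */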
1302void helper_syscall(int next_eip_addend)
1303{
1304 int selector;
1305
1306 if (!(env->efer & MSR_EFER_SCE)) {
1307 raise_exception_err(EXCP06_ILLOP, 0);
1308 }
1309 selector = (env->star >> 32) & 0xffff;
1310#ifdef TARGET_X86_64
1311 if (env->hflags & HF_LMA_MASK) {
1312 int code64;
1313
1314 ECX = env->eip + next_eip_addend;
1315 env->regs[11] = compute_eflags();
1316
1317 code64 = env->hflags & HF_CS64_MASK;
1318
1319 cpu_x86_set_cpl(env, 0);
1320 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1321 0, 0xffffffff,
1322 DESC_G_MASK | DESC_P_MASK |
1323 DESC_S_MASK |
1324 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1325 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1326 0, 0xffffffff,
1327 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1328 DESC_S_MASK |
1329 DESC_W_MASK | DESC_A_MASK);
1330 env->eflags &= ~env->fmask;
1331 load_eflags(env->eflags, 0);
1332 if (code64)
1333 env->eip = env->lstar;
1334 else
1335 env->eip = env->cstar;
1336 } else
1337#endif
1338 {
1339 ECX = (uint32_t)(env->eip + next_eip_addend);
1340
1341 cpu_x86_set_cpl(env, 0);
1342 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1343 0, 0xffffffff,
1344 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1345 DESC_S_MASK |
1346 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1347 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1348 0, 0xffffffff,
1349 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1350 DESC_S_MASK |
1351 DESC_W_MASK | DESC_A_MASK);
1352 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1353 env->eip = (uint32_t)env->star;
1354 }
1355}
1356#endif
1357
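/* SYSRET: return to CPL 3 using the selectors derived from MSR_STAR[63:48];
   in long mode RFLAGS is restored from R11 and RIP from RCX. Raises #UD if
   EFER.SCE is clear and #GP outside protected mode or when CPL != 0. */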
1358void helper_sysret(int dflag)
1359{
1360 int cpl, selector;
1361
1362 if (!(env->efer & MSR_EFER_SCE)) {
1363 raise_exception_err(EXCP06_ILLOP, 0);
1364 }
1365 cpl = env->hflags & HF_CPL_MASK;
1366 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1367 raise_exception_err(EXCP0D_GPF, 0);
1368 }
1369 selector = (env->star >> 48) & 0xffff;
1370#ifdef TARGET_X86_64
1371 if (env->hflags & HF_LMA_MASK) {
1372 if (dflag == 2) {
1373 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1374 0, 0xffffffff,
1375 DESC_G_MASK | DESC_P_MASK |
1376 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1377 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1378 DESC_L_MASK);
1379 env->eip = ECX;
1380 } else {
1381 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1382 0, 0xffffffff,
1383 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1384 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1385 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1386 env->eip = (uint32_t)ECX;
1387 }
1388 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1389 0, 0xffffffff,
1390 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1391 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1392 DESC_W_MASK | DESC_A_MASK);
1393 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1394 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1395 cpu_x86_set_cpl(env, 3);
1396 } else
1397#endif
1398 {
1399 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1400 0, 0xffffffff,
1401 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1402 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1403 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1404 env->eip = (uint32_t)ECX;
1405 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1406 0, 0xffffffff,
1407 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1408 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1409 DESC_W_MASK | DESC_A_MASK);
1410 env->eflags |= IF_MASK;
1411 cpu_x86_set_cpl(env, 3);
1412 }
1413#ifdef USE_KQEMU
1414 if (kqemu_is_ok(env)) {
1415 if (env->hflags & HF_LMA_MASK)
1416 CC_OP = CC_OP_EFLAGS;
1417 env->exception_index = -1;
1418 cpu_loop_exit();
1419 }
1420#endif
1421}
1422
1423#ifdef VBOX
1424/**
1425 * Checks and processes external VMM events.
1426 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1427 */
1428void helper_external_event(void)
1429{
1430#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1431 uintptr_t uSP;
1432# ifdef RT_ARCH_AMD64
1433 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1434# else
1435 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1436# endif
1437 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1438#endif
1439 /* Keep in sync with flags checked by gen_check_external_event() */
1440 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1441 {
1442 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1443 ~CPU_INTERRUPT_EXTERNAL_HARD);
1444 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1445 }
1446 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1447 {
1448 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1449 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1450 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1451 }
1452 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1453 {
1454 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1455 ~CPU_INTERRUPT_EXTERNAL_DMA);
1456 remR3DmaRun(env);
1457 }
1458 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1459 {
1460 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1461 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1462 remR3TimersRun(env);
1463 }
1464}
1465/* helper for recording call instruction addresses for later scanning */
1466void helper_record_call()
1467{
1468 if ( !(env->state & CPU_RAW_RING0)
1469 && (env->cr[0] & CR0_PG_MASK)
1470 && !(env->eflags & X86_EFL_IF))
1471 remR3RecordCall(env);
1472}
1473#endif /* VBOX */
1474
1475/* real mode interrupt */
1476static void do_interrupt_real(int intno, int is_int, int error_code,
1477 unsigned int next_eip)
1478{
1479 SegmentCache *dt;
1480 target_ulong ptr, ssp;
1481 int selector;
1482 uint32_t offset, esp;
1483 uint32_t old_cs, old_eip;
1484
1485 /* real mode (simpler !) */
1486 dt = &env->idt;
1487#ifndef VBOX
1488 if (intno * 4 + 3 > dt->limit)
1489#else
1490 if ((unsigned)intno * 4 + 3 > dt->limit)
1491#endif
1492 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1493 ptr = dt->base + intno * 4;
1494 offset = lduw_kernel(ptr);
1495 selector = lduw_kernel(ptr + 2);
1496 esp = ESP;
1497 ssp = env->segs[R_SS].base;
1498 if (is_int)
1499 old_eip = next_eip;
1500 else
1501 old_eip = env->eip;
1502 old_cs = env->segs[R_CS].selector;
1503 /* XXX: use SS segment size ? */
1504 PUSHW(ssp, esp, 0xffff, compute_eflags());
1505 PUSHW(ssp, esp, 0xffff, old_cs);
1506 PUSHW(ssp, esp, 0xffff, old_eip);
1507
1508 /* update processor state */
1509 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1510 env->eip = offset;
1511 env->segs[R_CS].selector = selector;
1512 env->segs[R_CS].base = (selector << 4);
1513 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1514}
1515
1516/* fake user mode interrupt */
1517void do_interrupt_user(int intno, int is_int, int error_code,
1518 target_ulong next_eip)
1519{
1520 SegmentCache *dt;
1521 target_ulong ptr;
1522 int dpl, cpl, shift;
1523 uint32_t e2;
1524
1525 dt = &env->idt;
1526 if (env->hflags & HF_LMA_MASK) {
1527 shift = 4;
1528 } else {
1529 shift = 3;
1530 }
1531 ptr = dt->base + (intno << shift);
1532 e2 = ldl_kernel(ptr + 4);
1533
1534 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1535 cpl = env->hflags & HF_CPL_MASK;
1536 /* check privilege if software int */
1537 if (is_int && dpl < cpl)
1538 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1539
1540 /* Since we emulate only user space, we cannot do more than
1541 exiting the emulation with the suitable exception and error
1542 code */
1543 if (is_int)
1544 EIP = next_eip;
1545}
1546
1547/*
1548 * Begin execution of an interruption. is_int is TRUE if coming from
1549 * the int instruction. next_eip is the EIP value AFTER the interrupt
1550 * instruction. It is only relevant if is_int is TRUE.
1551 */
1552void do_interrupt(int intno, int is_int, int error_code,
1553 target_ulong next_eip, int is_hw)
1554{
1555 if (loglevel & CPU_LOG_INT) {
1556 if ((env->cr[0] & CR0_PE_MASK)) {
1557 static int count;
1558 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1559 count, intno, error_code, is_int,
1560 env->hflags & HF_CPL_MASK,
1561 env->segs[R_CS].selector, EIP,
1562 (int)env->segs[R_CS].base + EIP,
1563 env->segs[R_SS].selector, ESP);
1564 if (intno == 0x0e) {
1565 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1566 } else {
1567 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1568 }
1569 fprintf(logfile, "\n");
1570 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1571#if 0
1572 {
1573 int i;
1574 uint8_t *ptr;
1575 fprintf(logfile, " code=");
1576 ptr = env->segs[R_CS].base + env->eip;
1577 for(i = 0; i < 16; i++) {
1578 fprintf(logfile, " %02x", ldub(ptr + i));
1579 }
1580 fprintf(logfile, "\n");
1581 }
1582#endif
1583 count++;
1584 }
1585 }
1586 if (env->cr[0] & CR0_PE_MASK) {
1587#ifdef TARGET_X86_64
1588 if (env->hflags & HF_LMA_MASK) {
1589 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1590 } else
1591#endif
1592 {
1593#ifdef VBOX
1594 /* INT xx in v86 code with VME enabled? */
1595 if ( (env->eflags & VM_MASK)
1596 && (env->cr[4] & CR4_VME_MASK)
1597 && is_int
1598 && !is_hw
1599 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1600 )
1601 do_soft_interrupt_vme(intno, error_code, next_eip);
1602 else
1603#endif /* VBOX */
1604 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1605 }
1606 } else {
1607 do_interrupt_real(intno, is_int, error_code, next_eip);
1608 }
1609}
1610
1611/*
1612 * Check nested exceptions and change to double or triple fault if
1613 * needed. It should only be called if this is not an interrupt.
1614 * Returns the new exception number.
1615 */
1616static int check_exception(int intno, int *error_code)
1617{
1618 int first_contributory = env->old_exception == 0 ||
1619 (env->old_exception >= 10 &&
1620 env->old_exception <= 13);
1621 int second_contributory = intno == 0 ||
1622 (intno >= 10 && intno <= 13);
1623
1624 if (loglevel & CPU_LOG_INT)
1625 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1626 env->old_exception, intno);
1627
1628 if (env->old_exception == EXCP08_DBLE)
1629 cpu_abort(env, "triple fault");
1630
1631 if ((first_contributory && second_contributory)
1632 || (env->old_exception == EXCP0E_PAGE &&
1633 (second_contributory || (intno == EXCP0E_PAGE)))) {
1634 intno = EXCP08_DBLE;
1635 *error_code = 0;
1636 }
1637
1638 if (second_contributory || (intno == EXCP0E_PAGE) ||
1639 (intno == EXCP08_DBLE))
1640 env->old_exception = intno;
1641
1642 return intno;
1643}
1644
1645/*
1646 * Signal an interruption. It is executed in the main CPU loop.
1647 * is_int is TRUE if coming from the int instruction. next_eip is the
1648 * EIP value AFTER the interrupt instruction. It is only relevant if
1649 * is_int is TRUE.
1650 */
1651void raise_interrupt(int intno, int is_int, int error_code,
1652 int next_eip_addend)
1653{
1654#if defined(VBOX) && defined(DEBUG)
1655 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1656#endif
1657 if (!is_int) {
1658 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1659 intno = check_exception(intno, &error_code);
1660 } else {
1661 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1662 }
1663
1664 env->exception_index = intno;
1665 env->error_code = error_code;
1666 env->exception_is_int = is_int;
1667 env->exception_next_eip = env->eip + next_eip_addend;
1668 cpu_loop_exit();
1669}
1670
1671/* shortcuts to generate exceptions */
1672
1673void (raise_exception_err)(int exception_index, int error_code)
1674{
1675 raise_interrupt(exception_index, 0, error_code, 0);
1676}
1677
1678void raise_exception(int exception_index)
1679{
1680 raise_interrupt(exception_index, 0, 0, 0);
1681}
1682
1683/* SMM support */
1684
1685#if defined(CONFIG_USER_ONLY)
1686
1687void do_smm_enter(void)
1688{
1689}
1690
1691void helper_rsm(void)
1692{
1693}
1694
1695#else
1696
1697#ifdef TARGET_X86_64
1698#define SMM_REVISION_ID 0x00020064
1699#else
1700#define SMM_REVISION_ID 0x00020000
1701#endif
1702
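/* Enter System Management Mode: save the CPU state into the SMRAM state
   save area at smbase + 0x8000, then start executing at CS=smbase>>4,
   EIP=0x8000 with paging and protection disabled. */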
1703void do_smm_enter(void)
1704{
1705 target_ulong sm_state;
1706 SegmentCache *dt;
1707 int i, offset;
1708
1709 if (loglevel & CPU_LOG_INT) {
1710 fprintf(logfile, "SMM: enter\n");
1711 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1712 }
1713
1714 env->hflags |= HF_SMM_MASK;
1715 cpu_smm_update(env);
1716
1717 sm_state = env->smbase + 0x8000;
1718
1719#ifdef TARGET_X86_64
1720 for(i = 0; i < 6; i++) {
1721 dt = &env->segs[i];
1722 offset = 0x7e00 + i * 16;
1723 stw_phys(sm_state + offset, dt->selector);
1724 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1725 stl_phys(sm_state + offset + 4, dt->limit);
1726 stq_phys(sm_state + offset + 8, dt->base);
1727 }
1728
1729 stq_phys(sm_state + 0x7e68, env->gdt.base);
1730 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1731
1732 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1733 stq_phys(sm_state + 0x7e78, env->ldt.base);
1734 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1735 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1736
1737 stq_phys(sm_state + 0x7e88, env->idt.base);
1738 stl_phys(sm_state + 0x7e84, env->idt.limit);
1739
1740 stw_phys(sm_state + 0x7e90, env->tr.selector);
1741 stq_phys(sm_state + 0x7e98, env->tr.base);
1742 stl_phys(sm_state + 0x7e94, env->tr.limit);
1743 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1744
1745 stq_phys(sm_state + 0x7ed0, env->efer);
1746
1747 stq_phys(sm_state + 0x7ff8, EAX);
1748 stq_phys(sm_state + 0x7ff0, ECX);
1749 stq_phys(sm_state + 0x7fe8, EDX);
1750 stq_phys(sm_state + 0x7fe0, EBX);
1751 stq_phys(sm_state + 0x7fd8, ESP);
1752 stq_phys(sm_state + 0x7fd0, EBP);
1753 stq_phys(sm_state + 0x7fc8, ESI);
1754 stq_phys(sm_state + 0x7fc0, EDI);
1755 for(i = 8; i < 16; i++)
1756 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1757 stq_phys(sm_state + 0x7f78, env->eip);
1758 stl_phys(sm_state + 0x7f70, compute_eflags());
1759 stl_phys(sm_state + 0x7f68, env->dr[6]);
1760 stl_phys(sm_state + 0x7f60, env->dr[7]);
1761
1762 stl_phys(sm_state + 0x7f48, env->cr[4]);
1763 stl_phys(sm_state + 0x7f50, env->cr[3]);
1764 stl_phys(sm_state + 0x7f58, env->cr[0]);
1765
1766 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1767 stl_phys(sm_state + 0x7f00, env->smbase);
1768#else
1769 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1770 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1771 stl_phys(sm_state + 0x7ff4, compute_eflags());
1772 stl_phys(sm_state + 0x7ff0, env->eip);
1773 stl_phys(sm_state + 0x7fec, EDI);
1774 stl_phys(sm_state + 0x7fe8, ESI);
1775 stl_phys(sm_state + 0x7fe4, EBP);
1776 stl_phys(sm_state + 0x7fe0, ESP);
1777 stl_phys(sm_state + 0x7fdc, EBX);
1778 stl_phys(sm_state + 0x7fd8, EDX);
1779 stl_phys(sm_state + 0x7fd4, ECX);
1780 stl_phys(sm_state + 0x7fd0, EAX);
1781 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1782 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1783
1784 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1785 stl_phys(sm_state + 0x7f64, env->tr.base);
1786 stl_phys(sm_state + 0x7f60, env->tr.limit);
1787 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1788
1789 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1790 stl_phys(sm_state + 0x7f80, env->ldt.base);
1791 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1792 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1793
1794 stl_phys(sm_state + 0x7f74, env->gdt.base);
1795 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1796
1797 stl_phys(sm_state + 0x7f58, env->idt.base);
1798 stl_phys(sm_state + 0x7f54, env->idt.limit);
1799
1800 for(i = 0; i < 6; i++) {
1801 dt = &env->segs[i];
1802 if (i < 3)
1803 offset = 0x7f84 + i * 12;
1804 else
1805 offset = 0x7f2c + (i - 3) * 12;
1806 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1807 stl_phys(sm_state + offset + 8, dt->base);
1808 stl_phys(sm_state + offset + 4, dt->limit);
1809 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1810 }
1811 stl_phys(sm_state + 0x7f14, env->cr[4]);
1812
1813 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1814 stl_phys(sm_state + 0x7ef8, env->smbase);
1815#endif
1816 /* init SMM cpu state */
1817
1818#ifdef TARGET_X86_64
1819 cpu_load_efer(env, 0);
1820#endif
1821 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1822 env->eip = 0x00008000;
1823 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1824 0xffffffff, 0);
1825 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1826 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1827 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1828 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1829 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1830
1831 cpu_x86_update_cr0(env,
1832 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1833 cpu_x86_update_cr4(env, 0);
1834 env->dr[7] = 0x00000400;
1835 CC_OP = CC_OP_EFLAGS;
1836}
1837
1838void helper_rsm(void)
1839{
1840#ifdef VBOX
1841 cpu_abort(env, "helper_rsm");
1842#else /* !VBOX */
1843
1844
1845 target_ulong sm_state;
1846 int i, offset;
1847 uint32_t val;
1848
1849 sm_state = env->smbase + 0x8000;
1850#ifdef TARGET_X86_64
1851 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1852
1853 for(i = 0; i < 6; i++) {
1854 offset = 0x7e00 + i * 16;
1855 cpu_x86_load_seg_cache(env, i,
1856 lduw_phys(sm_state + offset),
1857 ldq_phys(sm_state + offset + 8),
1858 ldl_phys(sm_state + offset + 4),
1859 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1860 }
1861
1862 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1863 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1864
1865 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1866 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1867 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1868 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1869
1870 env->idt.base = ldq_phys(sm_state + 0x7e88);
1871 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1872
1873 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1874 env->tr.base = ldq_phys(sm_state + 0x7e98);
1875 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1876 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1877
1878 EAX = ldq_phys(sm_state + 0x7ff8);
1879 ECX = ldq_phys(sm_state + 0x7ff0);
1880 EDX = ldq_phys(sm_state + 0x7fe8);
1881 EBX = ldq_phys(sm_state + 0x7fe0);
1882 ESP = ldq_phys(sm_state + 0x7fd8);
1883 EBP = ldq_phys(sm_state + 0x7fd0);
1884 ESI = ldq_phys(sm_state + 0x7fc8);
1885 EDI = ldq_phys(sm_state + 0x7fc0);
1886 for(i = 8; i < 16; i++)
1887 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1888 env->eip = ldq_phys(sm_state + 0x7f78);
1889 load_eflags(ldl_phys(sm_state + 0x7f70),
1890 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1891 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1892 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1893
1894 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1895 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1896 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1897
1898 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1899 if (val & 0x20000) {
1900 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1901 }
1902#else
1903 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1904 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1905 load_eflags(ldl_phys(sm_state + 0x7ff4),
1906 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1907 env->eip = ldl_phys(sm_state + 0x7ff0);
1908 EDI = ldl_phys(sm_state + 0x7fec);
1909 ESI = ldl_phys(sm_state + 0x7fe8);
1910 EBP = ldl_phys(sm_state + 0x7fe4);
1911 ESP = ldl_phys(sm_state + 0x7fe0);
1912 EBX = ldl_phys(sm_state + 0x7fdc);
1913 EDX = ldl_phys(sm_state + 0x7fd8);
1914 ECX = ldl_phys(sm_state + 0x7fd4);
1915 EAX = ldl_phys(sm_state + 0x7fd0);
1916 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1917 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1918
1919 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1920 env->tr.base = ldl_phys(sm_state + 0x7f64);
1921 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1922 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1923
1924 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1925 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1926 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1927 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1928
1929 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1930 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1931
1932 env->idt.base = ldl_phys(sm_state + 0x7f58);
1933 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1934
1935 for(i = 0; i < 6; i++) {
1936 if (i < 3)
1937 offset = 0x7f84 + i * 12;
1938 else
1939 offset = 0x7f2c + (i - 3) * 12;
1940 cpu_x86_load_seg_cache(env, i,
1941 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1942 ldl_phys(sm_state + offset + 8),
1943 ldl_phys(sm_state + offset + 4),
1944 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1945 }
1946 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1947
1948 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1949 if (val & 0x20000) {
1950 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1951 }
1952#endif
1953 CC_OP = CC_OP_EFLAGS;
1954 env->hflags &= ~HF_SMM_MASK;
1955 cpu_smm_update(env);
1956
1957 if (loglevel & CPU_LOG_INT) {
1958 fprintf(logfile, "SMM: after RSM\n");
1959 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1960 }
1961#endif /* !VBOX */
1962}
1963
1964#endif /* !CONFIG_USER_ONLY */
1965
1966
1967/* division, flags are undefined */
1968
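/* Note: DIV/IDIV take the implicit dividend from AX, DX:AX or EDX:EAX and the
   divisor from t0.  The quotient goes to AL/AX/EAX and the remainder to
   AH/DX/EDX.  Both a zero divisor and a quotient that does not fit in the
   destination raise #DE (EXCP00_DIVZ); e.g. DIV r/m8 with AX=0x0200 and a
   divisor of 2 yields quotient 0x100, which overflows AL and faults. */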
1969void helper_divb_AL(target_ulong t0)
1970{
1971 unsigned int num, den, q, r;
1972
1973 num = (EAX & 0xffff);
1974 den = (t0 & 0xff);
1975 if (den == 0) {
1976 raise_exception(EXCP00_DIVZ);
1977 }
1978 q = (num / den);
1979 if (q > 0xff)
1980 raise_exception(EXCP00_DIVZ);
1981 q &= 0xff;
1982 r = (num % den) & 0xff;
1983 EAX = (EAX & ~0xffff) | (r << 8) | q;
1984}
1985
1986void helper_idivb_AL(target_ulong t0)
1987{
1988 int num, den, q, r;
1989
1990 num = (int16_t)EAX;
1991 den = (int8_t)t0;
1992 if (den == 0) {
1993 raise_exception(EXCP00_DIVZ);
1994 }
1995 q = (num / den);
1996 if (q != (int8_t)q)
1997 raise_exception(EXCP00_DIVZ);
1998 q &= 0xff;
1999 r = (num % den) & 0xff;
2000 EAX = (EAX & ~0xffff) | (r << 8) | q;
2001}
2002
2003void helper_divw_AX(target_ulong t0)
2004{
2005 unsigned int num, den, q, r;
2006
2007 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2008 den = (t0 & 0xffff);
2009 if (den == 0) {
2010 raise_exception(EXCP00_DIVZ);
2011 }
2012 q = (num / den);
2013 if (q > 0xffff)
2014 raise_exception(EXCP00_DIVZ);
2015 q &= 0xffff;
2016 r = (num % den) & 0xffff;
2017 EAX = (EAX & ~0xffff) | q;
2018 EDX = (EDX & ~0xffff) | r;
2019}
2020
2021void helper_idivw_AX(target_ulong t0)
2022{
2023 int num, den, q, r;
2024
2025 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2026 den = (int16_t)t0;
2027 if (den == 0) {
2028 raise_exception(EXCP00_DIVZ);
2029 }
2030 q = (num / den);
2031 if (q != (int16_t)q)
2032 raise_exception(EXCP00_DIVZ);
2033 q &= 0xffff;
2034 r = (num % den) & 0xffff;
2035 EAX = (EAX & ~0xffff) | q;
2036 EDX = (EDX & ~0xffff) | r;
2037}
2038
2039void helper_divl_EAX(target_ulong t0)
2040{
2041 unsigned int den, r;
2042 uint64_t num, q;
2043
2044 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2045 den = t0;
2046 if (den == 0) {
2047 raise_exception(EXCP00_DIVZ);
2048 }
2049 q = (num / den);
2050 r = (num % den);
2051 if (q > 0xffffffff)
2052 raise_exception(EXCP00_DIVZ);
2053 EAX = (uint32_t)q;
2054 EDX = (uint32_t)r;
2055}
2056
2057void helper_idivl_EAX(target_ulong t0)
2058{
2059 int den, r;
2060 int64_t num, q;
2061
2062 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2063 den = t0;
2064 if (den == 0) {
2065 raise_exception(EXCP00_DIVZ);
2066 }
2067 q = (num / den);
2068 r = (num % den);
2069 if (q != (int32_t)q)
2070 raise_exception(EXCP00_DIVZ);
2071 EAX = (uint32_t)q;
2072 EDX = (uint32_t)r;
2073}
2074
2075/* bcd */
2076
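/* Note: AAM divides AL by the immediate base (10 for the plain opcode) and
   puts the quotient in AH and the remainder in AL; AAD is the inverse
   (AL = AH * base + AL, AH = 0).  On real hardware AAM with a base of 0
   raises #DE, which is presumably the missing check the XXX below refers to. */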
2077/* XXX: exception */
2078void helper_aam(int base)
2079{
2080 int al, ah;
2081 al = EAX & 0xff;
2082 ah = al / base;
2083 al = al % base;
2084 EAX = (EAX & ~0xffff) | al | (ah << 8);
2085 CC_DST = al;
2086}
2087
2088void helper_aad(int base)
2089{
2090 int al, ah;
2091 al = EAX & 0xff;
2092 ah = (EAX >> 8) & 0xff;
2093 al = ((ah * base) + al) & 0xff;
2094 EAX = (EAX & ~0xffff) | al;
2095 CC_DST = al;
2096}
2097
2098void helper_aaa(void)
2099{
2100 int icarry;
2101 int al, ah, af;
2102 int eflags;
2103
2104 eflags = cc_table[CC_OP].compute_all();
2105 af = eflags & CC_A;
2106 al = EAX & 0xff;
2107 ah = (EAX >> 8) & 0xff;
2108
2109 icarry = (al > 0xf9);
2110 if (((al & 0x0f) > 9 ) || af) {
2111 al = (al + 6) & 0x0f;
2112 ah = (ah + 1 + icarry) & 0xff;
2113 eflags |= CC_C | CC_A;
2114 } else {
2115 eflags &= ~(CC_C | CC_A);
2116 al &= 0x0f;
2117 }
2118 EAX = (EAX & ~0xffff) | al | (ah << 8);
2119 CC_SRC = eflags;
2120 FORCE_RET();
2121}
2122
2123void helper_aas(void)
2124{
2125 int icarry;
2126 int al, ah, af;
2127 int eflags;
2128
2129 eflags = cc_table[CC_OP].compute_all();
2130 af = eflags & CC_A;
2131 al = EAX & 0xff;
2132 ah = (EAX >> 8) & 0xff;
2133
2134 icarry = (al < 6);
2135 if (((al & 0x0f) > 9 ) || af) {
2136 al = (al - 6) & 0x0f;
2137 ah = (ah - 1 - icarry) & 0xff;
2138 eflags |= CC_C | CC_A;
2139 } else {
2140 eflags &= ~(CC_C | CC_A);
2141 al &= 0x0f;
2142 }
2143 EAX = (EAX & ~0xffff) | al | (ah << 8);
2144 CC_SRC = eflags;
2145 FORCE_RET();
2146}
2147
2148void helper_daa(void)
2149{
2150 int al, af, cf;
2151 int eflags;
2152
2153 eflags = cc_table[CC_OP].compute_all();
2154 cf = eflags & CC_C;
2155 af = eflags & CC_A;
2156 al = EAX & 0xff;
2157
2158 eflags = 0;
2159 if (((al & 0x0f) > 9 ) || af) {
2160 al = (al + 6) & 0xff;
2161 eflags |= CC_A;
2162 }
2163 if ((al > 0x9f) || cf) {
2164 al = (al + 0x60) & 0xff;
2165 eflags |= CC_C;
2166 }
2167 EAX = (EAX & ~0xff) | al;
2168 /* well, speed is not an issue here, so we compute the flags by hand */
2169 eflags |= (al == 0) << 6; /* zf */
2170 eflags |= parity_table[al]; /* pf */
2171 eflags |= (al & 0x80); /* sf */
2172 CC_SRC = eflags;
2173 FORCE_RET();
2174}
2175
2176void helper_das(void)
2177{
2178 int al, al1, af, cf;
2179 int eflags;
2180
2181 eflags = cc_table[CC_OP].compute_all();
2182 cf = eflags & CC_C;
2183 af = eflags & CC_A;
2184 al = EAX & 0xff;
2185
2186 eflags = 0;
2187 al1 = al;
2188 if (((al & 0x0f) > 9 ) || af) {
2189 eflags |= CC_A;
2190 if (al < 6 || cf)
2191 eflags |= CC_C;
2192 al = (al - 6) & 0xff;
2193 }
2194 if ((al1 > 0x99) || cf) {
2195 al = (al - 0x60) & 0xff;
2196 eflags |= CC_C;
2197 }
2198 EAX = (EAX & ~0xff) | al;
2199 /* well, speed is not an issue here, so we compute the flags by hand */
2200 eflags |= (al == 0) << 6; /* zf */
2201 eflags |= parity_table[al]; /* pf */
2202 eflags |= (al & 0x80); /* sf */
2203 CC_SRC = eflags;
2204 FORCE_RET();
2205}
2206
2207void helper_into(int next_eip_addend)
2208{
2209 int eflags;
2210 eflags = cc_table[CC_OP].compute_all();
2211 if (eflags & CC_O) {
2212 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2213 }
2214}
2215
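/* Note: CMPXCHG8B compares EDX:EAX with the 64-bit memory operand at a0.  On
   a match it stores ECX:EBX and sets ZF, otherwise it loads the old value
   into EDX:EAX and clears ZF.  The store in the failure path writes the old
   value back, presumably so the memory access pattern (and any write
   protection fault) matches the locked read-modify-write of the real
   instruction. */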
2216void helper_cmpxchg8b(target_ulong a0)
2217{
2218 uint64_t d;
2219 int eflags;
2220
2221 eflags = cc_table[CC_OP].compute_all();
2222 d = ldq(a0);
2223 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2224 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2225 eflags |= CC_Z;
2226 } else {
2227 /* always do the store */
2228 stq(a0, d);
2229 EDX = (uint32_t)(d >> 32);
2230 EAX = (uint32_t)d;
2231 eflags &= ~CC_Z;
2232 }
2233 CC_SRC = eflags;
2234}
2235
2236#ifdef TARGET_X86_64
2237void helper_cmpxchg16b(target_ulong a0)
2238{
2239 uint64_t d0, d1;
2240 int eflags;
2241
2242 if ((a0 & 0xf) != 0)
2243 raise_exception(EXCP0D_GPF);
2244 eflags = cc_table[CC_OP].compute_all();
2245 d0 = ldq(a0);
2246 d1 = ldq(a0 + 8);
2247 if (d0 == EAX && d1 == EDX) {
2248 stq(a0, EBX);
2249 stq(a0 + 8, ECX);
2250 eflags |= CC_Z;
2251 } else {
2252 /* always do the store */
2253 stq(a0, d0);
2254 stq(a0 + 8, d1);
2255 EDX = d1;
2256 EAX = d0;
2257 eflags &= ~CC_Z;
2258 }
2259 CC_SRC = eflags;
2260}
2261#endif
2262
2263void helper_single_step(void)
2264{
2265 env->dr[6] |= 0x4000;
2266 raise_exception(EXCP01_SSTP);
2267}
2268
2269void helper_cpuid(void)
2270{
2271#ifndef VBOX
2272 uint32_t index;
2273
2274 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2275
2276 index = (uint32_t)EAX;
2277 /* test if maximum index reached */
2278 if (index & 0x80000000) {
2279 if (index > env->cpuid_xlevel)
2280 index = env->cpuid_level;
2281 } else {
2282 if (index > env->cpuid_level)
2283 index = env->cpuid_level;
2284 }
2285
2286 switch(index) {
2287 case 0:
2288 EAX = env->cpuid_level;
2289 EBX = env->cpuid_vendor1;
2290 EDX = env->cpuid_vendor2;
2291 ECX = env->cpuid_vendor3;
2292 break;
2293 case 1:
2294 EAX = env->cpuid_version;
2295 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2296 ECX = env->cpuid_ext_features;
2297 EDX = env->cpuid_features;
2298 break;
2299 case 2:
2300 /* cache info: needed for Pentium Pro compatibility */
2301 EAX = 1;
2302 EBX = 0;
2303 ECX = 0;
2304 EDX = 0x2c307d;
2305 break;
2306 case 4:
2307 /* cache info: needed for Core compatibility */
2308 switch (ECX) {
2309 case 0: /* L1 dcache info */
2310 EAX = 0x0000121;
2311 EBX = 0x1c0003f;
2312 ECX = 0x000003f;
2313 EDX = 0x0000001;
2314 break;
2315 case 1: /* L1 icache info */
2316 EAX = 0x0000122;
2317 EBX = 0x1c0003f;
2318 ECX = 0x000003f;
2319 EDX = 0x0000001;
2320 break;
2321 case 2: /* L2 cache info */
2322 EAX = 0x0000143;
2323 EBX = 0x3c0003f;
2324 ECX = 0x0000fff;
2325 EDX = 0x0000001;
2326 break;
2327 default: /* end of info */
2328 EAX = 0;
2329 EBX = 0;
2330 ECX = 0;
2331 EDX = 0;
2332 break;
2333 }
2334
2335 break;
2336 case 5:
2337 /* mwait info: needed for Core compatibility */
2338 EAX = 0; /* Smallest monitor-line size in bytes */
2339 EBX = 0; /* Largest monitor-line size in bytes */
2340 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2341 EDX = 0;
2342 break;
2343 case 6:
2344 /* Thermal and Power Leaf */
2345 EAX = 0;
2346 EBX = 0;
2347 ECX = 0;
2348 EDX = 0;
2349 break;
2350 case 9:
2351 /* Direct Cache Access Information Leaf */
2352 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2353 EBX = 0;
2354 ECX = 0;
2355 EDX = 0;
2356 break;
2357 case 0xA:
2358 /* Architectural Performance Monitoring Leaf */
2359 EAX = 0;
2360 EBX = 0;
2361 ECX = 0;
2362 EDX = 0;
2363 break;
2364 case 0x80000000:
2365 EAX = env->cpuid_xlevel;
2366 EBX = env->cpuid_vendor1;
2367 EDX = env->cpuid_vendor2;
2368 ECX = env->cpuid_vendor3;
2369 break;
2370 case 0x80000001:
2371 EAX = env->cpuid_features;
2372 EBX = 0;
2373 ECX = env->cpuid_ext3_features;
2374 EDX = env->cpuid_ext2_features;
2375 break;
2376 case 0x80000002:
2377 case 0x80000003:
2378 case 0x80000004:
2379 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2380 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2381 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2382 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2383 break;
2384 case 0x80000005:
2385 /* cache info (L1 cache) */
2386 EAX = 0x01ff01ff;
2387 EBX = 0x01ff01ff;
2388 ECX = 0x40020140;
2389 EDX = 0x40020140;
2390 break;
2391 case 0x80000006:
2392 /* cache info (L2 cache) */
2393 EAX = 0;
2394 EBX = 0x42004200;
2395 ECX = 0x02008140;
2396 EDX = 0;
2397 break;
2398 case 0x80000008:
2399 /* virtual & phys address size in low 2 bytes. */
2400/* XXX: This value must match the one used in the MMU code. */
2401 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2402 /* 64 bit processor */
2403#if defined(USE_KQEMU)
2404 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2405#else
2406/* XXX: The physical address space is limited to 42 bits in exec.c. */
2407 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2408#endif
2409 } else {
2410#if defined(USE_KQEMU)
2411 EAX = 0x00000020; /* 32 bits physical */
2412#else
2413 if (env->cpuid_features & CPUID_PSE36)
2414 EAX = 0x00000024; /* 36 bits physical */
2415 else
2416 EAX = 0x00000020; /* 32 bits physical */
2417#endif
2418 }
2419 EBX = 0;
2420 ECX = 0;
2421 EDX = 0;
2422 break;
2423 case 0x8000000A:
2424 EAX = 0x00000001;
2425 EBX = 0;
2426 ECX = 0;
2427 EDX = 0;
2428 break;
2429 default:
2430 /* reserved values: zero */
2431 EAX = 0;
2432 EBX = 0;
2433 ECX = 0;
2434 EDX = 0;
2435 break;
2436 }
2437#else /* VBOX */
2438 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2439#endif /* VBOX */
2440}
2441
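/* Note: helper_enter_level implements the nested part of ENTER.  For a
   nesting level > 0 it copies the level-1 enclosing frame pointers (the
   "display") from the old frame to the new one and then stores the new frame
   pointer t1; it only writes stack memory and leaves ESP/EBP untouched. */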
2442void helper_enter_level(int level, int data32, target_ulong t1)
2443{
2444 target_ulong ssp;
2445 uint32_t esp_mask, esp, ebp;
2446
2447 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2448 ssp = env->segs[R_SS].base;
2449 ebp = EBP;
2450 esp = ESP;
2451 if (data32) {
2452 /* 32 bit */
2453 esp -= 4;
2454 while (--level) {
2455 esp -= 4;
2456 ebp -= 4;
2457 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2458 }
2459 esp -= 4;
2460 stl(ssp + (esp & esp_mask), t1);
2461 } else {
2462 /* 16 bit */
2463 esp -= 2;
2464 while (--level) {
2465 esp -= 2;
2466 ebp -= 2;
2467 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2468 }
2469 esp -= 2;
2470 stw(ssp + (esp & esp_mask), t1);
2471 }
2472}
2473
2474#ifdef TARGET_X86_64
2475void helper_enter64_level(int level, int data64, target_ulong t1)
2476{
2477 target_ulong esp, ebp;
2478 ebp = EBP;
2479 esp = ESP;
2480
2481 if (data64) {
2482 /* 64 bit */
2483 esp -= 8;
2484 while (--level) {
2485 esp -= 8;
2486 ebp -= 8;
2487 stq(esp, ldq(ebp));
2488 }
2489 esp -= 8;
2490 stq(esp, t1);
2491 } else {
2492 /* 16 bit */
2493 esp -= 2;
2494 while (--level) {
2495 esp -= 2;
2496 ebp -= 2;
2497 stw(esp, lduw(ebp));
2498 }
2499 esp -= 2;
2500 stw(esp, t1);
2501 }
2502}
2503#endif
2504
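/* Note: LLDT/LTR load the LDTR and TR system registers from a GDT descriptor.
   The selector must not have the TI bit set and the descriptor must be of the
   proper system type (LDT for LLDT, available TSS for LTR); in long mode
   system descriptors are 16 bytes, hence entry_limit = 15 instead of 7.
   LTR additionally marks the loaded TSS descriptor busy. */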
2505void helper_lldt(int selector)
2506{
2507 SegmentCache *dt;
2508 uint32_t e1, e2;
2509#ifndef VBOX
2510 int index, entry_limit;
2511#else
2512 unsigned int index, entry_limit;
2513#endif
2514 target_ulong ptr;
2515
2516#ifdef VBOX
2517 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2518 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2519#endif
2520
2521 selector &= 0xffff;
2522 if ((selector & 0xfffc) == 0) {
2523 /* XXX: NULL selector case: invalid LDT */
2524 env->ldt.base = 0;
2525 env->ldt.limit = 0;
2526 } else {
2527 if (selector & 0x4)
2528 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2529 dt = &env->gdt;
2530 index = selector & ~7;
2531#ifdef TARGET_X86_64
2532 if (env->hflags & HF_LMA_MASK)
2533 entry_limit = 15;
2534 else
2535#endif
2536 entry_limit = 7;
2537 if ((index + entry_limit) > dt->limit)
2538 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2539 ptr = dt->base + index;
2540 e1 = ldl_kernel(ptr);
2541 e2 = ldl_kernel(ptr + 4);
2542 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2543 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2544 if (!(e2 & DESC_P_MASK))
2545 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2546#ifdef TARGET_X86_64
2547 if (env->hflags & HF_LMA_MASK) {
2548 uint32_t e3;
2549 e3 = ldl_kernel(ptr + 8);
2550 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2551 env->ldt.base |= (target_ulong)e3 << 32;
2552 } else
2553#endif
2554 {
2555 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2556 }
2557 }
2558 env->ldt.selector = selector;
2559#ifdef VBOX
2560 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2561 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2562#endif
2563}
2564
2565void helper_ltr(int selector)
2566{
2567 SegmentCache *dt;
2568 uint32_t e1, e2;
2569#ifndef VBOX
2570 int index, type, entry_limit;
2571#else
2572 unsigned int index;
2573 int type, entry_limit;
2574#endif
2575 target_ulong ptr;
2576
2577#ifdef VBOX
2578 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2579 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2580 env->tr.flags, (RTSEL)(selector & 0xffff)));
2581#endif
2582 selector &= 0xffff;
2583 if ((selector & 0xfffc) == 0) {
2584 /* NULL selector case: invalid TR */
2585 env->tr.base = 0;
2586 env->tr.limit = 0;
2587 env->tr.flags = 0;
2588 } else {
2589 if (selector & 0x4)
2590 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2591 dt = &env->gdt;
2592 index = selector & ~7;
2593#ifdef TARGET_X86_64
2594 if (env->hflags & HF_LMA_MASK)
2595 entry_limit = 15;
2596 else
2597#endif
2598 entry_limit = 7;
2599 if ((index + entry_limit) > dt->limit)
2600 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2601 ptr = dt->base + index;
2602 e1 = ldl_kernel(ptr);
2603 e2 = ldl_kernel(ptr + 4);
2604 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2605 if ((e2 & DESC_S_MASK) ||
2606 (type != 1 && type != 9))
2607 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2608 if (!(e2 & DESC_P_MASK))
2609 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2610#ifdef TARGET_X86_64
2611 if (env->hflags & HF_LMA_MASK) {
2612 uint32_t e3, e4;
2613 e3 = ldl_kernel(ptr + 8);
2614 e4 = ldl_kernel(ptr + 12);
2615 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2616 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2617 load_seg_cache_raw_dt(&env->tr, e1, e2);
2618 env->tr.base |= (target_ulong)e3 << 32;
2619 } else
2620#endif
2621 {
2622 load_seg_cache_raw_dt(&env->tr, e1, e2);
2623 }
2624 e2 |= DESC_TSS_BUSY_MASK;
2625 stl_kernel(ptr + 4, e2);
2626 }
2627 env->tr.selector = selector;
2628#ifdef VBOX
2629 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2630 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2631 env->tr.flags, (RTSEL)(selector & 0xffff)));
2632#endif
2633}
2634
2635/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2636void helper_load_seg(int seg_reg, int selector)
2637{
2638 uint32_t e1, e2;
2639 int cpl, dpl, rpl;
2640 SegmentCache *dt;
2641#ifndef VBOX
2642 int index;
2643#else
2644 unsigned int index;
2645#endif
2646 target_ulong ptr;
2647
2648 selector &= 0xffff;
2649 cpl = env->hflags & HF_CPL_MASK;
2650
2651#ifdef VBOX
2652 /* Trying to load a selector with CPL=1? */
2653 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2654 {
2655 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2656 selector = selector & 0xfffc;
2657 }
2658#endif
2659 if ((selector & 0xfffc) == 0) {
2660 /* null selector case */
2661 if (seg_reg == R_SS
2662#ifdef TARGET_X86_64
2663 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2664#endif
2665 )
2666 raise_exception_err(EXCP0D_GPF, 0);
2667 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2668 } else {
2669
2670 if (selector & 0x4)
2671 dt = &env->ldt;
2672 else
2673 dt = &env->gdt;
2674 index = selector & ~7;
2675 if ((index + 7) > dt->limit)
2676 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2677 ptr = dt->base + index;
2678 e1 = ldl_kernel(ptr);
2679 e2 = ldl_kernel(ptr + 4);
2680
2681 if (!(e2 & DESC_S_MASK))
2682 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2683 rpl = selector & 3;
2684 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2685 if (seg_reg == R_SS) {
2686 /* must be writable segment */
2687 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2688 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2689 if (rpl != cpl || dpl != cpl)
2690 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2691 } else {
2692 /* must be readable segment */
2693 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2694 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2695
2696 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2697 /* if not conforming code, test rights */
2698 if (dpl < cpl || dpl < rpl)
2699 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2700 }
2701 }
2702
2703 if (!(e2 & DESC_P_MASK)) {
2704 if (seg_reg == R_SS)
2705 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2706 else
2707 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2708 }
2709
2710 /* set the access bit if not already set */
2711 if (!(e2 & DESC_A_MASK)) {
2712 e2 |= DESC_A_MASK;
2713 stl_kernel(ptr + 4, e2);
2714 }
2715
2716 cpu_x86_load_seg_cache(env, seg_reg, selector,
2717 get_seg_base(e1, e2),
2718 get_seg_limit(e1, e2),
2719 e2);
2720#if 0
2721 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2722 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2723#endif
2724 }
2725}
2726
2727/* protected mode jump */
2728void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2729 int next_eip_addend)
2730{
2731 int gate_cs, type;
2732 uint32_t e1, e2, cpl, dpl, rpl, limit;
2733 target_ulong next_eip;
2734
2735#ifdef VBOX
2736 e1 = e2 = 0;
2737#endif
2738 if ((new_cs & 0xfffc) == 0)
2739 raise_exception_err(EXCP0D_GPF, 0);
2740 if (load_segment(&e1, &e2, new_cs) != 0)
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 cpl = env->hflags & HF_CPL_MASK;
2743 if (e2 & DESC_S_MASK) {
2744 if (!(e2 & DESC_CS_MASK))
2745 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2746 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2747 if (e2 & DESC_C_MASK) {
2748 /* conforming code segment */
2749 if (dpl > cpl)
2750 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2751 } else {
2752 /* non conforming code segment */
2753 rpl = new_cs & 3;
2754 if (rpl > cpl)
2755 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2756 if (dpl != cpl)
2757 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2758 }
2759 if (!(e2 & DESC_P_MASK))
2760 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2761 limit = get_seg_limit(e1, e2);
2762 if (new_eip > limit &&
2763 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2764 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2765 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2766 get_seg_base(e1, e2), limit, e2);
2767 EIP = new_eip;
2768 } else {
2769 /* jump to call or task gate */
2770 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2771 rpl = new_cs & 3;
2772 cpl = env->hflags & HF_CPL_MASK;
2773 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2774 switch(type) {
2775 case 1: /* 286 TSS */
2776 case 9: /* 386 TSS */
2777 case 5: /* task gate */
2778 if (dpl < cpl || dpl < rpl)
2779 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2780 next_eip = env->eip + next_eip_addend;
2781 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2782 CC_OP = CC_OP_EFLAGS;
2783 break;
2784 case 4: /* 286 call gate */
2785 case 12: /* 386 call gate */
2786 if ((dpl < cpl) || (dpl < rpl))
2787 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2788 if (!(e2 & DESC_P_MASK))
2789 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2790 gate_cs = e1 >> 16;
2791 new_eip = (e1 & 0xffff);
2792 if (type == 12)
2793 new_eip |= (e2 & 0xffff0000);
2794 if (load_segment(&e1, &e2, gate_cs) != 0)
2795 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2796 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2797 /* must be code segment */
2798 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2799 (DESC_S_MASK | DESC_CS_MASK)))
2800 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2801 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2802 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2803 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2804 if (!(e2 & DESC_P_MASK))
2805#ifdef VBOX /* See page 3-514 of 253666.pdf */
2806 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2807#else
2808 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2809#endif
2810 limit = get_seg_limit(e1, e2);
2811 if (new_eip > limit)
2812 raise_exception_err(EXCP0D_GPF, 0);
2813 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2814 get_seg_base(e1, e2), limit, e2);
2815 EIP = new_eip;
2816 break;
2817 default:
2818 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2819 break;
2820 }
2821 }
2822}
2823
2824/* real mode call */
2825void helper_lcall_real(int new_cs, target_ulong new_eip1,
2826 int shift, int next_eip)
2827{
2828 int new_eip;
2829 uint32_t esp, esp_mask;
2830 target_ulong ssp;
2831
2832 new_eip = new_eip1;
2833 esp = ESP;
2834 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2835 ssp = env->segs[R_SS].base;
2836 if (shift) {
2837 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2838 PUSHL(ssp, esp, esp_mask, next_eip);
2839 } else {
2840 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2841 PUSHW(ssp, esp, esp_mask, next_eip);
2842 }
2843
2844 SET_ESP(esp, esp_mask);
2845 env->eip = new_eip;
2846 env->segs[R_CS].selector = new_cs;
2847 env->segs[R_CS].base = (new_cs << 4);
2848}
2849
2850/* protected mode call */
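/* Note: a far call through a call gate to a more privileged code segment
   switches stacks: SS:ESP for the target privilege level is fetched from the
   TSS, the caller's SS:ESP are pushed on the new stack, param_count words (or
   dwords for a 386 gate) are copied over from the old stack, and finally the
   return CS:EIP is pushed before control transfers to the gate's target
   offset. */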
2851void helper_lcall_protected(int new_cs, target_ulong new_eip,
2852 int shift, int next_eip_addend)
2853{
2854 int new_stack, i;
2855 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2856 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2857 uint32_t val, limit, old_sp_mask;
2858 target_ulong ssp, old_ssp, next_eip;
2859
2860#ifdef VBOX
2861 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2862#endif
2863 next_eip = env->eip + next_eip_addend;
2864#ifdef DEBUG_PCALL
2865 if (loglevel & CPU_LOG_PCALL) {
2866 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2867 new_cs, (uint32_t)new_eip, shift);
2868 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2869 }
2870#endif
2871 if ((new_cs & 0xfffc) == 0)
2872 raise_exception_err(EXCP0D_GPF, 0);
2873 if (load_segment(&e1, &e2, new_cs) != 0)
2874 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2875 cpl = env->hflags & HF_CPL_MASK;
2876#ifdef DEBUG_PCALL
2877 if (loglevel & CPU_LOG_PCALL) {
2878 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2879 }
2880#endif
2881 if (e2 & DESC_S_MASK) {
2882 if (!(e2 & DESC_CS_MASK))
2883 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2884 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2885 if (e2 & DESC_C_MASK) {
2886 /* conforming code segment */
2887 if (dpl > cpl)
2888 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2889 } else {
2890 /* non conforming code segment */
2891 rpl = new_cs & 3;
2892 if (rpl > cpl)
2893 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2894 if (dpl != cpl)
2895 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2896 }
2897 if (!(e2 & DESC_P_MASK))
2898 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2899
2900#ifdef TARGET_X86_64
2901 /* XXX: check 16/32 bit cases in long mode */
2902 if (shift == 2) {
2903 target_ulong rsp;
2904 /* 64 bit case */
2905 rsp = ESP;
2906 PUSHQ(rsp, env->segs[R_CS].selector);
2907 PUSHQ(rsp, next_eip);
2908 /* from this point, not restartable */
2909 ESP = rsp;
2910 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2911 get_seg_base(e1, e2),
2912 get_seg_limit(e1, e2), e2);
2913 EIP = new_eip;
2914 } else
2915#endif
2916 {
2917 sp = ESP;
2918 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2919 ssp = env->segs[R_SS].base;
2920 if (shift) {
2921 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2922 PUSHL(ssp, sp, sp_mask, next_eip);
2923 } else {
2924 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2925 PUSHW(ssp, sp, sp_mask, next_eip);
2926 }
2927
2928 limit = get_seg_limit(e1, e2);
2929 if (new_eip > limit)
2930 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2931 /* from this point, not restartable */
2932 SET_ESP(sp, sp_mask);
2933 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2934 get_seg_base(e1, e2), limit, e2);
2935 EIP = new_eip;
2936 }
2937 } else {
2938 /* check gate type */
2939 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2940 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2941 rpl = new_cs & 3;
2942 switch(type) {
2943 case 1: /* available 286 TSS */
2944 case 9: /* available 386 TSS */
2945 case 5: /* task gate */
2946 if (dpl < cpl || dpl < rpl)
2947 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2948 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2949 CC_OP = CC_OP_EFLAGS;
2950 return;
2951 case 4: /* 286 call gate */
2952 case 12: /* 386 call gate */
2953 break;
2954 default:
2955 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2956 break;
2957 }
2958 shift = type >> 3;
2959
2960 if (dpl < cpl || dpl < rpl)
2961 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2962 /* check valid bit */
2963 if (!(e2 & DESC_P_MASK))
2964 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2965 selector = e1 >> 16;
2966 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2967 param_count = e2 & 0x1f;
2968 if ((selector & 0xfffc) == 0)
2969 raise_exception_err(EXCP0D_GPF, 0);
2970
2971 if (load_segment(&e1, &e2, selector) != 0)
2972 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2973 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2974 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2975 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2976 if (dpl > cpl)
2977 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2978 if (!(e2 & DESC_P_MASK))
2979 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2980
2981 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2982 /* to inner privilege */
2983 get_ss_esp_from_tss(&ss, &sp, dpl);
2984#ifdef DEBUG_PCALL
2985 if (loglevel & CPU_LOG_PCALL)
2986 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2987 ss, sp, param_count, ESP);
2988#endif
2989 if ((ss & 0xfffc) == 0)
2990 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2991 if ((ss & 3) != dpl)
2992 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2993 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2994 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2995 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2996 if (ss_dpl != dpl)
2997 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2998 if (!(ss_e2 & DESC_S_MASK) ||
2999 (ss_e2 & DESC_CS_MASK) ||
3000 !(ss_e2 & DESC_W_MASK))
3001 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3002 if (!(ss_e2 & DESC_P_MASK))
3003#ifdef VBOX /* See page 3-99 of 253666.pdf */
3004 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3005#else
3006 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3007#endif
3008
3009 // push_size = ((param_count * 2) + 8) << shift;
3010
3011 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3012 old_ssp = env->segs[R_SS].base;
3013
3014 sp_mask = get_sp_mask(ss_e2);
3015 ssp = get_seg_base(ss_e1, ss_e2);
3016 if (shift) {
3017 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3018 PUSHL(ssp, sp, sp_mask, ESP);
3019 for(i = param_count - 1; i >= 0; i--) {
3020 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3021 PUSHL(ssp, sp, sp_mask, val);
3022 }
3023 } else {
3024 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3025 PUSHW(ssp, sp, sp_mask, ESP);
3026 for(i = param_count - 1; i >= 0; i--) {
3027 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3028 PUSHW(ssp, sp, sp_mask, val);
3029 }
3030 }
3031 new_stack = 1;
3032 } else {
3033 /* to same privilege */
3034 sp = ESP;
3035 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3036 ssp = env->segs[R_SS].base;
3037 // push_size = (4 << shift);
3038 new_stack = 0;
3039 }
3040
3041 if (shift) {
3042 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3043 PUSHL(ssp, sp, sp_mask, next_eip);
3044 } else {
3045 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3046 PUSHW(ssp, sp, sp_mask, next_eip);
3047 }
3048
3049 /* from this point, not restartable */
3050
3051 if (new_stack) {
3052 ss = (ss & ~3) | dpl;
3053 cpu_x86_load_seg_cache(env, R_SS, ss,
3054 ssp,
3055 get_seg_limit(ss_e1, ss_e2),
3056 ss_e2);
3057 }
3058
3059 selector = (selector & ~3) | dpl;
3060 cpu_x86_load_seg_cache(env, R_CS, selector,
3061 get_seg_base(e1, e2),
3062 get_seg_limit(e1, e2),
3063 e2);
3064 cpu_x86_set_cpl(env, dpl);
3065 SET_ESP(sp, sp_mask);
3066 EIP = offset;
3067 }
3068#ifdef USE_KQEMU
3069 if (kqemu_is_ok(env)) {
3070 env->exception_index = -1;
3071 cpu_loop_exit();
3072 }
3073#endif
3074}
3075
3076/* real and vm86 mode iret */
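/* Note: in the VBOX build, with CR4.VME set and IOPL < 3 in v86 mode, IRET is
   allowed to run but must raise #GP if the popped flags would set TF, or
   would set IF while a virtual interrupt is pending (VIP); the popped IF
   value is then reflected into VIF instead of IF, which is what the fVME
   paths below implement. */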
3077void helper_iret_real(int shift)
3078{
3079 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3080 target_ulong ssp;
3081 int eflags_mask;
3082#ifdef VBOX
3083 bool fVME = false;
3084
3085 remR3TrapClear(env->pVM);
3086#endif /* VBOX */
3087
3088 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3089 sp = ESP;
3090 ssp = env->segs[R_SS].base;
3091 if (shift == 1) {
3092 /* 32 bits */
3093 POPL(ssp, sp, sp_mask, new_eip);
3094 POPL(ssp, sp, sp_mask, new_cs);
3095 new_cs &= 0xffff;
3096 POPL(ssp, sp, sp_mask, new_eflags);
3097 } else {
3098 /* 16 bits */
3099 POPW(ssp, sp, sp_mask, new_eip);
3100 POPW(ssp, sp, sp_mask, new_cs);
3101 POPW(ssp, sp, sp_mask, new_eflags);
3102 }
3103#ifdef VBOX
3104 if ( (env->eflags & VM_MASK)
3105 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3106 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3107 {
3108 fVME = true;
3109 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3110 /* if TF will be set -> #GP */
3111 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3112 || (new_eflags & TF_MASK))
3113 raise_exception(EXCP0D_GPF);
3114 }
3115#endif /* VBOX */
3116 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3117 env->segs[R_CS].selector = new_cs;
3118 env->segs[R_CS].base = (new_cs << 4);
3119 env->eip = new_eip;
3120#ifdef VBOX
3121 if (fVME)
3122 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3123 else
3124#endif
3125 if (env->eflags & VM_MASK)
3126 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3127 else
3128 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3129 if (shift == 0)
3130 eflags_mask &= 0xffff;
3131 load_eflags(new_eflags, eflags_mask);
3132 env->hflags2 &= ~HF2_NMI_MASK;
3133#ifdef VBOX
3134 if (fVME)
3135 {
3136 if (new_eflags & IF_MASK)
3137 env->eflags |= VIF_MASK;
3138 else
3139 env->eflags &= ~VIF_MASK;
3140 }
3141#endif /* VBOX */
3142}
3143
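/* Note: validate_seg implements the data segment check performed by RET/IRET
   when returning to an outer privilege level: a data or non-conforming code
   segment whose DPL is below the new CPL is silently replaced by the null
   selector so the less privileged code cannot keep using it. */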
3144#ifndef VBOX
3145static inline void validate_seg(int seg_reg, int cpl)
3146#else /* VBOX */
3147DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3148#endif /* VBOX */
3149{
3150 int dpl;
3151 uint32_t e2;
3152
3153 /* XXX: on x86_64, we do not want to nullify FS and GS because
3154 they may still contain a valid base. I would be interested to
3155 know how a real x86_64 CPU behaves */
3156 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3157 (env->segs[seg_reg].selector & 0xfffc) == 0)
3158 return;
3159
3160 e2 = env->segs[seg_reg].flags;
3161 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3162 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3163 /* data or non conforming code segment */
3164 if (dpl < cpl) {
3165 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3166 }
3167 }
3168}
3169
3170/* protected mode iret */
3171#ifndef VBOX
3172static inline void helper_ret_protected(int shift, int is_iret, int addend)
3173#else /* VBOX */
3174DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3175#endif /* VBOX */
3176{
3177 uint32_t new_cs, new_eflags, new_ss;
3178 uint32_t new_es, new_ds, new_fs, new_gs;
3179 uint32_t e1, e2, ss_e1, ss_e2;
3180 int cpl, dpl, rpl, eflags_mask, iopl;
3181 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3182
3183#ifdef VBOX
3184 ss_e1 = ss_e2 = e1 = e2 = 0;
3185#endif
3186
3187#ifdef TARGET_X86_64
3188 if (shift == 2)
3189 sp_mask = -1;
3190 else
3191#endif
3192 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3193 sp = ESP;
3194 ssp = env->segs[R_SS].base;
3195 new_eflags = 0; /* avoid warning */
3196#ifdef TARGET_X86_64
3197 if (shift == 2) {
3198 POPQ(sp, new_eip);
3199 POPQ(sp, new_cs);
3200 new_cs &= 0xffff;
3201 if (is_iret) {
3202 POPQ(sp, new_eflags);
3203 }
3204 } else
3205#endif
3206 if (shift == 1) {
3207 /* 32 bits */
3208 POPL(ssp, sp, sp_mask, new_eip);
3209 POPL(ssp, sp, sp_mask, new_cs);
3210 new_cs &= 0xffff;
3211 if (is_iret) {
3212 POPL(ssp, sp, sp_mask, new_eflags);
3213#if defined(VBOX) && defined(DEBUG)
3214 printf("iret: new CS %04X\n", new_cs);
3215 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3216 printf("iret: new EFLAGS %08X\n", new_eflags);
3217 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3218#endif
3219 if (new_eflags & VM_MASK)
3220 goto return_to_vm86;
3221 }
3222#ifdef VBOX
3223 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3224 {
3225#ifdef DEBUG
3226 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3227#endif
3228 new_cs = new_cs & 0xfffc;
3229 }
3230#endif
3231 } else {
3232 /* 16 bits */
3233 POPW(ssp, sp, sp_mask, new_eip);
3234 POPW(ssp, sp, sp_mask, new_cs);
3235 if (is_iret)
3236 POPW(ssp, sp, sp_mask, new_eflags);
3237 }
3238#ifdef DEBUG_PCALL
3239 if (loglevel & CPU_LOG_PCALL) {
3240 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3241 new_cs, new_eip, shift, addend);
3242 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3243 }
3244#endif
3245 if ((new_cs & 0xfffc) == 0)
3246 {
3247#if defined(VBOX) && defined(DEBUG)
3248        printf("(new_cs & 0xfffc) == 0\n");
3249#endif
3250 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3251 }
3252 if (load_segment(&e1, &e2, new_cs) != 0)
3253 {
3254#if defined(VBOX) && defined(DEBUG)
3255 printf("load_segment failed\n");
3256#endif
3257 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3258 }
3259 if (!(e2 & DESC_S_MASK) ||
3260 !(e2 & DESC_CS_MASK))
3261 {
3262#if defined(VBOX) && defined(DEBUG)
3263 printf("e2 mask %08x\n", e2);
3264#endif
3265 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3266 }
3267 cpl = env->hflags & HF_CPL_MASK;
3268 rpl = new_cs & 3;
3269 if (rpl < cpl)
3270 {
3271#if defined(VBOX) && defined(DEBUG)
3272 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3273#endif
3274 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3275 }
3276 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3277 if (e2 & DESC_C_MASK) {
3278 if (dpl > rpl)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 } else {
3286 if (dpl != rpl)
3287 {
3288#if defined(VBOX) && defined(DEBUG)
3289 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3290#endif
3291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3292 }
3293 }
3294 if (!(e2 & DESC_P_MASK))
3295 {
3296#if defined(VBOX) && defined(DEBUG)
3297 printf("DESC_P_MASK e2=%08x\n", e2);
3298#endif
3299 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3300 }
3301
3302 sp += addend;
3303 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3304 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3305 /* return to same privilege level */
3306 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3307 get_seg_base(e1, e2),
3308 get_seg_limit(e1, e2),
3309 e2);
3310 } else {
3311 /* return to different privilege level */
3312#ifdef TARGET_X86_64
3313 if (shift == 2) {
3314 POPQ(sp, new_esp);
3315 POPQ(sp, new_ss);
3316 new_ss &= 0xffff;
3317 } else
3318#endif
3319 if (shift == 1) {
3320 /* 32 bits */
3321 POPL(ssp, sp, sp_mask, new_esp);
3322 POPL(ssp, sp, sp_mask, new_ss);
3323 new_ss &= 0xffff;
3324 } else {
3325 /* 16 bits */
3326 POPW(ssp, sp, sp_mask, new_esp);
3327 POPW(ssp, sp, sp_mask, new_ss);
3328 }
3329#ifdef DEBUG_PCALL
3330 if (loglevel & CPU_LOG_PCALL) {
3331 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3332 new_ss, new_esp);
3333 }
3334#endif
3335 if ((new_ss & 0xfffc) == 0) {
3336#ifdef TARGET_X86_64
3337            /* NULL ss is allowed in long mode if cpl != 3 */
3338 /* XXX: test CS64 ? */
3339 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3340 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3341 0, 0xffffffff,
3342 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3343 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3344 DESC_W_MASK | DESC_A_MASK);
3345 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3346 } else
3347#endif
3348 {
3349 raise_exception_err(EXCP0D_GPF, 0);
3350 }
3351 } else {
3352 if ((new_ss & 3) != rpl)
3353 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3354 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3355 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3356 if (!(ss_e2 & DESC_S_MASK) ||
3357 (ss_e2 & DESC_CS_MASK) ||
3358 !(ss_e2 & DESC_W_MASK))
3359 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3360 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3361 if (dpl != rpl)
3362 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3363 if (!(ss_e2 & DESC_P_MASK))
3364 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3365 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3366 get_seg_base(ss_e1, ss_e2),
3367 get_seg_limit(ss_e1, ss_e2),
3368 ss_e2);
3369 }
3370
3371 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3372 get_seg_base(e1, e2),
3373 get_seg_limit(e1, e2),
3374 e2);
3375 cpu_x86_set_cpl(env, rpl);
3376 sp = new_esp;
3377#ifdef TARGET_X86_64
3378 if (env->hflags & HF_CS64_MASK)
3379 sp_mask = -1;
3380 else
3381#endif
3382 sp_mask = get_sp_mask(ss_e2);
3383
3384 /* validate data segments */
3385 validate_seg(R_ES, rpl);
3386 validate_seg(R_DS, rpl);
3387 validate_seg(R_FS, rpl);
3388 validate_seg(R_GS, rpl);
3389
3390 sp += addend;
3391 }
3392 SET_ESP(sp, sp_mask);
3393 env->eip = new_eip;
3394 if (is_iret) {
3395 /* NOTE: 'cpl' is the _old_ CPL */
3396 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3397 if (cpl == 0)
3398#ifdef VBOX
3399 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3400#else
3401 eflags_mask |= IOPL_MASK;
3402#endif
3403 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3404 if (cpl <= iopl)
3405 eflags_mask |= IF_MASK;
3406 if (shift == 0)
3407 eflags_mask &= 0xffff;
3408 load_eflags(new_eflags, eflags_mask);
3409 }
3410 return;
3411
3412 return_to_vm86:
3413 POPL(ssp, sp, sp_mask, new_esp);
3414 POPL(ssp, sp, sp_mask, new_ss);
3415 POPL(ssp, sp, sp_mask, new_es);
3416 POPL(ssp, sp, sp_mask, new_ds);
3417 POPL(ssp, sp, sp_mask, new_fs);
3418 POPL(ssp, sp, sp_mask, new_gs);
3419
3420 /* modify processor state */
3421 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3422 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3423 load_seg_vm(R_CS, new_cs & 0xffff);
3424 cpu_x86_set_cpl(env, 3);
3425 load_seg_vm(R_SS, new_ss & 0xffff);
3426 load_seg_vm(R_ES, new_es & 0xffff);
3427 load_seg_vm(R_DS, new_ds & 0xffff);
3428 load_seg_vm(R_FS, new_fs & 0xffff);
3429 load_seg_vm(R_GS, new_gs & 0xffff);
3430
3431 env->eip = new_eip & 0xffff;
3432 ESP = new_esp;
3433}
3434
3435void helper_iret_protected(int shift, int next_eip)
3436{
3437 int tss_selector, type;
3438 uint32_t e1, e2;
3439
3440#ifdef VBOX
3441 e1 = e2 = 0;
3442 remR3TrapClear(env->pVM);
3443#endif
3444
3445 /* specific case for TSS */
3446 if (env->eflags & NT_MASK) {
3447#ifdef TARGET_X86_64
3448 if (env->hflags & HF_LMA_MASK)
3449 raise_exception_err(EXCP0D_GPF, 0);
3450#endif
3451 tss_selector = lduw_kernel(env->tr.base + 0);
3452 if (tss_selector & 4)
3453 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3454 if (load_segment(&e1, &e2, tss_selector) != 0)
3455 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3456 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3457 /* NOTE: we check both segment and busy TSS */
3458 if (type != 3)
3459 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3460 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3461 } else {
3462 helper_ret_protected(shift, 1, 0);
3463 }
3464 env->hflags2 &= ~HF2_NMI_MASK;
3465#ifdef USE_KQEMU
3466 if (kqemu_is_ok(env)) {
3467 CC_OP = CC_OP_EFLAGS;
3468 env->exception_index = -1;
3469 cpu_loop_exit();
3470 }
3471#endif
3472}
3473
3474void helper_lret_protected(int shift, int addend)
3475{
3476 helper_ret_protected(shift, 0, addend);
3477#ifdef USE_KQEMU
3478 if (kqemu_is_ok(env)) {
3479 env->exception_index = -1;
3480 cpu_loop_exit();
3481 }
3482#endif
3483}
3484
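/* Note: SYSENTER/SYSEXIT use flat segments derived from IA32_SYSENTER_CS:
   SYSENTER loads CS from the MSR value and SS from value + 8, takes EIP/ESP
   from the SYSENTER_EIP/ESP MSRs, clears VM and IF and switches to CPL 0.
   SYSEXIT returns to CPL 3 with CS at value + 16 (or + 32 for a 64-bit
   return) and ESP/EIP taken from ECX/EDX, and additionally requires CPL 0.
   A zero SYSENTER_CS MSR makes both fault with #GP(0). */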
3485void helper_sysenter(void)
3486{
3487 if (env->sysenter_cs == 0) {
3488 raise_exception_err(EXCP0D_GPF, 0);
3489 }
3490 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3491 cpu_x86_set_cpl(env, 0);
3492
3493#ifdef TARGET_X86_64
3494 if (env->hflags & HF_LMA_MASK) {
3495 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3496 0, 0xffffffff,
3497 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3498 DESC_S_MASK |
3499 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3500 } else
3501#endif
3502 {
3503 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3504 0, 0xffffffff,
3505 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3506 DESC_S_MASK |
3507 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3508 }
3509 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3510 0, 0xffffffff,
3511 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3512 DESC_S_MASK |
3513 DESC_W_MASK | DESC_A_MASK);
3514 ESP = env->sysenter_esp;
3515 EIP = env->sysenter_eip;
3516}
3517
3518void helper_sysexit(int dflag)
3519{
3520 int cpl;
3521
3522 cpl = env->hflags & HF_CPL_MASK;
3523 if (env->sysenter_cs == 0 || cpl != 0) {
3524 raise_exception_err(EXCP0D_GPF, 0);
3525 }
3526 cpu_x86_set_cpl(env, 3);
3527#ifdef TARGET_X86_64
3528 if (dflag == 2) {
3529 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3530 0, 0xffffffff,
3531 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3532 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3533 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3534 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3535 0, 0xffffffff,
3536 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3537 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3538 DESC_W_MASK | DESC_A_MASK);
3539 } else
3540#endif
3541 {
3542 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3543 0, 0xffffffff,
3544 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3545 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3546 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3547 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3548 0, 0xffffffff,
3549 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3550 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3551 DESC_W_MASK | DESC_A_MASK);
3552 }
3553 ESP = ECX;
3554 EIP = EDX;
3555#ifdef USE_KQEMU
3556 if (kqemu_is_ok(env)) {
3557 env->exception_index = -1;
3558 cpu_loop_exit();
3559 }
3560#endif
3561}
3562
3563#if defined(CONFIG_USER_ONLY)
3564target_ulong helper_read_crN(int reg)
3565{
3566 return 0;
3567}
3568
3569void helper_write_crN(int reg, target_ulong t0)
3570{
3571}
3572#else
3573target_ulong helper_read_crN(int reg)
3574{
3575 target_ulong val;
3576
3577 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3578 switch(reg) {
3579 default:
3580 val = env->cr[reg];
3581 break;
3582 case 8:
3583 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3584 val = cpu_get_apic_tpr(env);
3585 } else {
3586 val = env->v_tpr;
3587 }
3588 break;
3589 }
3590 return val;
3591}
3592
3593void helper_write_crN(int reg, target_ulong t0)
3594{
3595 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3596 switch(reg) {
3597 case 0:
3598 cpu_x86_update_cr0(env, t0);
3599 break;
3600 case 3:
3601 cpu_x86_update_cr3(env, t0);
3602 break;
3603 case 4:
3604 cpu_x86_update_cr4(env, t0);
3605 break;
3606 case 8:
3607 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3608 cpu_set_apic_tpr(env, t0);
3609 }
3610 env->v_tpr = t0 & 0x0f;
3611 break;
3612 default:
3613 env->cr[reg] = t0;
3614 break;
3615 }
3616}
3617#endif
3618
3619void helper_lmsw(target_ulong t0)
3620{
3621 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3622 if already set to one. */
3623 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3624 helper_write_crN(0, t0);
3625}
3626
3627void helper_clts(void)
3628{
3629 env->cr[0] &= ~CR0_TS_MASK;
3630 env->hflags &= ~HF_TS_MASK;
3631}
3632
3633/* XXX: do more */
3634void helper_movl_drN_T0(int reg, target_ulong t0)
3635{
3636 env->dr[reg] = t0;
3637}
3638
3639void helper_invlpg(target_ulong addr)
3640{
3641 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3642 tlb_flush_page(env, addr);
3643}
3644
3645void helper_rdtsc(void)
3646{
3647 uint64_t val;
3648
3649 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3650 raise_exception(EXCP0D_GPF);
3651 }
3652 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3653
3654 val = cpu_get_tsc(env) + env->tsc_offset;
3655 EAX = (uint32_t)(val);
3656 EDX = (uint32_t)(val >> 32);
3657}
3658
3659#ifdef VBOX
3660void helper_rdtscp(void)
3661{
3662 uint64_t val;
3663 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3664 raise_exception(EXCP0D_GPF);
3665 }
3666
3667 val = cpu_get_tsc(env);
3668 EAX = (uint32_t)(val);
3669 EDX = (uint32_t)(val >> 32);
3670 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3671}
3672#endif
3673
3674void helper_rdpmc(void)
3675{
3676 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3677 raise_exception(EXCP0D_GPF);
3678 }
3679 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3680
3681 /* currently unimplemented */
3682 raise_exception_err(EXCP06_ILLOP, 0);
3683}
3684
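/* Note: WRMSR takes the MSR index in ECX and the 64-bit value in EDX:EAX;
   RDMSR returns the value the same way.  Unknown MSRs are silently ignored on
   write and read back as 0; the VBOX build additionally forwards the
   MSR_APIC_RANGE_START..MSR_APIC_RANGE_END window, reserved for x2APIC
   control, to the APIC emulation. */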
3685#if defined(CONFIG_USER_ONLY)
3686void helper_wrmsr(void)
3687{
3688}
3689
3690void helper_rdmsr(void)
3691{
3692}
3693#else
3694void helper_wrmsr(void)
3695{
3696 uint64_t val;
3697
3698 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3699
3700 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3701
3702 switch((uint32_t)ECX) {
3703 case MSR_IA32_SYSENTER_CS:
3704 env->sysenter_cs = val & 0xffff;
3705 break;
3706 case MSR_IA32_SYSENTER_ESP:
3707 env->sysenter_esp = val;
3708 break;
3709 case MSR_IA32_SYSENTER_EIP:
3710 env->sysenter_eip = val;
3711 break;
3712 case MSR_IA32_APICBASE:
3713 cpu_set_apic_base(env, val);
3714 break;
3715 case MSR_EFER:
3716 {
3717 uint64_t update_mask;
3718 update_mask = 0;
3719 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3720 update_mask |= MSR_EFER_SCE;
3721 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3722 update_mask |= MSR_EFER_LME;
3723 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3724 update_mask |= MSR_EFER_FFXSR;
3725 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3726 update_mask |= MSR_EFER_NXE;
3727 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3728 update_mask |= MSR_EFER_SVME;
3729 cpu_load_efer(env, (env->efer & ~update_mask) |
3730 (val & update_mask));
3731 }
3732 break;
3733 case MSR_STAR:
3734 env->star = val;
3735 break;
3736 case MSR_PAT:
3737 env->pat = val;
3738 break;
3739 case MSR_VM_HSAVE_PA:
3740 env->vm_hsave = val;
3741 break;
3742#ifdef TARGET_X86_64
3743 case MSR_LSTAR:
3744 env->lstar = val;
3745 break;
3746 case MSR_CSTAR:
3747 env->cstar = val;
3748 break;
3749 case MSR_FMASK:
3750 env->fmask = val;
3751 break;
3752 case MSR_FSBASE:
3753 env->segs[R_FS].base = val;
3754 break;
3755 case MSR_GSBASE:
3756 env->segs[R_GS].base = val;
3757 break;
3758 case MSR_KERNELGSBASE:
3759 env->kernelgsbase = val;
3760 break;
3761#endif
3762 default:
3763#ifndef VBOX
3764 /* XXX: exception ? */
3765 break;
3766#else /* VBOX */
3767 {
3768 uint32_t ecx = (uint32_t)ECX;
3769            /* In the x2APIC specification this range is reserved for APIC control. */
3770 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3771 cpu_apic_wrmsr(env, ecx, val);
3772 /** @todo else exception? */
3773 break;
3774 }
3775 case MSR_K8_TSC_AUX:
3776 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3777 break;
3778#endif /* VBOX */
3779 }
3780}
3781
3782void helper_rdmsr(void)
3783{
3784 uint64_t val;
3785
3786 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3787
3788 switch((uint32_t)ECX) {
3789 case MSR_IA32_SYSENTER_CS:
3790 val = env->sysenter_cs;
3791 break;
3792 case MSR_IA32_SYSENTER_ESP:
3793 val = env->sysenter_esp;
3794 break;
3795 case MSR_IA32_SYSENTER_EIP:
3796 val = env->sysenter_eip;
3797 break;
3798 case MSR_IA32_APICBASE:
3799 val = cpu_get_apic_base(env);
3800 break;
3801 case MSR_EFER:
3802 val = env->efer;
3803 break;
3804 case MSR_STAR:
3805 val = env->star;
3806 break;
3807 case MSR_PAT:
3808 val = env->pat;
3809 break;
3810 case MSR_VM_HSAVE_PA:
3811 val = env->vm_hsave;
3812 break;
3813 case MSR_IA32_PERF_STATUS:
3814 /* tsc_increment_by_tick */
3815 val = 1000ULL;
3816 /* CPU multiplier */
3817 val |= (((uint64_t)4ULL) << 40);
3818 break;
3819#ifdef TARGET_X86_64
3820 case MSR_LSTAR:
3821 val = env->lstar;
3822 break;
3823 case MSR_CSTAR:
3824 val = env->cstar;
3825 break;
3826 case MSR_FMASK:
3827 val = env->fmask;
3828 break;
3829 case MSR_FSBASE:
3830 val = env->segs[R_FS].base;
3831 break;
3832 case MSR_GSBASE:
3833 val = env->segs[R_GS].base;
3834 break;
3835 case MSR_KERNELGSBASE:
3836 val = env->kernelgsbase;
3837 break;
3838#endif
3839#ifdef USE_KQEMU
3840 case MSR_QPI_COMMBASE:
3841 if (env->kqemu_enabled) {
3842 val = kqemu_comm_base;
3843 } else {
3844 val = 0;
3845 }
3846 break;
3847#endif
3848 default:
3849#ifndef VBOX
3850 /* XXX: exception ? */
3851 val = 0;
3852 break;
3853#else /* VBOX */
3854 {
3855 uint32_t ecx = (uint32_t)ECX;
3856            /* In the x2APIC specification this range is reserved for APIC control. */
3857 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3858 val = cpu_apic_rdmsr(env, ecx);
3859 else
3860 val = 0; /** @todo else exception? */
3861 break;
3862 }
3863 case MSR_K8_TSC_AUX:
3864 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3865 break;
3866#endif /* VBOX */
3867 }
3868 EAX = (uint32_t)(val);
3869 EDX = (uint32_t)(val >> 32);
3870}
3871#endif
3872
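/* Note: LSL, LAR, VERR and VERW do not fault on an unusable selector; they
   set ZF only when the descriptor is accessible at the current CPL/RPL and is
   of a type the instruction accepts, and clear ZF (returning 0 for LSL/LAR)
   otherwise. */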
3873target_ulong helper_lsl(target_ulong selector1)
3874{
3875 unsigned int limit;
3876 uint32_t e1, e2, eflags, selector;
3877 int rpl, dpl, cpl, type;
3878
3879 selector = selector1 & 0xffff;
3880 eflags = cc_table[CC_OP].compute_all();
3881 if (load_segment(&e1, &e2, selector) != 0)
3882 goto fail;
3883 rpl = selector & 3;
3884 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3885 cpl = env->hflags & HF_CPL_MASK;
3886 if (e2 & DESC_S_MASK) {
3887 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3888 /* conforming */
3889 } else {
3890 if (dpl < cpl || dpl < rpl)
3891 goto fail;
3892 }
3893 } else {
3894 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3895 switch(type) {
3896 case 1:
3897 case 2:
3898 case 3:
3899 case 9:
3900 case 11:
3901 break;
3902 default:
3903 goto fail;
3904 }
3905 if (dpl < cpl || dpl < rpl) {
3906 fail:
3907 CC_SRC = eflags & ~CC_Z;
3908 return 0;
3909 }
3910 }
3911 limit = get_seg_limit(e1, e2);
3912 CC_SRC = eflags | CC_Z;
3913 return limit;
3914}
3915
3916target_ulong helper_lar(target_ulong selector1)
3917{
3918 uint32_t e1, e2, eflags, selector;
3919 int rpl, dpl, cpl, type;
3920
3921 selector = selector1 & 0xffff;
3922 eflags = cc_table[CC_OP].compute_all();
3923 if ((selector & 0xfffc) == 0)
3924 goto fail;
3925 if (load_segment(&e1, &e2, selector) != 0)
3926 goto fail;
3927 rpl = selector & 3;
3928 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3929 cpl = env->hflags & HF_CPL_MASK;
3930 if (e2 & DESC_S_MASK) {
3931 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3932 /* conforming */
3933 } else {
3934 if (dpl < cpl || dpl < rpl)
3935 goto fail;
3936 }
3937 } else {
3938 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3939 switch(type) {
3940 case 1:
3941 case 2:
3942 case 3:
3943 case 4:
3944 case 5:
3945 case 9:
3946 case 11:
3947 case 12:
3948 break;
3949 default:
3950 goto fail;
3951 }
3952 if (dpl < cpl || dpl < rpl) {
3953 fail:
3954 CC_SRC = eflags & ~CC_Z;
3955 return 0;
3956 }
3957 }
3958 CC_SRC = eflags | CC_Z;
3959 return e2 & 0x00f0ff00;
3960}
3961
3962void helper_verr(target_ulong selector1)
3963{
3964 uint32_t e1, e2, eflags, selector;
3965 int rpl, dpl, cpl;
3966
3967 selector = selector1 & 0xffff;
3968 eflags = cc_table[CC_OP].compute_all();
3969 if ((selector & 0xfffc) == 0)
3970 goto fail;
3971 if (load_segment(&e1, &e2, selector) != 0)
3972 goto fail;
3973 if (!(e2 & DESC_S_MASK))
3974 goto fail;
3975 rpl = selector & 3;
3976 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3977 cpl = env->hflags & HF_CPL_MASK;
3978 if (e2 & DESC_CS_MASK) {
3979 if (!(e2 & DESC_R_MASK))
3980 goto fail;
3981 if (!(e2 & DESC_C_MASK)) {
3982 if (dpl < cpl || dpl < rpl)
3983 goto fail;
3984 }
3985 } else {
3986 if (dpl < cpl || dpl < rpl) {
3987 fail:
3988 CC_SRC = eflags & ~CC_Z;
3989 return;
3990 }
3991 }
3992 CC_SRC = eflags | CC_Z;
3993}
3994
3995void helper_verw(target_ulong selector1)
3996{
3997 uint32_t e1, e2, eflags, selector;
3998 int rpl, dpl, cpl;
3999
4000 selector = selector1 & 0xffff;
4001 eflags = cc_table[CC_OP].compute_all();
4002 if ((selector & 0xfffc) == 0)
4003 goto fail;
4004 if (load_segment(&e1, &e2, selector) != 0)
4005 goto fail;
4006 if (!(e2 & DESC_S_MASK))
4007 goto fail;
4008 rpl = selector & 3;
4009 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4010 cpl = env->hflags & HF_CPL_MASK;
4011 if (e2 & DESC_CS_MASK) {
4012 goto fail;
4013 } else {
4014 if (dpl < cpl || dpl < rpl)
4015 goto fail;
4016 if (!(e2 & DESC_W_MASK)) {
4017 fail:
4018 CC_SRC = eflags & ~CC_Z;
4019 return;
4020 }
4021 }
4022 CC_SRC = eflags | CC_Z;
4023}
4024
4025/* x87 FPU helpers */
4026
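/* Set the given exception bits in the FPU status word; if any of them is unmasked in the
   control word, also set the error-summary and busy bits so a #MF can be delivered later. */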
4027static void fpu_set_exception(int mask)
4028{
4029 env->fpus |= mask;
4030 if (env->fpus & (~env->fpuc & FPUC_EM))
4031 env->fpus |= FPUS_SE | FPUS_B;
4032}
4033
4034#ifndef VBOX
4035static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4036#else /* VBOX */
4037DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4038#endif /* VBOX */
4039{
4040 if (b == 0.0)
4041 fpu_set_exception(FPUS_ZE);
4042 return a / b;
4043}
4044
4045void fpu_raise_exception(void)
4046{
4047 if (env->cr[0] & CR0_NE_MASK) {
4048 raise_exception(EXCP10_COPR);
4049 }
4050#if !defined(CONFIG_USER_ONLY)
4051 else {
4052 cpu_set_ferr(env);
4053 }
4054#endif
4055}
4056
4057void helper_flds_FT0(uint32_t val)
4058{
4059 union {
4060 float32 f;
4061 uint32_t i;
4062 } u;
4063 u.i = val;
4064 FT0 = float32_to_floatx(u.f, &env->fp_status);
4065}
4066
4067void helper_fldl_FT0(uint64_t val)
4068{
4069 union {
4070 float64 f;
4071 uint64_t i;
4072 } u;
4073 u.i = val;
4074 FT0 = float64_to_floatx(u.f, &env->fp_status);
4075}
4076
4077void helper_fildl_FT0(int32_t val)
4078{
4079 FT0 = int32_to_floatx(val, &env->fp_status);
4080}
4081
4082void helper_flds_ST0(uint32_t val)
4083{
4084 int new_fpstt;
4085 union {
4086 float32 f;
4087 uint32_t i;
4088 } u;
4089 new_fpstt = (env->fpstt - 1) & 7;
4090 u.i = val;
4091 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4092 env->fpstt = new_fpstt;
4093 env->fptags[new_fpstt] = 0; /* validate stack entry */
4094}
4095
4096void helper_fldl_ST0(uint64_t val)
4097{
4098 int new_fpstt;
4099 union {
4100 float64 f;
4101 uint64_t i;
4102 } u;
4103 new_fpstt = (env->fpstt - 1) & 7;
4104 u.i = val;
4105 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4106 env->fpstt = new_fpstt;
4107 env->fptags[new_fpstt] = 0; /* validate stack entry */
4108}
4109
4110void helper_fildl_ST0(int32_t val)
4111{
4112 int new_fpstt;
4113 new_fpstt = (env->fpstt - 1) & 7;
4114 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4115 env->fpstt = new_fpstt;
4116 env->fptags[new_fpstt] = 0; /* validate stack entry */
4117}
4118
4119void helper_fildll_ST0(int64_t val)
4120{
4121 int new_fpstt;
4122 new_fpstt = (env->fpstt - 1) & 7;
4123 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4124 env->fpstt = new_fpstt;
4125 env->fptags[new_fpstt] = 0; /* validate stack entry */
4126}
4127
4128#ifndef VBOX
4129uint32_t helper_fsts_ST0(void)
4130#else
4131RTCCUINTREG helper_fsts_ST0(void)
4132#endif
4133{
4134 union {
4135 float32 f;
4136 uint32_t i;
4137 } u;
4138 u.f = floatx_to_float32(ST0, &env->fp_status);
4139 return u.i;
4140}
4141
4142uint64_t helper_fstl_ST0(void)
4143{
4144 union {
4145 float64 f;
4146 uint64_t i;
4147 } u;
4148 u.f = floatx_to_float64(ST0, &env->fp_status);
4149 return u.i;
4150}
4151#ifndef VBOX
4152int32_t helper_fist_ST0(void)
4153#else
4154RTCCINTREG helper_fist_ST0(void)
4155#endif
4156{
4157 int32_t val;
4158 val = floatx_to_int32(ST0, &env->fp_status);
4159 if (val != (int16_t)val)
4160 val = -32768;
4161 return val;
4162}
4163
4164#ifndef VBOX
4165int32_t helper_fistl_ST0(void)
4166#else
4167RTCCINTREG helper_fistl_ST0(void)
4168#endif
4169{
4170 int32_t val;
4171 val = floatx_to_int32(ST0, &env->fp_status);
4172 return val;
4173}
4174
4175int64_t helper_fistll_ST0(void)
4176{
4177 int64_t val;
4178 val = floatx_to_int64(ST0, &env->fp_status);
4179 return val;
4180}
4181
4182#ifndef VBOX
4183int32_t helper_fistt_ST0(void)
4184#else
4185RTCCINTREG helper_fistt_ST0(void)
4186#endif
4187{
4188 int32_t val;
4189 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4190 if (val != (int16_t)val)
4191 val = -32768;
4192 return val;
4193}
4194
4195#ifndef VBOX
4196int32_t helper_fisttl_ST0(void)
4197#else
4198RTCCINTREG helper_fisttl_ST0(void)
4199#endif
4200{
4201 int32_t val;
4202 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4203 return val;
4204}
4205
4206int64_t helper_fisttll_ST0(void)
4207{
4208 int64_t val;
4209 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4210 return val;
4211}
4212
4213void helper_fldt_ST0(target_ulong ptr)
4214{
4215 int new_fpstt;
4216 new_fpstt = (env->fpstt - 1) & 7;
4217 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4218 env->fpstt = new_fpstt;
4219 env->fptags[new_fpstt] = 0; /* validate stack entry */
4220}
4221
4222void helper_fstt_ST0(target_ulong ptr)
4223{
4224 helper_fstt(ST0, ptr);
4225}
4226
4227void helper_fpush(void)
4228{
4229 fpush();
4230}
4231
4232void helper_fpop(void)
4233{
4234 fpop();
4235}
4236
4237void helper_fdecstp(void)
4238{
4239 env->fpstt = (env->fpstt - 1) & 7;
4240 env->fpus &= (~0x4700);
4241}
4242
4243void helper_fincstp(void)
4244{
4245 env->fpstt = (env->fpstt + 1) & 7;
4246 env->fpus &= (~0x4700);
4247}
4248
4249/* FPU move */
4250
4251void helper_ffree_STN(int st_index)
4252{
4253 env->fptags[(env->fpstt + st_index) & 7] = 1;
4254}
4255
4256void helper_fmov_ST0_FT0(void)
4257{
4258 ST0 = FT0;
4259}
4260
4261void helper_fmov_FT0_STN(int st_index)
4262{
4263 FT0 = ST(st_index);
4264}
4265
4266void helper_fmov_ST0_STN(int st_index)
4267{
4268 ST0 = ST(st_index);
4269}
4270
4271void helper_fmov_STN_ST0(int st_index)
4272{
4273 ST(st_index) = ST0;
4274}
4275
4276void helper_fxchg_ST0_STN(int st_index)
4277{
4278 CPU86_LDouble tmp;
4279 tmp = ST(st_index);
4280 ST(st_index) = ST0;
4281 ST0 = tmp;
4282}
4283
4284/* FPU operations */
4285
4286static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4287
4288void helper_fcom_ST0_FT0(void)
4289{
4290 int ret;
4291
4292 ret = floatx_compare(ST0, FT0, &env->fp_status);
4293 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4294 FORCE_RET();
4295}
4296
4297void helper_fucom_ST0_FT0(void)
4298{
4299 int ret;
4300
4301 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4302 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4303 FORCE_RET();
4304}
4305
4306static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4307
4308void helper_fcomi_ST0_FT0(void)
4309{
4310 int eflags;
4311 int ret;
4312
4313 ret = floatx_compare(ST0, FT0, &env->fp_status);
4314 eflags = cc_table[CC_OP].compute_all();
4315 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4316 CC_SRC = eflags;
4317 FORCE_RET();
4318}
4319
4320void helper_fucomi_ST0_FT0(void)
4321{
4322 int eflags;
4323 int ret;
4324
4325 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4326 eflags = cc_table[CC_OP].compute_all();
4327 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4328 CC_SRC = eflags;
4329 FORCE_RET();
4330}
4331
4332void helper_fadd_ST0_FT0(void)
4333{
4334 ST0 += FT0;
4335}
4336
4337void helper_fmul_ST0_FT0(void)
4338{
4339 ST0 *= FT0;
4340}
4341
4342void helper_fsub_ST0_FT0(void)
4343{
4344 ST0 -= FT0;
4345}
4346
4347void helper_fsubr_ST0_FT0(void)
4348{
4349 ST0 = FT0 - ST0;
4350}
4351
4352void helper_fdiv_ST0_FT0(void)
4353{
4354 ST0 = helper_fdiv(ST0, FT0);
4355}
4356
4357void helper_fdivr_ST0_FT0(void)
4358{
4359 ST0 = helper_fdiv(FT0, ST0);
4360}
4361
4362/* fp operations between STN and ST0 */
4363
4364void helper_fadd_STN_ST0(int st_index)
4365{
4366 ST(st_index) += ST0;
4367}
4368
4369void helper_fmul_STN_ST0(int st_index)
4370{
4371 ST(st_index) *= ST0;
4372}
4373
4374void helper_fsub_STN_ST0(int st_index)
4375{
4376 ST(st_index) -= ST0;
4377}
4378
4379void helper_fsubr_STN_ST0(int st_index)
4380{
4381 CPU86_LDouble *p;
4382 p = &ST(st_index);
4383 *p = ST0 - *p;
4384}
4385
4386void helper_fdiv_STN_ST0(int st_index)
4387{
4388 CPU86_LDouble *p;
4389 p = &ST(st_index);
4390 *p = helper_fdiv(*p, ST0);
4391}
4392
4393void helper_fdivr_STN_ST0(int st_index)
4394{
4395 CPU86_LDouble *p;
4396 p = &ST(st_index);
4397 *p = helper_fdiv(ST0, *p);
4398}
4399
4400/* misc FPU operations */
4401void helper_fchs_ST0(void)
4402{
4403 ST0 = floatx_chs(ST0);
4404}
4405
4406void helper_fabs_ST0(void)
4407{
4408 ST0 = floatx_abs(ST0);
4409}
4410
4411void helper_fld1_ST0(void)
4412{
4413 ST0 = f15rk[1];
4414}
4415
4416void helper_fldl2t_ST0(void)
4417{
4418 ST0 = f15rk[6];
4419}
4420
4421void helper_fldl2e_ST0(void)
4422{
4423 ST0 = f15rk[5];
4424}
4425
4426void helper_fldpi_ST0(void)
4427{
4428 ST0 = f15rk[2];
4429}
4430
4431void helper_fldlg2_ST0(void)
4432{
4433 ST0 = f15rk[3];
4434}
4435
4436void helper_fldln2_ST0(void)
4437{
4438 ST0 = f15rk[4];
4439}
4440
4441void helper_fldz_ST0(void)
4442{
4443 ST0 = f15rk[0];
4444}
4445
4446void helper_fldz_FT0(void)
4447{
4448 FT0 = f15rk[0];
4449}
4450
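/* FNSTSW: return the status word with the current top-of-stack index (fpstt) merged into bits 11-13. */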
4451#ifndef VBOX
4452uint32_t helper_fnstsw(void)
4453#else
4454RTCCUINTREG helper_fnstsw(void)
4455#endif
4456{
4457 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4458}
4459
4460#ifndef VBOX
4461uint32_t helper_fnstcw(void)
4462#else
4463RTCCUINTREG helper_fnstcw(void)
4464#endif
4465{
4466 return env->fpuc;
4467}
4468
4469static void update_fp_status(void)
4470{
4471 int rnd_type;
4472
4473 /* set rounding mode */
4474 switch(env->fpuc & RC_MASK) {
4475 default:
4476 case RC_NEAR:
4477 rnd_type = float_round_nearest_even;
4478 break;
4479 case RC_DOWN:
4480 rnd_type = float_round_down;
4481 break;
4482 case RC_UP:
4483 rnd_type = float_round_up;
4484 break;
4485 case RC_CHOP:
4486 rnd_type = float_round_to_zero;
4487 break;
4488 }
4489 set_float_rounding_mode(rnd_type, &env->fp_status);
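 /* Map the precision-control field (FPUC bits 8-9) to the softfloat rounding precision:
    0 -> 32-bit, 2 -> 64-bit, 3 (and the reserved value 1) -> 80-bit. */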
4490#ifdef FLOATX80
4491 switch((env->fpuc >> 8) & 3) {
4492 case 0:
4493 rnd_type = 32;
4494 break;
4495 case 2:
4496 rnd_type = 64;
4497 break;
4498 case 3:
4499 default:
4500 rnd_type = 80;
4501 break;
4502 }
4503 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4504#endif
4505}
4506
4507void helper_fldcw(uint32_t val)
4508{
4509 env->fpuc = val;
4510 update_fp_status();
4511}
4512
4513void helper_fclex(void)
4514{
4515 env->fpus &= 0x7f00;
4516}
4517
4518void helper_fwait(void)
4519{
4520 if (env->fpus & FPUS_SE)
4521 fpu_raise_exception();
4522 FORCE_RET();
4523}
4524
4525void helper_fninit(void)
4526{
4527 env->fpus = 0;
4528 env->fpstt = 0;
4529 env->fpuc = 0x37f;
4530 env->fptags[0] = 1;
4531 env->fptags[1] = 1;
4532 env->fptags[2] = 1;
4533 env->fptags[3] = 1;
4534 env->fptags[4] = 1;
4535 env->fptags[5] = 1;
4536 env->fptags[6] = 1;
4537 env->fptags[7] = 1;
4538}
4539
4540/* BCD ops */
4541
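/* FBLD: read an 18-digit packed-BCD integer (9 data bytes, two digits per byte, sign in
   bit 7 of the 10th byte) and push it on the FPU stack as a floating-point value. */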
4542void helper_fbld_ST0(target_ulong ptr)
4543{
4544 CPU86_LDouble tmp;
4545 uint64_t val;
4546 unsigned int v;
4547 int i;
4548
4549 val = 0;
4550 for(i = 8; i >= 0; i--) {
4551 v = ldub(ptr + i);
4552 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4553 }
4554 tmp = val;
4555 if (ldub(ptr + 9) & 0x80)
4556 tmp = -tmp;
4557 fpush();
4558 ST0 = tmp;
4559}
4560
4561void helper_fbst_ST0(target_ulong ptr)
4562{
4563 int v;
4564 target_ulong mem_ref, mem_end;
4565 int64_t val;
4566
4567 val = floatx_to_int64(ST0, &env->fp_status);
4568 mem_ref = ptr;
4569 mem_end = mem_ref + 9;
4570 if (val < 0) {
4571 stb(mem_end, 0x80);
4572 val = -val;
4573 } else {
4574 stb(mem_end, 0x00);
4575 }
4576 while (mem_ref < mem_end) {
4577 if (val == 0)
4578 break;
4579 v = val % 100;
4580 val = val / 100;
4581 v = ((v / 10) << 4) | (v % 10);
4582 stb(mem_ref++, v);
4583 }
4584 while (mem_ref < mem_end) {
4585 stb(mem_ref++, 0);
4586 }
4587}
4588
4589void helper_f2xm1(void)
4590{
4591 ST0 = pow(2.0,ST0) - 1.0;
4592}
4593
4594void helper_fyl2x(void)
4595{
4596 CPU86_LDouble fptemp;
4597
4598 fptemp = ST0;
4599 if (fptemp>0.0){
4600 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4601 ST1 *= fptemp;
4602 fpop();
4603 } else {
4604 env->fpus &= (~0x4700);
4605 env->fpus |= 0x400;
4606 }
4607}
4608
4609void helper_fptan(void)
4610{
4611 CPU86_LDouble fptemp;
4612
4613 fptemp = ST0;
4614 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4615 env->fpus |= 0x400;
4616 } else {
4617 ST0 = tan(fptemp);
4618 fpush();
4619 ST0 = 1.0;
4620 env->fpus &= (~0x400); /* C2 <-- 0 */
4621 /* the above code is for |arg| < 2**52 only */
4622 }
4623}
4624
4625void helper_fpatan(void)
4626{
4627 CPU86_LDouble fptemp, fpsrcop;
4628
4629 fpsrcop = ST1;
4630 fptemp = ST0;
4631 ST1 = atan2(fpsrcop,fptemp);
4632 fpop();
4633}
4634
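/* FXTRACT: split ST0 into its unbiased exponent (pushed so it ends up in ST(1)) and its
   significand with the exponent reset to the bias (new ST0). */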
4635void helper_fxtract(void)
4636{
4637 CPU86_LDoubleU temp;
4638 unsigned int expdif;
4639
4640 temp.d = ST0;
4641 expdif = EXPD(temp) - EXPBIAS;
4642 /*DP exponent bias*/
4643 ST0 = expdif;
4644 fpush();
4645 BIASEXPONENT(temp);
4646 ST0 = temp.d;
4647}
4648
4649#ifdef VBOX
4650#ifdef _MSC_VER
4651/* MSC cannot divide by zero */
4652extern double _Nan;
4653#define NaN _Nan
4654#else
4655#define NaN (0.0 / 0.0)
4656#endif
4657#endif /* VBOX */
4658
4659void helper_fprem1(void)
4660{
4661 CPU86_LDouble dblq, fpsrcop, fptemp;
4662 CPU86_LDoubleU fpsrcop1, fptemp1;
4663 int expdif;
4664 signed long long int q;
4665
4666#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4667 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4668#else
4669 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4670#endif
4671 ST0 = 0.0 / 0.0; /* NaN */
4672 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4673 return;
4674 }
4675
4676 fpsrcop = ST0;
4677 fptemp = ST1;
4678 fpsrcop1.d = fpsrcop;
4679 fptemp1.d = fptemp;
4680 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4681
4682 if (expdif < 0) {
4683 /* optimisation? taken from the AMD docs */
4684 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4685 /* ST0 is unchanged */
4686 return;
4687 }
4688
4689 if (expdif < 53) {
4690 dblq = fpsrcop / fptemp;
4691 /* round dblq towards nearest integer */
4692 dblq = rint(dblq);
4693 ST0 = fpsrcop - fptemp * dblq;
4694
4695 /* convert dblq to q by truncating towards zero */
4696 if (dblq < 0.0)
4697 q = (signed long long int)(-dblq);
4698 else
4699 q = (signed long long int)dblq;
4700
4701 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4702 /* (C0,C3,C1) <-- (q2,q1,q0) */
4703 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4704 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4705 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4706 } else {
4707 env->fpus |= 0x400; /* C2 <-- 1 */
4708 fptemp = pow(2.0, expdif - 50);
4709 fpsrcop = (ST0 / ST1) / fptemp;
4710 /* fpsrcop = integer obtained by chopping */
4711 fpsrcop = (fpsrcop < 0.0) ?
4712 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4713 ST0 -= (ST1 * fpsrcop * fptemp);
4714 }
4715}
4716
4717void helper_fprem(void)
4718{
4719 CPU86_LDouble dblq, fpsrcop, fptemp;
4720 CPU86_LDoubleU fpsrcop1, fptemp1;
4721 int expdif;
4722 signed long long int q;
4723
4724#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4725 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4726#else
4727 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4728#endif
4729 ST0 = 0.0 / 0.0; /* NaN */
4730 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4731 return;
4732 }
4733
4734 fpsrcop = (CPU86_LDouble)ST0;
4735 fptemp = (CPU86_LDouble)ST1;
4736 fpsrcop1.d = fpsrcop;
4737 fptemp1.d = fptemp;
4738 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4739
4740 if (expdif < 0) {
4741 /* optimisation? taken from the AMD docs */
4742 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4743 /* ST0 is unchanged */
4744 return;
4745 }
4746
4747 if ( expdif < 53 ) {
4748 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4749 /* round dblq towards zero */
4750 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4751 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4752
4753 /* convert dblq to q by truncating towards zero */
4754 if (dblq < 0.0)
4755 q = (signed long long int)(-dblq);
4756 else
4757 q = (signed long long int)dblq;
4758
4759 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4760 /* (C0,C3,C1) <-- (q2,q1,q0) */
4761 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4762 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4763 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4764 } else {
4765 int N = 32 + (expdif % 32); /* as per AMD docs */
4766 env->fpus |= 0x400; /* C2 <-- 1 */
4767 fptemp = pow(2.0, (double)(expdif - N));
4768 fpsrcop = (ST0 / ST1) / fptemp;
4769 /* fpsrcop = integer obtained by chopping */
4770 fpsrcop = (fpsrcop < 0.0) ?
4771 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4772 ST0 -= (ST1 * fpsrcop * fptemp);
4773 }
4774}
4775
4776void helper_fyl2xp1(void)
4777{
4778 CPU86_LDouble fptemp;
4779
4780 fptemp = ST0;
4781 if ((fptemp+1.0)>0.0) {
4782 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4783 ST1 *= fptemp;
4784 fpop();
4785 } else {
4786 env->fpus &= (~0x4700);
4787 env->fpus |= 0x400;
4788 }
4789}
4790
4791void helper_fsqrt(void)
4792{
4793 CPU86_LDouble fptemp;
4794
4795 fptemp = ST0;
4796 if (fptemp<0.0) {
4797 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4798 env->fpus |= 0x400;
4799 }
4800 ST0 = sqrt(fptemp);
4801}
4802
4803void helper_fsincos(void)
4804{
4805 CPU86_LDouble fptemp;
4806
4807 fptemp = ST0;
4808 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4809 env->fpus |= 0x400;
4810 } else {
4811 ST0 = sin(fptemp);
4812 fpush();
4813 ST0 = cos(fptemp);
4814 env->fpus &= (~0x400); /* C2 <-- 0 */
4815 /* the above code is for |arg| < 2**63 only */
4816 }
4817}
4818
4819void helper_frndint(void)
4820{
4821 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4822}
4823
4824void helper_fscale(void)
4825{
4826 ST0 = ldexp (ST0, (int)(ST1));
4827}
4828
4829void helper_fsin(void)
4830{
4831 CPU86_LDouble fptemp;
4832
4833 fptemp = ST0;
4834 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4835 env->fpus |= 0x400;
4836 } else {
4837 ST0 = sin(fptemp);
4838 env->fpus &= (~0x400); /* C2 <-- 0 */
4839 /* the above code is for |arg| < 2**53 only */
4840 }
4841}
4842
4843void helper_fcos(void)
4844{
4845 CPU86_LDouble fptemp;
4846
4847 fptemp = ST0;
4848 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4849 env->fpus |= 0x400;
4850 } else {
4851 ST0 = cos(fptemp);
4852 env->fpus &= (~0x400); /* C2 <-- 0 */
4853 /* the above code is for |arg| < 2**63 only */
4854 }
4855}
4856
4857void helper_fxam_ST0(void)
4858{
4859 CPU86_LDoubleU temp;
4860 int expdif;
4861
4862 temp.d = ST0;
4863
4864 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4865 if (SIGND(temp))
4866 env->fpus |= 0x200; /* C1 <-- 1 */
4867
4868 /* XXX: test fptags too */
4869 expdif = EXPD(temp);
4870 if (expdif == MAXEXPD) {
4871#ifdef USE_X86LDOUBLE
4872 if (MANTD(temp) == 0x8000000000000000ULL)
4873#else
4874 if (MANTD(temp) == 0)
4875#endif
4876 env->fpus |= 0x500 /*Infinity*/;
4877 else
4878 env->fpus |= 0x100 /*NaN*/;
4879 } else if (expdif == 0) {
4880 if (MANTD(temp) == 0)
4881 env->fpus |= 0x4000 /*Zero*/;
4882 else
4883 env->fpus |= 0x4400 /*Denormal*/;
4884 } else {
4885 env->fpus |= 0x400;
4886 }
4887}
4888
4889void helper_fstenv(target_ulong ptr, int data32)
4890{
4891 int fpus, fptag, exp, i;
4892 uint64_t mant;
4893 CPU86_LDoubleU tmp;
4894
4895 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4896 fptag = 0;
4897 for (i=7; i>=0; i--) {
4898 fptag <<= 2;
4899 if (env->fptags[i]) {
4900 fptag |= 3;
4901 } else {
4902 tmp.d = env->fpregs[i].d;
4903 exp = EXPD(tmp);
4904 mant = MANTD(tmp);
4905 if (exp == 0 && mant == 0) {
4906 /* zero */
4907 fptag |= 1;
4908 } else if (exp == 0 || exp == MAXEXPD
4909#ifdef USE_X86LDOUBLE
4910 || (mant & (1LL << 63)) == 0
4911#endif
4912 ) {
4913 /* NaNs, infinity, denormal */
4914 fptag |= 2;
4915 }
4916 }
4917 }
4918 if (data32) {
4919 /* 32 bit */
4920 stl(ptr, env->fpuc);
4921 stl(ptr + 4, fpus);
4922 stl(ptr + 8, fptag);
4923 stl(ptr + 12, 0); /* fpip */
4924 stl(ptr + 16, 0); /* fpcs */
4925 stl(ptr + 20, 0); /* fpoo */
4926 stl(ptr + 24, 0); /* fpos */
4927 } else {
4928 /* 16 bit */
4929 stw(ptr, env->fpuc);
4930 stw(ptr + 2, fpus);
4931 stw(ptr + 4, fptag);
4932 stw(ptr + 6, 0);
4933 stw(ptr + 8, 0);
4934 stw(ptr + 10, 0);
4935 stw(ptr + 12, 0);
4936 }
4937}
4938
4939void helper_fldenv(target_ulong ptr, int data32)
4940{
4941 int i, fpus, fptag;
4942
4943 if (data32) {
4944 env->fpuc = lduw(ptr);
4945 fpus = lduw(ptr + 4);
4946 fptag = lduw(ptr + 8);
4947 }
4948 else {
4949 env->fpuc = lduw(ptr);
4950 fpus = lduw(ptr + 2);
4951 fptag = lduw(ptr + 4);
4952 }
4953 env->fpstt = (fpus >> 11) & 7;
4954 env->fpus = fpus & ~0x3800;
4955 for(i = 0;i < 8; i++) {
4956 env->fptags[i] = ((fptag & 3) == 3);
4957 fptag >>= 2;
4958 }
4959}
4960
4961void helper_fsave(target_ulong ptr, int data32)
4962{
4963 CPU86_LDouble tmp;
4964 int i;
4965
4966 helper_fstenv(ptr, data32);
4967
4968 ptr += (14 << data32);
4969 for(i = 0;i < 8; i++) {
4970 tmp = ST(i);
4971 helper_fstt(tmp, ptr);
4972 ptr += 10;
4973 }
4974
4975 /* fninit */
4976 env->fpus = 0;
4977 env->fpstt = 0;
4978 env->fpuc = 0x37f;
4979 env->fptags[0] = 1;
4980 env->fptags[1] = 1;
4981 env->fptags[2] = 1;
4982 env->fptags[3] = 1;
4983 env->fptags[4] = 1;
4984 env->fptags[5] = 1;
4985 env->fptags[6] = 1;
4986 env->fptags[7] = 1;
4987}
4988
4989void helper_frstor(target_ulong ptr, int data32)
4990{
4991 CPU86_LDouble tmp;
4992 int i;
4993
4994 helper_fldenv(ptr, data32);
4995 ptr += (14 << data32);
4996
4997 for(i = 0;i < 8; i++) {
4998 tmp = helper_fldt(ptr);
4999 ST(i) = tmp;
5000 ptr += 10;
5001 }
5002}
5003
5004void helper_fxsave(target_ulong ptr, int data64)
5005{
5006 int fpus, fptag, i, nb_xmm_regs;
5007 CPU86_LDouble tmp;
5008 target_ulong addr;
5009
5010 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5011 fptag = 0;
5012 for(i = 0; i < 8; i++) {
5013 fptag |= (env->fptags[i] << i);
5014 }
5015 stw(ptr, env->fpuc);
5016 stw(ptr + 2, fpus);
5017 stw(ptr + 4, fptag ^ 0xff);
5018#ifdef TARGET_X86_64
5019 if (data64) {
5020 stq(ptr + 0x08, 0); /* rip */
5021 stq(ptr + 0x10, 0); /* rdp */
5022 } else
5023#endif
5024 {
5025 stl(ptr + 0x08, 0); /* eip */
5026 stl(ptr + 0x0c, 0); /* sel */
5027 stl(ptr + 0x10, 0); /* dp */
5028 stl(ptr + 0x14, 0); /* sel */
5029 }
5030
5031 addr = ptr + 0x20;
5032 for(i = 0;i < 8; i++) {
5033 tmp = ST(i);
5034 helper_fstt(tmp, addr);
5035 addr += 16;
5036 }
5037
5038 if (env->cr[4] & CR4_OSFXSR_MASK) {
5039 /* XXX: finish it */
5040 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5041 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5042 if (env->hflags & HF_CS64_MASK)
5043 nb_xmm_regs = 16;
5044 else
5045 nb_xmm_regs = 8;
5046 addr = ptr + 0xa0;
5047 for(i = 0; i < nb_xmm_regs; i++) {
5048 stq(addr, env->xmm_regs[i].XMM_Q(0));
5049 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5050 addr += 16;
5051 }
5052 }
5053}
5054
5055void helper_fxrstor(target_ulong ptr, int data64)
5056{
5057 int i, fpus, fptag, nb_xmm_regs;
5058 CPU86_LDouble tmp;
5059 target_ulong addr;
5060
5061 env->fpuc = lduw(ptr);
5062 fpus = lduw(ptr + 2);
5063 fptag = lduw(ptr + 4);
5064 env->fpstt = (fpus >> 11) & 7;
5065 env->fpus = fpus & ~0x3800;
5066 fptag ^= 0xff;
5067 for(i = 0;i < 8; i++) {
5068 env->fptags[i] = ((fptag >> i) & 1);
5069 }
5070
5071 addr = ptr + 0x20;
5072 for(i = 0;i < 8; i++) {
5073 tmp = helper_fldt(addr);
5074 ST(i) = tmp;
5075 addr += 16;
5076 }
5077
5078 if (env->cr[4] & CR4_OSFXSR_MASK) {
5079 /* XXX: finish it */
5080 env->mxcsr = ldl(ptr + 0x18);
5081 //ldl(ptr + 0x1c);
5082 if (env->hflags & HF_CS64_MASK)
5083 nb_xmm_regs = 16;
5084 else
5085 nb_xmm_regs = 8;
5086 addr = ptr + 0xa0;
5087 for(i = 0; i < nb_xmm_regs; i++) {
5088#if !defined(VBOX) || __GNUC__ < 4
5089 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5090 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5091#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5092# if 1
5093 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5094 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5095 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5096 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5097# else
5098 /* this works fine on Mac OS X, gcc 4.0.1 */
5099 uint64_t u64 = ldq(addr);
5100 env->xmm_regs[i].XMM_Q(0) = u64;
5101 u64 = ldq(addr + 8);
5102 env->xmm_regs[i].XMM_Q(1) = u64;
5103# endif
5104#endif
5105 addr += 16;
5106 }
5107 }
5108}
5109
5110#ifndef USE_X86LDOUBLE
5111
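/* Without USE_X86LDOUBLE the internal CPU86_LDouble is a 64-bit double; these helpers
   convert between it and the 80-bit extended format (64-bit mantissa plus 16-bit
   sign/exponent) used in memory images. */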
5112void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5113{
5114 CPU86_LDoubleU temp;
5115 int e;
5116
5117 temp.d = f;
5118 /* mantissa */
5119 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5120 /* exponent + sign */
5121 e = EXPD(temp) - EXPBIAS + 16383;
5122 e |= SIGND(temp) >> 16;
5123 *pexp = e;
5124}
5125
5126CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5127{
5128 CPU86_LDoubleU temp;
5129 int e;
5130 uint64_t ll;
5131
5132 /* XXX: handle overflow ? */
5133 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5134 e |= (upper >> 4) & 0x800; /* sign */
5135 ll = (mant >> 11) & ((1LL << 52) - 1);
5136#ifdef __arm__
5137 temp.l.upper = (e << 20) | (ll >> 32);
5138 temp.l.lower = ll;
5139#else
5140 temp.ll = ll | ((uint64_t)e << 52);
5141#endif
5142 return temp.d;
5143}
5144
5145#else
5146
5147void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5148{
5149 CPU86_LDoubleU temp;
5150
5151 temp.d = f;
5152 *pmant = temp.l.lower;
5153 *pexp = temp.l.upper;
5154}
5155
5156CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5157{
5158 CPU86_LDoubleU temp;
5159
5160 temp.l.upper = upper;
5161 temp.l.lower = mant;
5162 return temp.d;
5163}
5164#endif
5165
5166#ifdef TARGET_X86_64
5167
5168//#define DEBUG_MULDIV
5169
5170static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5171{
5172 *plow += a;
5173 /* carry test */
5174 if (*plow < a)
5175 (*phigh)++;
5176 *phigh += b;
5177}
5178
5179static void neg128(uint64_t *plow, uint64_t *phigh)
5180{
5181 *plow = ~ *plow;
5182 *phigh = ~ *phigh;
5183 add128(plow, phigh, 1, 0);
5184}
5185
5186/* return TRUE if overflow */
5187static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5188{
5189 uint64_t q, r, a1, a0;
5190 int i, qb, ab;
5191
5192 a0 = *plow;
5193 a1 = *phigh;
5194 if (a1 == 0) {
5195 q = a0 / b;
5196 r = a0 % b;
5197 *plow = q;
5198 *phigh = r;
5199 } else {
5200 if (a1 >= b)
5201 return 1;
5202 /* XXX: use a better algorithm */
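 /* Plain restoring division: shift the 128-bit dividend (a1:a0) left one bit at a time,
    subtract b from the high half whenever it fits, and collect the quotient bits in a0,
    leaving the quotient in a0 and the remainder in a1. */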
5203 for(i = 0; i < 64; i++) {
5204 ab = a1 >> 63;
5205 a1 = (a1 << 1) | (a0 >> 63);
5206 if (ab || a1 >= b) {
5207 a1 -= b;
5208 qb = 1;
5209 } else {
5210 qb = 0;
5211 }
5212 a0 = (a0 << 1) | qb;
5213 }
5214#if defined(DEBUG_MULDIV)
5215 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5216 *phigh, *plow, b, a0, a1);
5217#endif
5218 *plow = a0;
5219 *phigh = a1;
5220 }
5221 return 0;
5222}
5223
5224/* return TRUE if overflow */
5225static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5226{
5227 int sa, sb;
5228 sa = ((int64_t)*phigh < 0);
5229 if (sa)
5230 neg128(plow, phigh);
5231 sb = (b < 0);
5232 if (sb)
5233 b = -b;
5234 if (div64(plow, phigh, b) != 0)
5235 return 1;
5236 if (sa ^ sb) {
5237 if (*plow > (1ULL << 63))
5238 return 1;
5239 *plow = - *plow;
5240 } else {
5241 if (*plow >= (1ULL << 63))
5242 return 1;
5243 }
5244 if (sa)
5245 *phigh = - *phigh;
5246 return 0;
5247}
5248
5249void helper_mulq_EAX_T0(target_ulong t0)
5250{
5251 uint64_t r0, r1;
5252
5253 mulu64(&r0, &r1, EAX, t0);
5254 EAX = r0;
5255 EDX = r1;
5256 CC_DST = r0;
5257 CC_SRC = r1;
5258}
5259
5260void helper_imulq_EAX_T0(target_ulong t0)
5261{
5262 uint64_t r0, r1;
5263
5264 muls64(&r0, &r1, EAX, t0);
5265 EAX = r0;
5266 EDX = r1;
5267 CC_DST = r0;
5268 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5269}
5270
5271target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5272{
5273 uint64_t r0, r1;
5274
5275 muls64(&r0, &r1, t0, t1);
5276 CC_DST = r0;
5277 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5278 return r0;
5279}
5280
5281void helper_divq_EAX(target_ulong t0)
5282{
5283 uint64_t r0, r1;
5284 if (t0 == 0) {
5285 raise_exception(EXCP00_DIVZ);
5286 }
5287 r0 = EAX;
5288 r1 = EDX;
5289 if (div64(&r0, &r1, t0))
5290 raise_exception(EXCP00_DIVZ);
5291 EAX = r0;
5292 EDX = r1;
5293}
5294
5295void helper_idivq_EAX(target_ulong t0)
5296{
5297 uint64_t r0, r1;
5298 if (t0 == 0) {
5299 raise_exception(EXCP00_DIVZ);
5300 }
5301 r0 = EAX;
5302 r1 = EDX;
5303 if (idiv64(&r0, &r1, t0))
5304 raise_exception(EXCP00_DIVZ);
5305 EAX = r0;
5306 EDX = r1;
5307}
5308#endif
5309
5310static void do_hlt(void)
5311{
5312 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5313 env->halted = 1;
5314 env->exception_index = EXCP_HLT;
5315 cpu_loop_exit();
5316}
5317
5318void helper_hlt(int next_eip_addend)
5319{
5320 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5321 EIP += next_eip_addend;
5322
5323 do_hlt();
5324}
5325
5326void helper_monitor(target_ulong ptr)
5327{
5328 if ((uint32_t)ECX != 0)
5329 raise_exception(EXCP0D_GPF);
5330 /* XXX: store address ? */
5331 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5332}
5333
5334void helper_mwait(int next_eip_addend)
5335{
5336 if ((uint32_t)ECX != 0)
5337 raise_exception(EXCP0D_GPF);
5338#ifdef VBOX
5339 helper_hlt(next_eip_addend);
5340#else
5341 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5342 EIP += next_eip_addend;
5343
5344 /* XXX: not complete but not completely erroneous */
5345 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5346 /* more than one CPU: do not sleep because another CPU may
5347 wake this one */
5348 } else {
5349 do_hlt();
5350 }
5351#endif
5352}
5353
5354void helper_debug(void)
5355{
5356 env->exception_index = EXCP_DEBUG;
5357 cpu_loop_exit();
5358}
5359
5360void helper_raise_interrupt(int intno, int next_eip_addend)
5361{
5362 raise_interrupt(intno, 1, 0, next_eip_addend);
5363}
5364
5365void helper_raise_exception(int exception_index)
5366{
5367 raise_exception(exception_index);
5368}
5369
5370void helper_cli(void)
5371{
5372 env->eflags &= ~IF_MASK;
5373}
5374
5375void helper_sti(void)
5376{
5377 env->eflags |= IF_MASK;
5378}
5379
5380#ifdef VBOX
5381void helper_cli_vme(void)
5382{
5383 env->eflags &= ~VIF_MASK;
5384}
5385
5386void helper_sti_vme(void)
5387{
5388 /* First check, then change eflags according to the AMD manual */
5389 if (env->eflags & VIP_MASK) {
5390 raise_exception(EXCP0D_GPF);
5391 }
5392 env->eflags |= VIF_MASK;
5393}
5394#endif
5395
5396#if 0
5397/* vm86plus instructions */
5398void helper_cli_vm(void)
5399{
5400 env->eflags &= ~VIF_MASK;
5401}
5402
5403void helper_sti_vm(void)
5404{
5405 env->eflags |= VIF_MASK;
5406 if (env->eflags & VIP_MASK) {
5407 raise_exception(EXCP0D_GPF);
5408 }
5409}
5410#endif
5411
5412void helper_set_inhibit_irq(void)
5413{
5414 env->hflags |= HF_INHIBIT_IRQ_MASK;
5415}
5416
5417void helper_reset_inhibit_irq(void)
5418{
5419 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5420}
5421
5422void helper_boundw(target_ulong a0, int v)
5423{
5424 int low, high;
5425 low = ldsw(a0);
5426 high = ldsw(a0 + 2);
5427 v = (int16_t)v;
5428 if (v < low || v > high) {
5429 raise_exception(EXCP05_BOUND);
5430 }
5431 FORCE_RET();
5432}
5433
5434void helper_boundl(target_ulong a0, int v)
5435{
5436 int low, high;
5437 low = ldl(a0);
5438 high = ldl(a0 + 4);
5439 if (v < low || v > high) {
5440 raise_exception(EXCP05_BOUND);
5441 }
5442 FORCE_RET();
5443}
5444
5445static float approx_rsqrt(float a)
5446{
5447 return 1.0 / sqrt(a);
5448}
5449
5450static float approx_rcp(float a)
5451{
5452 return 1.0 / a;
5453}
5454
5455#if !defined(CONFIG_USER_ONLY)
5456
5457#define MMUSUFFIX _mmu
5458
5459#define SHIFT 0
5460#include "softmmu_template.h"
5461
5462#define SHIFT 1
5463#include "softmmu_template.h"
5464
5465#define SHIFT 2
5466#include "softmmu_template.h"
5467
5468#define SHIFT 3
5469#include "softmmu_template.h"
5470
5471#endif
5472
5473#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5474/* This code assumes that a real physical address always fits into a host CPU register,
5475 which is wrong in general, but true for our current use cases. */
5476RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5477{
5478 return remR3PhysReadS8(addr);
5479}
5480RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5481{
5482 return remR3PhysReadU8(addr);
5483}
5484void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5485{
5486 remR3PhysWriteU8(addr, val);
5487}
5488RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5489{
5490 return remR3PhysReadS16(addr);
5491}
5492RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5493{
5494 return remR3PhysReadU16(addr);
5495}
5496void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5497{
5498 remR3PhysWriteU16(addr, val);
5499}
5500RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5501{
5502 return remR3PhysReadS32(addr);
5503}
5504RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5505{
5506 return remR3PhysReadU32(addr);
5507}
5508void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5509{
5510 remR3PhysWriteU32(addr, val);
5511}
5512uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5513{
5514 return remR3PhysReadU64(addr);
5515}
5516void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5517{
5518 remR3PhysWriteU64(addr, val);
5519}
5520#endif
5521
5522/* try to fill the TLB and return an exception if error. If retaddr is
5523 NULL, it means that the function was called in C code (i.e. not
5524 from generated code or from helper.c) */
5525/* XXX: fix it to restore all registers */
5526void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5527{
5528 TranslationBlock *tb;
5529 int ret;
5530 unsigned long pc;
5531 CPUX86State *saved_env;
5532
5533 /* XXX: hack to restore env in all cases, even if not called from
5534 generated code */
5535 saved_env = env;
5536 env = cpu_single_env;
5537
5538 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5539 if (ret) {
5540 if (retaddr) {
5541 /* now we have a real cpu fault */
5542 pc = (unsigned long)retaddr;
5543 tb = tb_find_pc(pc);
5544 if (tb) {
5545 /* the PC is inside the translated code. It means that we have
5546 a virtual CPU fault */
5547 cpu_restore_state(tb, env, pc, NULL);
5548 }
5549 }
5550 raise_exception_err(env->exception_index, env->error_code);
5551 }
5552 env = saved_env;
5553}
5554
5555#ifdef VBOX
5556
5557/**
5558 * Correctly computes the eflags.
5559 * @returns eflags.
5560 * @param env1 CPU environment.
5561 */
5562uint32_t raw_compute_eflags(CPUX86State *env1)
5563{
5564 CPUX86State *savedenv = env;
5565 uint32_t efl;
5566 env = env1;
5567 efl = compute_eflags();
5568 env = savedenv;
5569 return efl;
5570}
5571
5572/**
5573 * Reads byte from virtual address in guest memory area.
5574 * XXX: is it working for any addresses? swapped out pages?
5575 * @returns read data byte.
5576 * @param env1 CPU environment.
5577 * @param pvAddr GC Virtual address.
5578 */
5579uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5580{
5581 CPUX86State *savedenv = env;
5582 uint8_t u8;
5583 env = env1;
5584 u8 = ldub_kernel(addr);
5585 env = savedenv;
5586 return u8;
5587}
5588
5589/**
5590 * Reads word from virtual address in guest memory area.
5591 * XXX: is it working for any addresses? swapped out pages?
5592 * @returns read data word.
5593 * @param env1 CPU environment.
5594 * @param pvAddr GC Virtual address.
5595 */
5596uint16_t read_word(CPUX86State *env1, target_ulong addr)
5597{
5598 CPUX86State *savedenv = env;
5599 uint16_t u16;
5600 env = env1;
5601 u16 = lduw_kernel(addr);
5602 env = savedenv;
5603 return u16;
5604}
5605
5606/**
5607 * Reads dword from virtual address in guest memory area.
5608 * XXX: is it working for any addresses? swapped out pages?
5609 * @returns read data dword.
5610 * @param env1 CPU environment.
5611 * @param pvAddr GC Virtual address.
5612 */
5613uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5614{
5615 CPUX86State *savedenv = env;
5616 uint32_t u32;
5617 env = env1;
5618 u32 = ldl_kernel(addr);
5619 env = savedenv;
5620 return u32;
5621}
5622
5623/**
5624 * Writes byte to virtual address in guest memory area.
5625 * XXX: is it working for any addresses? swapped out pages?
5627 * @param env1 CPU environment.
5628 * @param pvAddr GC Virtual address.
5629 * @param val byte value
5630 */
5631void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5632{
5633 CPUX86State *savedenv = env;
5634 env = env1;
5635 stb(addr, val);
5636 env = savedenv;
5637}
5638
5639void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5640{
5641 CPUX86State *savedenv = env;
5642 env = env1;
5643 stw(addr, val);
5644 env = savedenv;
5645}
5646
5647void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5648{
5649 CPUX86State *savedenv = env;
5650 env = env1;
5651 stl(addr, val);
5652 env = savedenv;
5653}
5654
5655/**
5656 * Correctly loads a selector into a segment register, updating the internal
5657 * qemu data/caches.
5658 * @param env1 CPU environment.
5659 * @param seg_reg Segment register.
5660 * @param selector Selector to load.
5661 */
5662void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5663{
5664 CPUX86State *savedenv = env;
5665 jmp_buf old_buf;
5666
5667 env = env1;
5668
5669 if ( env->eflags & X86_EFL_VM
5670 || !(env->cr[0] & X86_CR0_PE))
5671 {
5672 load_seg_vm(seg_reg, selector);
5673
5674 env = savedenv;
5675
5676 /* Successful sync. */
5677 env1->segs[seg_reg].newselector = 0;
5678 }
5679 else
5680 {
5681 /* For some reason it works even without saving/restoring the jump buffer, and since this code is
5682 time critical, let's not do that. */
5683#if 0
5684 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5685#endif
5686 if (setjmp(env1->jmp_env) == 0)
5687 {
5688 if (seg_reg == R_CS)
5689 {
5690 uint32_t e1, e2;
5691 e1 = e2 = 0;
5692 load_segment(&e1, &e2, selector);
5693 cpu_x86_load_seg_cache(env, R_CS, selector,
5694 get_seg_base(e1, e2),
5695 get_seg_limit(e1, e2),
5696 e2);
5697 }
5698 else
5699 tss_load_seg(seg_reg, selector);
5700 env = savedenv;
5701
5702 /* Successful sync. */
5703 env1->segs[seg_reg].newselector = 0;
5704 }
5705 else
5706 {
5707 env = savedenv;
5708
5709 /* Postpone sync until the guest uses the selector. */
5710 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5711 env1->segs[seg_reg].newselector = selector;
5712 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5713 env1->exception_index = -1;
5714 env1->error_code = 0;
5715 env1->old_exception = -1;
5716 }
5717#if 0
5718 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5719#endif
5720 }
5721
5722}
5723
5724DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5725{
5726 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5727}
5728
5729
5730int emulate_single_instr(CPUX86State *env1)
5731{
5732 TranslationBlock *tb;
5733 TranslationBlock *current;
5734 int flags;
5735 uint8_t *tc_ptr;
5736 target_ulong old_eip;
5737
5738 /* ensures env is loaded! */
5739 CPUX86State *savedenv = env;
5740 env = env1;
5741
5742 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5743
5744 current = env->current_tb;
5745 env->current_tb = NULL;
5746 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5747
5748 /*
5749 * Translate only one instruction.
5750 */
5751 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5752 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5753 env->segs[R_CS].base, flags, 0);
5754
5755 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5756
5757
5758 /* tb_link_phys: */
5759 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5760 tb->jmp_next[0] = NULL;
5761 tb->jmp_next[1] = NULL;
5762 Assert(tb->jmp_next[0] == NULL);
5763 Assert(tb->jmp_next[1] == NULL);
5764 if (tb->tb_next_offset[0] != 0xffff)
5765 tb_reset_jump(tb, 0);
5766 if (tb->tb_next_offset[1] != 0xffff)
5767 tb_reset_jump(tb, 1);
5768
5769 /*
5770 * Execute it using emulation
5771 */
5772 old_eip = env->eip;
5773 env->current_tb = tb;
5774
5775 /*
5776 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5777 * This is perhaps not a very safe hack.
5778 */
5779 while(old_eip == env->eip)
5780 {
5781 tc_ptr = tb->tc_ptr;
5782
5783#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5784 int fake_ret;
5785 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5786#else
5787 tcg_qemu_tb_exec(tc_ptr);
5788#endif
5789 /*
5790 * Exit once we detect an external interrupt and interrupts are enabled
5791 */
5792 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5793 ( (env->eflags & IF_MASK) &&
5794 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5795 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5796 {
5797 break;
5798 }
5799 }
5800 env->current_tb = current;
5801
5802 tb_phys_invalidate(tb, -1);
5803 tb_free(tb);
5804/*
5805 Assert(tb->tb_next_offset[0] == 0xffff);
5806 Assert(tb->tb_next_offset[1] == 0xffff);
5807 Assert(tb->tb_next[0] == 0xffff);
5808 Assert(tb->tb_next[1] == 0xffff);
5809 Assert(tb->jmp_next[0] == NULL);
5810 Assert(tb->jmp_next[1] == NULL);
5811 Assert(tb->jmp_first == NULL); */
5812
5813 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5814
5815 /*
5816 * Execute the next instruction when we encounter instruction fusing.
5817 */
5818 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5819 {
5820 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5821 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5822 emulate_single_instr(env);
5823 }
5824
5825 env = savedenv;
5826 return 0;
5827}
5828
5829/**
5830 * Correctly loads a new ldtr selector.
5831 *
5832 * @param env1 CPU environment.
5833 * @param selector Selector to load.
5834 */
5835void sync_ldtr(CPUX86State *env1, int selector)
5836{
5837 CPUX86State *saved_env = env;
5838 if (setjmp(env1->jmp_env) == 0)
5839 {
5840 env = env1;
5841 helper_lldt(selector);
5842 env = saved_env;
5843 }
5844 else
5845 {
5846 env = saved_env;
5847#ifdef VBOX_STRICT
5848 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5849#endif
5850 }
5851}
5852
5853/**
5854 * Correctly loads a new tr selector.
5855 *
5856 * @param env1 CPU environment.
5857 * @param selector Selector to load.
5858 */
5859int sync_tr(CPUX86State *env1, int selector)
5860{
5861 /* ARG! This was going to call helper_ltr_T0, but that won't work because of the busy flag. */
5862 SegmentCache *dt;
5863 uint32_t e1, e2;
5864 int index, type, entry_limit;
5865 target_ulong ptr;
5866 CPUX86State *saved_env = env;
5867 env = env1;
5868
5869 selector &= 0xffff;
5870 if ((selector & 0xfffc) == 0) {
5871 /* NULL selector case: invalid TR */
5872 env->tr.base = 0;
5873 env->tr.limit = 0;
5874 env->tr.flags = 0;
5875 } else {
5876 if (selector & 0x4)
5877 goto l_failure;
5878 dt = &env->gdt;
5879 index = selector & ~7;
5880#ifdef TARGET_X86_64
5881 if (env->hflags & HF_LMA_MASK)
5882 entry_limit = 15;
5883 else
5884#endif
5885 entry_limit = 7;
5886 if ((index + entry_limit) > dt->limit)
5887 goto l_failure;
5888 ptr = dt->base + index;
5889 e1 = ldl_kernel(ptr);
5890 e2 = ldl_kernel(ptr + 4);
5891 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5892 if ((e2 & DESC_S_MASK) /*||
5893 (type != 1 && type != 9)*/)
5894 goto l_failure;
5895 if (!(e2 & DESC_P_MASK))
5896 goto l_failure;
5897#ifdef TARGET_X86_64
5898 if (env->hflags & HF_LMA_MASK) {
5899 uint32_t e3;
5900 e3 = ldl_kernel(ptr + 8);
5901 load_seg_cache_raw_dt(&env->tr, e1, e2);
5902 env->tr.base |= (target_ulong)e3 << 32;
5903 } else
5904#endif
5905 {
5906 load_seg_cache_raw_dt(&env->tr, e1, e2);
5907 }
5908 e2 |= DESC_TSS_BUSY_MASK;
5909 stl_kernel(ptr + 4, e2);
5910 }
5911 env->tr.selector = selector;
5912
5913 env = saved_env;
5914 return 0;
5915l_failure:
5916 AssertMsgFailed(("selector=%d\n", selector));
5917 return -1;
5918}
5919
5920
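/* Fetch ss:esp for the given privilege level (dpl) from the current TSS without raising a
   fault; returns 1 on success and 0 if the entry lies outside the TSS limit. */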
5921int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5922 uint32_t *esp_ptr, int dpl)
5923{
5924 int type, index, shift;
5925
5926 CPUX86State *savedenv = env;
5927 env = env1;
5928
5929 if (!(env->tr.flags & DESC_P_MASK))
5930 cpu_abort(env, "invalid tss");
5931 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5932 if ((type & 7) != 1)
5933 cpu_abort(env, "invalid tss type %d", type);
5934 shift = type >> 3;
5935 index = (dpl * 4 + 2) << shift;
5936 if (index + (4 << shift) - 1 > env->tr.limit)
5937 {
5938 env = savedenv;
5939 return 0;
5940 }
5941 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5942
5943 if (shift == 0) {
5944 *esp_ptr = lduw_kernel(env->tr.base + index);
5945 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5946 } else {
5947 *esp_ptr = ldl_kernel(env->tr.base + index);
5948 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5949 }
5950
5951 env = savedenv;
5952 return 1;
5953}
5954
5955//*****************************************************************************
5956// Needs to be at the bottom of the file (overriding macros)
5957
5958#ifndef VBOX
5959static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5960#else /* VBOX */
5961DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5962#endif /* VBOX */
5963{
5964 return *(CPU86_LDouble *)ptr;
5965}
5966
5967#ifndef VBOX
5968static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5969#else /* VBOX */
5970DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5971#endif /* VBOX */
5972{
5973 *(CPU86_LDouble *)ptr = f;
5974}
5975
5976#undef stw
5977#undef stl
5978#undef stq
5979#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5980#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5981#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5982
5983//*****************************************************************************
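/* Store the emulated FPU/SSE state into the buffer at ptr: FXSAVE layout when the virtual
   CPU advertises FXSR, legacy FSAVE (X86FPUSTATE) layout otherwise. */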
5984void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5985{
5986 int fpus, fptag, i, nb_xmm_regs;
5987 CPU86_LDouble tmp;
5988 uint8_t *addr;
5989 int data64 = !!(env->hflags & HF_LMA_MASK);
5990
5991 if (env->cpuid_features & CPUID_FXSR)
5992 {
5993 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5994 fptag = 0;
5995 for(i = 0; i < 8; i++) {
5996 fptag |= (env->fptags[i] << i);
5997 }
5998 stw(ptr, env->fpuc);
5999 stw(ptr + 2, fpus);
6000 stw(ptr + 4, fptag ^ 0xff);
6001
6002 addr = ptr + 0x20;
6003 for(i = 0;i < 8; i++) {
6004 tmp = ST(i);
6005 helper_fstt_raw(tmp, addr);
6006 addr += 16;
6007 }
6008
6009 if (env->cr[4] & CR4_OSFXSR_MASK) {
6010 /* XXX: finish it */
6011 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6012 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6013 nb_xmm_regs = 8 << data64;
6014 addr = ptr + 0xa0;
6015 for(i = 0; i < nb_xmm_regs; i++) {
6016#if __GNUC__ < 4
6017 stq(addr, env->xmm_regs[i].XMM_Q(0));
6018 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6019#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6020 stl(addr, env->xmm_regs[i].XMM_L(0));
6021 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6022 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6023 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6024#endif
6025 addr += 16;
6026 }
6027 }
6028 }
6029 else
6030 {
6031 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6032 int fptag;
6033
6034 fp->FCW = env->fpuc;
6035 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6036 fptag = 0;
6037 for (i=7; i>=0; i--) {
6038 fptag <<= 2;
6039 if (env->fptags[i]) {
6040 fptag |= 3;
6041 } else {
6042 /* the FPU automatically computes it */
6043 }
6044 }
6045 fp->FTW = fptag;
6046
6047 for(i = 0;i < 8; i++) {
6048 tmp = ST(i);
6049 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6050 }
6051 }
6052}
6053
6054//*****************************************************************************
6055#undef lduw
6056#undef ldl
6057#undef ldq
6058#define lduw(a) *(uint16_t *)(a)
6059#define ldl(a) *(uint32_t *)(a)
6060#define ldq(a) *(uint64_t *)(a)
6061//*****************************************************************************
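/* Load the emulated FPU/SSE state from the buffer at ptr, using the same FXSAVE/FSAVE
   layout selection as restore_raw_fp_state above. */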
6062void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6063{
6064 int i, fpus, fptag, nb_xmm_regs;
6065 CPU86_LDouble tmp;
6066 uint8_t *addr;
6067 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6068
6069 if (env->cpuid_features & CPUID_FXSR)
6070 {
6071 env->fpuc = lduw(ptr);
6072 fpus = lduw(ptr + 2);
6073 fptag = lduw(ptr + 4);
6074 env->fpstt = (fpus >> 11) & 7;
6075 env->fpus = fpus & ~0x3800;
6076 fptag ^= 0xff;
6077 for(i = 0;i < 8; i++) {
6078 env->fptags[i] = ((fptag >> i) & 1);
6079 }
6080
6081 addr = ptr + 0x20;
6082 for(i = 0;i < 8; i++) {
6083 tmp = helper_fldt_raw(addr);
6084 ST(i) = tmp;
6085 addr += 16;
6086 }
6087
6088 if (env->cr[4] & CR4_OSFXSR_MASK) {
6089 /* XXX: finish it, endianness */
6090 env->mxcsr = ldl(ptr + 0x18);
6091 //ldl(ptr + 0x1c);
6092 nb_xmm_regs = 8 << data64;
6093 addr = ptr + 0xa0;
6094 for(i = 0; i < nb_xmm_regs; i++) {
6095#if HC_ARCH_BITS == 32
6096 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6097 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6098 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6099 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6100 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6101#else
6102 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6103 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6104#endif
6105 addr += 16;
6106 }
6107 }
6108 }
6109 else
6110 {
6111 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6112 int fptag, j;
6113
6114 env->fpuc = fp->FCW;
6115 env->fpstt = (fp->FSW >> 11) & 7;
6116 env->fpus = fp->FSW & ~0x3800;
6117 fptag = fp->FTW;
6118 for(i = 0;i < 8; i++) {
6119 env->fptags[i] = ((fptag & 3) == 3);
6120 fptag >>= 2;
6121 }
6122 j = env->fpstt;
6123 for(i = 0;i < 8; i++) {
6124 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6125 ST(i) = tmp;
6126 }
6127 }
6128}
6129//*****************************************************************************
6130//*****************************************************************************
6131
6132#endif /* VBOX */
6133
6134/* Secure Virtual Machine helpers */
6135
6136#if defined(CONFIG_USER_ONLY)
6137
6138void helper_vmrun(int aflag, int next_eip_addend)
6139{
6140}
6141void helper_vmmcall(void)
6142{
6143}
6144void helper_vmload(int aflag)
6145{
6146}
6147void helper_vmsave(int aflag)
6148{
6149}
6150void helper_stgi(void)
6151{
6152}
6153void helper_clgi(void)
6154{
6155}
6156void helper_skinit(void)
6157{
6158}
6159void helper_invlpga(int aflag)
6160{
6161}
6162void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6163{
6164}
6165void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6166{
6167}
6168
6169void helper_svm_check_io(uint32_t port, uint32_t param,
6170 uint32_t next_eip_addend)
6171{
6172}
6173#else
6174
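/* Store a segment cache entry into a vmcb_seg, packing the descriptor flag bits into the
   12-bit VMCB attribute format. */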
6175#ifndef VBOX
6176static inline void svm_save_seg(target_phys_addr_t addr,
6177#else /* VBOX */
6178DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6179#endif /* VBOX */
6180 const SegmentCache *sc)
6181{
6182 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6183 sc->selector);
6184 stq_phys(addr + offsetof(struct vmcb_seg, base),
6185 sc->base);
6186 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6187 sc->limit);
6188 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6189 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6190}
6191
6192#ifndef VBOX
6193static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6194#else /* VBOX */
6195DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6196#endif /* VBOX */
6197{
6198 unsigned int flags;
6199
6200 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6201 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6202 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6203 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6204 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6205}
6206
6207#ifndef VBOX
6208static inline void svm_load_seg_cache(target_phys_addr_t addr,
6209#else /* VBOX */
6210DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6211#endif /* VBOX */
6212 CPUState *env, int seg_reg)
6213{
6214 SegmentCache sc1, *sc = &sc1;
6215 svm_load_seg(addr, sc);
6216 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6217 sc->base, sc->limit, sc->flags);
6218}
6219
6220void helper_vmrun(int aflag, int next_eip_addend)
6221{
6222 target_ulong addr;
6223 uint32_t event_inj;
6224 uint32_t int_ctl;
6225
6226 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6227
6228 if (aflag == 2)
6229 addr = EAX;
6230 else
6231 addr = (uint32_t)EAX;
6232
6233 if (loglevel & CPU_LOG_TB_IN_ASM)
6234 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6235
6236 env->vm_vmcb = addr;
6237
6238 /* save the current CPU state in the hsave page */
6239 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6240 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6241
6242 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6243 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6244
6245 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6246 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6247 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6248 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6249 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6250 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6251
6252 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6253 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6254
6255 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6256 &env->segs[R_ES]);
6257 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6258 &env->segs[R_CS]);
6259 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6260 &env->segs[R_SS]);
6261 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6262 &env->segs[R_DS]);
6263
6264 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6265 EIP + next_eip_addend);
6266 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6267 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6268
6269 /* load the interception bitmaps so we do not need to access the
6270 vmcb in svm mode */
6271 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6272 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6273 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6274 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6275 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6276 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6277
6278 /* enable intercepts */
6279 env->hflags |= HF_SVMI_MASK;
6280
6281 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6282
6283 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6284 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6285
6286 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6287 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6288
6289 /* clear exit_info_2 so we behave like the real hardware */
6290 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6291
6292 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6293 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6294 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6295 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6296 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6297 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6298 if (int_ctl & V_INTR_MASKING_MASK) {
6299 env->v_tpr = int_ctl & V_TPR_MASK;
6300 env->hflags2 |= HF2_VINTR_MASK;
6301 if (env->eflags & IF_MASK)
6302 env->hflags2 |= HF2_HIF_MASK;
6303 }
6304
6305 cpu_load_efer(env,
6306 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6307 env->eflags = 0;
6308 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6309 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6310 CC_OP = CC_OP_EFLAGS;
6311
6312 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6313 env, R_ES);
6314 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6315 env, R_CS);
6316 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6317 env, R_SS);
6318 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6319 env, R_DS);
6320
6321 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6322 env->eip = EIP;
6323 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6324 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6325 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6326 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6327 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6328
6329 /* FIXME: guest state consistency checks */
6330
6331 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6332 case TLB_CONTROL_DO_NOTHING:
6333 break;
6334 case TLB_CONTROL_FLUSH_ALL_ASID:
6335 /* FIXME: this is not 100% correct but should work for now */
6336 tlb_flush(env, 1);
6337 break;
6338 }
6339
6340 env->hflags2 |= HF2_GIF_MASK;
6341
6342 if (int_ctl & V_IRQ_MASK) {
6343 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6344 }
6345
6346 /* maybe we need to inject an event */
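    /* EVENTINJ packs the vector into its low byte together with an event type
       (INTR, NMI, exception or software interrupt), a "deliver error code" flag
       and a valid bit; the error code itself lives in the adjoining
       event_inj_err field.  The valid bit is cleared below once the event has
       been picked up for injection. */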
6347 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6348 if (event_inj & SVM_EVTINJ_VALID) {
6349 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6350 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6351 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6352 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6353
6354 if (loglevel & CPU_LOG_TB_IN_ASM)
6355 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6356 /* FIXME: need to implement valid_err */
6357 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6358 case SVM_EVTINJ_TYPE_INTR:
6359 env->exception_index = vector;
6360 env->error_code = event_inj_err;
6361 env->exception_is_int = 0;
6362 env->exception_next_eip = -1;
6363 if (loglevel & CPU_LOG_TB_IN_ASM)
6364 fprintf(logfile, "INTR");
6365            /* XXX: is it always correct? */
6366 do_interrupt(vector, 0, 0, 0, 1);
6367 break;
6368 case SVM_EVTINJ_TYPE_NMI:
6369 env->exception_index = EXCP02_NMI;
6370 env->error_code = event_inj_err;
6371 env->exception_is_int = 0;
6372 env->exception_next_eip = EIP;
6373 if (loglevel & CPU_LOG_TB_IN_ASM)
6374 fprintf(logfile, "NMI");
6375 cpu_loop_exit();
6376 break;
6377 case SVM_EVTINJ_TYPE_EXEPT:
6378 env->exception_index = vector;
6379 env->error_code = event_inj_err;
6380 env->exception_is_int = 0;
6381 env->exception_next_eip = -1;
6382 if (loglevel & CPU_LOG_TB_IN_ASM)
6383 fprintf(logfile, "EXEPT");
6384 cpu_loop_exit();
6385 break;
6386 case SVM_EVTINJ_TYPE_SOFT:
6387 env->exception_index = vector;
6388 env->error_code = event_inj_err;
6389 env->exception_is_int = 1;
6390 env->exception_next_eip = EIP;
6391 if (loglevel & CPU_LOG_TB_IN_ASM)
6392 fprintf(logfile, "SOFT");
6393 cpu_loop_exit();
6394 break;
6395 }
6396 if (loglevel & CPU_LOG_TB_IN_ASM)
6397 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6398 }
6399}
6400
6401void helper_vmmcall(void)
6402{
6403 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6404 raise_exception(EXCP06_ILLOP);
6405}
6406
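/* VMLOAD transfers the guest state that VMRUN does not switch: FS, GS, TR and
   LDTR (including their hidden parts), KernelGSBase, the STAR/LSTAR/CSTAR/
   SFMASK MSRs and the SYSENTER MSRs, all read from the VMCB whose physical
   address is in rAX.  helper_vmsave() below is the mirror image. */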
6407void helper_vmload(int aflag)
6408{
6409 target_ulong addr;
6410 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6411
6412 if (aflag == 2)
6413 addr = EAX;
6414 else
6415 addr = (uint32_t)EAX;
6416
6417 if (loglevel & CPU_LOG_TB_IN_ASM)
6418 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6419 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6420 env->segs[R_FS].base);
6421
6422 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6423 env, R_FS);
6424 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6425 env, R_GS);
6426 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6427 &env->tr);
6428 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6429 &env->ldt);
6430
6431#ifdef TARGET_X86_64
6432 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6433 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6434 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6435 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6436#endif
6437 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6438 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6439 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6440 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6441}
6442
6443void helper_vmsave(int aflag)
6444{
6445 target_ulong addr;
6446 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6447
6448 if (aflag == 2)
6449 addr = EAX;
6450 else
6451 addr = (uint32_t)EAX;
6452
6453 if (loglevel & CPU_LOG_TB_IN_ASM)
6454 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6455 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6456 env->segs[R_FS].base);
6457
6458 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6459 &env->segs[R_FS]);
6460 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6461 &env->segs[R_GS]);
6462 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6463 &env->tr);
6464 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6465 &env->ldt);
6466
6467#ifdef TARGET_X86_64
6468 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6469 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6470 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6471 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6472#endif
6473 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6474 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6475 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6476 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6477}
6478
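/* STGI and CLGI toggle the global interrupt flag.  While GIF is clear the CPU
   holds off external interrupts, NMIs and similar asynchronous events; here
   this is only recorded in HF2_GIF_MASK, the actual gating happens where
   interrupts are delivered. */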
6479void helper_stgi(void)
6480{
6481 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6482 env->hflags2 |= HF2_GIF_MASK;
6483}
6484
6485void helper_clgi(void)
6486{
6487 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6488 env->hflags2 &= ~HF2_GIF_MASK;
6489}
6490
6491void helper_skinit(void)
6492{
6493 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6494 /* XXX: not implemented */
6495 raise_exception(EXCP06_ILLOP);
6496}
6497
6498void helper_invlpga(int aflag)
6499{
6500 target_ulong addr;
6501 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6502
6503 if (aflag == 2)
6504 addr = EAX;
6505 else
6506 addr = (uint32_t)EAX;
6507
6508    /* XXX: could use the ASID to see whether the flush is actually
6509       needed */
6510 tlb_flush_page(env, addr);
6511}
6512
6513void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6514{
6515 if (likely(!(env->hflags & HF_SVMI_MASK)))
6516 return;
6517#ifndef VBOX
6518 switch(type) {
6519#ifndef VBOX
6520 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6521#else
6522 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6523 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6524 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6525#endif
6526 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6527 helper_vmexit(type, param);
6528 }
6529 break;
6530#ifndef VBOX
6531 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6532#else
6533 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6534 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6535 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6536#endif
6537 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6538 helper_vmexit(type, param);
6539 }
6540 break;
6541 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6542 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6543 helper_vmexit(type, param);
6544 }
6545 break;
6546 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6547 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6548 helper_vmexit(type, param);
6549 }
6550 break;
6551 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6552 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6553 helper_vmexit(type, param);
6554 }
6555 break;
6556 case SVM_EXIT_MSR:
6557 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6558 /* FIXME: this should be read in at vmrun (faster this way?) */
6559 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6560 uint32_t t0, t1;
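        /* The MSR permission bitmap uses two bits per MSR (read and write
           intercept) and covers three MSR ranges, each mapped onto its own 2K
           region: 0..0x1fff, 0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff.
           Below, t1 becomes the byte offset into the bitmap and t0 the bit
           offset within that byte; param selects the read (0) or write (1) bit. */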
6561 switch((uint32_t)ECX) {
6562 case 0 ... 0x1fff:
6563 t0 = (ECX * 2) % 8;
6564            t1 = (ECX * 2) / 8;
6565 break;
6566 case 0xc0000000 ... 0xc0001fff:
6567 t0 = (8192 + ECX - 0xc0000000) * 2;
6568 t1 = (t0 / 8);
6569 t0 %= 8;
6570 break;
6571 case 0xc0010000 ... 0xc0011fff:
6572 t0 = (16384 + ECX - 0xc0010000) * 2;
6573 t1 = (t0 / 8);
6574 t0 %= 8;
6575 break;
6576 default:
6577 helper_vmexit(type, param);
6578 t0 = 0;
6579 t1 = 0;
6580 break;
6581 }
6582 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6583 helper_vmexit(type, param);
6584 }
6585 break;
6586 default:
6587 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6588 helper_vmexit(type, param);
6589 }
6590 break;
6591 }
6592#else
6593 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6594#endif
6595}
6596
6597void helper_svm_check_io(uint32_t port, uint32_t param,
6598 uint32_t next_eip_addend)
6599{
6600 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6601 /* FIXME: this should be read in at vmrun (faster this way?) */
6602 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
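        /* The I/O permission bitmap has one bit per port.  Bits 6:4 of param
           encode the access size in bytes, so mask covers every byte of the
           access and the lduw_phys() below tests the bits for all ports
           port..port+size-1 at once. */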
6603 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6604 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6605 /* next EIP */
6606 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6607 env->eip + next_eip_addend);
6608 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6609 }
6610 }
6611}
6612
6613/* Note: currently only 32 bits of exit_code are used */
6614void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6615{
6616 uint32_t int_ctl;
6617
6618 if (loglevel & CPU_LOG_TB_IN_ASM)
6619 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6620 exit_code, exit_info_1,
6621 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6622 EIP);
6623
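    /* Report the interrupt shadow (the one-instruction window after MOV SS or
       STI, tracked via HF_INHIBIT_IRQ_MASK) to the hypervisor through the VMCB
       int_state field. */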
6624 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6625 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6626 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6627 } else {
6628 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6629 }
6630
6631 /* Save the VM state in the vmcb */
6632 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6633 &env->segs[R_ES]);
6634 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6635 &env->segs[R_CS]);
6636 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6637 &env->segs[R_SS]);
6638 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6639 &env->segs[R_DS]);
6640
6641 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6642 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6643
6644 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6645 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6646
6647 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6649 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6650 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6651 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6652
6653 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6654 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6655 int_ctl |= env->v_tpr & V_TPR_MASK;
6656 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6657 int_ctl |= V_IRQ_MASK;
6658 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6659
6660 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6661 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6662 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6663 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6664 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6665 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6666 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6667
6668 /* Reload the host state from vm_hsave */
6669 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6670 env->hflags &= ~HF_SVMI_MASK;
6671 env->intercept = 0;
6672 env->intercept_exceptions = 0;
6673 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6674 env->tsc_offset = 0;
6675
6676 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6677 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6678
6679 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6680 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6681
6682 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6683 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6684 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6685 /* we need to set the efer after the crs so the hidden flags get
6686 set properly */
6687 cpu_load_efer(env,
6688 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6689 env->eflags = 0;
6690 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6691 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6692 CC_OP = CC_OP_EFLAGS;
6693
6694 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6695 env, R_ES);
6696 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6697 env, R_CS);
6698 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6699 env, R_SS);
6700 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6701 env, R_DS);
6702
6703 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6704 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6705 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6706
6707 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6708 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6709
6710 /* other setups */
6711 cpu_x86_set_cpl(env, 0);
6712 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6713 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6714
6715 env->hflags2 &= ~HF2_GIF_MASK;
6716 /* FIXME: Resets the current ASID register to zero (host ASID). */
6717
6718 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6719
6720 /* Clears the TSC_OFFSET inside the processor. */
6721
6722 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6723       from the page table indicated by the host's CR3. If the PDPEs contain
6724 illegal state, the processor causes a shutdown. */
6725
6726 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6727 env->cr[0] |= CR0_PE_MASK;
6728 env->eflags &= ~VM_MASK;
6729
6730 /* Disables all breakpoints in the host DR7 register. */
6731
6732 /* Checks the reloaded host state for consistency. */
6733
6734 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6735 host's code segment or non-canonical (in the case of long mode), a
6736       #GP fault is delivered inside the host. */
6737
6738 /* remove any pending exception */
6739 env->exception_index = -1;
6740 env->error_code = 0;
6741 env->old_exception = -1;
6742
6743 cpu_loop_exit();
6744}
6745
6746#endif
6747
6748/* MMX/SSE */
6749/* XXX: optimize by storing fptt and fptags in the static cpu state */
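/* A tag byte of 0 means "valid" and 1 means "empty" (see fptags in cpu.h):
   any MMX instruction marks all eight registers valid and resets the FPU
   top-of-stack, while EMMS tags them all empty again. */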
6750void helper_enter_mmx(void)
6751{
6752 env->fpstt = 0;
6753 *(uint32_t *)(env->fptags) = 0;
6754 *(uint32_t *)(env->fptags + 4) = 0;
6755}
6756
6757void helper_emms(void)
6758{
6759 /* set to empty state */
6760 *(uint32_t *)(env->fptags) = 0x01010101;
6761 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6762}
6763
6764/* XXX: suppress */
6765void helper_movq(uint64_t *d, uint64_t *s)
6766{
6767 *d = *s;
6768}
6769
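/* ops_sse.h is instantiated twice: SHIFT 0 produces the 64-bit MMX flavour of
   the vector helpers, SHIFT 1 the 128-bit SSE flavour.  helper_template.h is
   instantiated once per integer operand size (0 = byte, 1 = word, 2 = dword,
   3 = qword, the latter only on 64-bit targets). */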
6770#define SHIFT 0
6771#include "ops_sse.h"
6772
6773#define SHIFT 1
6774#include "ops_sse.h"
6775
6776#define SHIFT 0
6777#include "helper_template.h"
6778#undef SHIFT
6779
6780#define SHIFT 1
6781#include "helper_template.h"
6782#undef SHIFT
6783
6784#define SHIFT 2
6785#include "helper_template.h"
6786#undef SHIFT
6787
6788#ifdef TARGET_X86_64
6789
6790#define SHIFT 3
6791#include "helper_template.h"
6792#undef SHIFT
6793
6794#endif
6795
6796/* bit operations */
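/* BSF scans from bit 0 upwards, BSR from the most significant bit downwards.
   Neither loop terminates for a zero operand; the generated code is expected
   to call these helpers only with a non-zero value (for zero the x86 result
   is undefined anyway, only ZF is architecturally specified). */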
6797target_ulong helper_bsf(target_ulong t0)
6798{
6799 int count;
6800 target_ulong res;
6801
6802 res = t0;
6803 count = 0;
6804 while ((res & 1) == 0) {
6805 count++;
6806 res >>= 1;
6807 }
6808 return count;
6809}
6810
6811target_ulong helper_bsr(target_ulong t0)
6812{
6813 int count;
6814 target_ulong res, mask;
6815
6816 res = t0;
6817 count = TARGET_LONG_BITS - 1;
6818 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6819 while ((res & mask) == 0) {
6820 count--;
6821 res <<= 1;
6822 }
6823 return count;
6824}
6825
6826
6827static int compute_all_eflags(void)
6828{
6829 return CC_SRC;
6830}
6831
6832static int compute_c_eflags(void)
6833{
6834 return CC_SRC & CC_C;
6835}
6836
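/* Lazy condition-code evaluation: CC_OP records which operation last set the
   flags and CC_SRC/CC_DST hold its operands, so EFLAGS only has to be
   materialised when it is actually read.  Each cc_table entry supplies one
   routine that computes the full flag set and a cheaper one that computes
   just the carry flag. */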
6837#ifndef VBOX
6838CCTable cc_table[CC_OP_NB] = {
6839 [CC_OP_DYNAMIC] = { /* should never happen */ },
6840
6841 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6842
6843 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6844 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6845 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6846
6847 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6848 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6849 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6850
6851 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6852 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6853 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6854
6855 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6856 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6857 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6858
6859 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6860 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6861 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6862
6863 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6864 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6865 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6866
6867 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6868 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6869 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6870
6871 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6872 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6873 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6874
6875 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6876 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6877 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6878
6879 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6880 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6881 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6882
6883#ifdef TARGET_X86_64
6884 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6885
6886 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6887
6888 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6889
6890 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6891
6892 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6893
6894 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6895
6896 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6897
6898 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6899
6900 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6901
6902 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6903#endif
6904};
6905#else /* VBOX */
6906/* Sync carefully with cpu.h */
6907CCTable cc_table[CC_OP_NB] = {
6908 /* CC_OP_DYNAMIC */ { 0, 0 },
6909
6910 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6911
6912 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6913 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6914 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6915#ifdef TARGET_X86_64
6916 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6917#else
6918 /* CC_OP_MULQ */ { 0, 0 },
6919#endif
6920
6921 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6922 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6923 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6924#ifdef TARGET_X86_64
6925 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6926#else
6927 /* CC_OP_ADDQ */ { 0, 0 },
6928#endif
6929
6930 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6931 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6932 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6933#ifdef TARGET_X86_64
6934 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6935#else
6936 /* CC_OP_ADCQ */ { 0, 0 },
6937#endif
6938
6939 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6940 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6941 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6942#ifdef TARGET_X86_64
6943 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6944#else
6945 /* CC_OP_SUBQ */ { 0, 0 },
6946#endif
6947
6948 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6949 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6950 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6951#ifdef TARGET_X86_64
6952 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6953#else
6954 /* CC_OP_SBBQ */ { 0, 0 },
6955#endif
6956
6957 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6958 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6959 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6960#ifdef TARGET_X86_64
6961 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6962#else
6963 /* CC_OP_LOGICQ */ { 0, 0 },
6964#endif
6965
6966 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6967 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6968 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6969#ifdef TARGET_X86_64
6970 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6971#else
6972 /* CC_OP_INCQ */ { 0, 0 },
6973#endif
6974
6975 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6976 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6977 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6978#ifdef TARGET_X86_64
6979 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6980#else
6981 /* CC_OP_DECQ */ { 0, 0 },
6982#endif
6983
6984 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6985 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6986 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6987#ifdef TARGET_X86_64
6988 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6989#else
6990 /* CC_OP_SHLQ */ { 0, 0 },
6991#endif
6992
6993 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6994 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6995 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6996#ifdef TARGET_X86_64
6997 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6998#else
6999 /* CC_OP_SARQ */ { 0, 0 },
7000#endif
7001};
7002#endif /* VBOX */
7003