VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c@ 17044

Last change on this file was r17044, checked in by vboxsync, 16 years ago

REM: clear VIF/VIP on entry to protected mode interrupts

  • Property svn:eol-style set to native
File size: 193.8 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
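/* PF in EFLAGS depends only on the low 8 bits of a result: it is set when
   that byte holds an even number of 1 bits. This table maps every byte value
   to CC_P (even parity) or 0 (odd parity), so PF becomes a single lookup. */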
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table */
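/* A 16-bit RCL/RCR rotates the operand together with CF, i.e. across 17 bit
   positions, so the (5-bit masked) count is reduced modulo 17. The table
   covers counts 0..31 and avoids a division at run time. */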
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
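/* Same idea for 8-bit RCL/RCR: 8 operand bits plus CF give 9 positions,
   so the count is reduced modulo 9. */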
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
102
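/* Constants used by the x87 constant-load instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T). */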
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
113
114/* broken thread support */
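/* The LOCK prefix is emulated with one global spinlock: helper_lock() runs
   before and helper_unlock() after a locked instruction. This is correct but
   serializes every locked operation, hence "broken thread support". */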
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
 177 /* According to the AMD manual, this should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
 180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
 196/* return non-zero on error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with CPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
273
274 if (seg == R_CS)
275 flags |= DESC_CS_MASK;
276
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, flags);
279#else
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
282#endif
283}
284
285#ifndef VBOX
286static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
287#else /* VBOX */
288DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
289#endif /* VBOX */
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0;
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with CPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
 371 /* if data or non-conforming code, check the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387#if 0
 388 /** @todo: for now we ignore loading null (0) selectors; need to check what the correct behaviour is */
389 cpu_x86_load_seg_cache(env, seg_reg, selector,
390 0, 0, 0);
391#endif
392#endif
393 }
394}
395
396#define SWITCH_TSS_JMP 0
397#define SWITCH_TSS_IRET 1
398#define SWITCH_TSS_CALL 2
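/* The task switch source matters below: JMP and IRET clear the busy bit of
   the outgoing TSS, JMP and CALL set it on the incoming one, and only CALL
   stores a back link to the old TSS and sets NT in the new EFLAGS. */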
399
400/* XXX: restore CPU state in registers (PowerPC case) */
401static void switch_tss(int tss_selector,
402 uint32_t e1, uint32_t e2, int source,
403 uint32_t next_eip)
404{
405 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
406 target_ulong tss_base;
407 uint32_t new_regs[8], new_segs[6];
408 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
409 uint32_t old_eflags, eflags_mask;
410 SegmentCache *dt;
411#ifndef VBOX
412 int index;
413#else
414 unsigned int index;
415#endif
416 target_ulong ptr;
417
418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
419#ifdef DEBUG_PCALL
420 if (loglevel & CPU_LOG_PCALL)
421 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
422#endif
423
424#if defined(VBOX) && defined(DEBUG)
425 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
426#endif
427
428 /* if task gate, we read the TSS segment and we load it */
429 if (type == 5) {
430 if (!(e2 & DESC_P_MASK))
431 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
432 tss_selector = e1 >> 16;
433 if (tss_selector & 4)
434 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
435 if (load_segment(&e1, &e2, tss_selector) != 0)
436 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
437 if (e2 & DESC_S_MASK)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
440 if ((type & 7) != 1)
441 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
442 }
443
444 if (!(e2 & DESC_P_MASK))
445 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
446
447 if (type & 8)
448 tss_limit_max = 103;
449 else
450 tss_limit_max = 43;
451 tss_limit = get_seg_limit(e1, e2);
452 tss_base = get_seg_base(e1, e2);
453 if ((tss_selector & 4) != 0 ||
454 tss_limit < tss_limit_max)
455 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
456 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
457 if (old_type & 8)
458 old_tss_limit_max = 103;
459 else
460 old_tss_limit_max = 43;
461
462 /* read all the registers from the new TSS */
463 if (type & 8) {
464 /* 32 bit */
465 new_cr3 = ldl_kernel(tss_base + 0x1c);
466 new_eip = ldl_kernel(tss_base + 0x20);
467 new_eflags = ldl_kernel(tss_base + 0x24);
468 for(i = 0; i < 8; i++)
469 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
470 for(i = 0; i < 6; i++)
471 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
472 new_ldt = lduw_kernel(tss_base + 0x60);
473 new_trap = ldl_kernel(tss_base + 0x64);
474 } else {
475 /* 16 bit */
476 new_cr3 = 0;
477 new_eip = lduw_kernel(tss_base + 0x0e);
478 new_eflags = lduw_kernel(tss_base + 0x10);
479 for(i = 0; i < 8; i++)
480 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
481 for(i = 0; i < 4; i++)
482 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
483 new_ldt = lduw_kernel(tss_base + 0x2a);
484 new_segs[R_FS] = 0;
485 new_segs[R_GS] = 0;
486 new_trap = 0;
487 }
488
489 /* NOTE: we must avoid memory exceptions during the task switch,
490 so we make dummy accesses before */
491 /* XXX: it can still fail in some cases, so a bigger hack is
 492 necessary to validate the TLB after having done the accesses */
493
494 v1 = ldub_kernel(env->tr.base);
495 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
496 stb_kernel(env->tr.base, v1);
497 stb_kernel(env->tr.base + old_tss_limit_max, v2);
498
499 /* clear busy bit (it is restartable) */
500 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
501 target_ulong ptr;
502 uint32_t e2;
503 ptr = env->gdt.base + (env->tr.selector & ~7);
504 e2 = ldl_kernel(ptr + 4);
505 e2 &= ~DESC_TSS_BUSY_MASK;
506 stl_kernel(ptr + 4, e2);
507 }
508 old_eflags = compute_eflags();
509 if (source == SWITCH_TSS_IRET)
510 old_eflags &= ~NT_MASK;
511
512 /* save the current state in the old TSS */
513 if (type & 8) {
514 /* 32 bit */
515 stl_kernel(env->tr.base + 0x20, next_eip);
516 stl_kernel(env->tr.base + 0x24, old_eflags);
517 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
518 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
519 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
520 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
521 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
522 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
523 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
524 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
525 for(i = 0; i < 6; i++)
526 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
527#if defined(VBOX) && defined(DEBUG)
528 printf("TSS 32 bits switch\n");
529 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
530#endif
531 } else {
532 /* 16 bit */
533 stw_kernel(env->tr.base + 0x0e, next_eip);
534 stw_kernel(env->tr.base + 0x10, old_eflags);
535 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
536 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
537 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
538 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
539 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
540 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
541 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
542 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
543 for(i = 0; i < 4; i++)
544 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
545 }
546
 547 /* now if an exception occurs, it will occur in the next task
548 context */
549
550 if (source == SWITCH_TSS_CALL) {
551 stw_kernel(tss_base, env->tr.selector);
552 new_eflags |= NT_MASK;
553 }
554
555 /* set busy bit */
556 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
557 target_ulong ptr;
558 uint32_t e2;
559 ptr = env->gdt.base + (tss_selector & ~7);
560 e2 = ldl_kernel(ptr + 4);
561 e2 |= DESC_TSS_BUSY_MASK;
562 stl_kernel(ptr + 4, e2);
563 }
564
565 /* set the new CPU state */
566 /* from this point, any exception which occurs can give problems */
567 env->cr[0] |= CR0_TS_MASK;
568 env->hflags |= HF_TS_MASK;
569 env->tr.selector = tss_selector;
570 env->tr.base = tss_base;
571 env->tr.limit = tss_limit;
572 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
573
574 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
575 cpu_x86_update_cr3(env, new_cr3);
576 }
577
578 /* load all registers without an exception, then reload them with
579 possible exception */
580 env->eip = new_eip;
581 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
582 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
583 if (!(type & 8))
584 eflags_mask &= 0xffff;
585 load_eflags(new_eflags, eflags_mask);
586 /* XXX: what to do in 16 bit case ? */
587 EAX = new_regs[0];
588 ECX = new_regs[1];
589 EDX = new_regs[2];
590 EBX = new_regs[3];
591 ESP = new_regs[4];
592 EBP = new_regs[5];
593 ESI = new_regs[6];
594 EDI = new_regs[7];
595 if (new_eflags & VM_MASK) {
596 for(i = 0; i < 6; i++)
597 load_seg_vm(i, new_segs[i]);
598 /* in vm86, CPL is always 3 */
599 cpu_x86_set_cpl(env, 3);
600 } else {
 601 /* CPL is set to the RPL of CS */
602 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
603 /* first just selectors as the rest may trigger exceptions */
604 for(i = 0; i < 6; i++)
605 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
606 }
607
608 env->ldt.selector = new_ldt & ~4;
609 env->ldt.base = 0;
610 env->ldt.limit = 0;
611 env->ldt.flags = 0;
612
613 /* load the LDT */
614 if (new_ldt & 4)
615 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
616
617 if ((new_ldt & 0xfffc) != 0) {
618 dt = &env->gdt;
619 index = new_ldt & ~7;
620 if ((index + 7) > dt->limit)
621 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
622 ptr = dt->base + index;
623 e1 = ldl_kernel(ptr);
624 e2 = ldl_kernel(ptr + 4);
625 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 if (!(e2 & DESC_P_MASK))
628 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
629 load_seg_cache_raw_dt(&env->ldt, e1, e2);
630 }
631
632 /* load the segments */
633 if (!(new_eflags & VM_MASK)) {
634 tss_load_seg(R_CS, new_segs[R_CS]);
635 tss_load_seg(R_SS, new_segs[R_SS]);
636 tss_load_seg(R_ES, new_segs[R_ES]);
637 tss_load_seg(R_DS, new_segs[R_DS]);
638 tss_load_seg(R_FS, new_segs[R_FS]);
639 tss_load_seg(R_GS, new_segs[R_GS]);
640 }
641
642 /* check that EIP is in the CS segment limits */
643 if (new_eip > env->segs[R_CS].limit) {
644 /* XXX: different exception if CALL ? */
645 raise_exception_err(EXCP0D_GPF, 0);
646 }
647}
648
649/* check if Port I/O is allowed in TSS */
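/* Every I/O port is one bit in the TSS I/O permission bitmap, whose offset
   is read from TSS+0x66; all bits covered by the access size must be clear,
   otherwise #GP(0) is raised. */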
650#ifndef VBOX
651static inline void check_io(int addr, int size)
652{
653 int io_offset, val, mask;
654
655#else /* VBOX */
656DECLINLINE(void) check_io(int addr, int size)
657{
658 int val, mask;
659 unsigned int io_offset;
660#endif /* VBOX */
661 /* TSS must be a valid 32 bit one */
662 if (!(env->tr.flags & DESC_P_MASK) ||
663 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
664 env->tr.limit < 103)
665 goto fail;
666 io_offset = lduw_kernel(env->tr.base + 0x66);
667 io_offset += (addr >> 3);
668 /* Note: the check needs two bytes */
669 if ((io_offset + 1) > env->tr.limit)
670 goto fail;
671 val = lduw_kernel(env->tr.base + io_offset);
672 val >>= (addr & 7);
673 mask = (1 << size) - 1;
674 /* all bits must be zero to allow the I/O */
675 if ((val & mask) != 0) {
676 fail:
677 raise_exception_err(EXCP0D_GPF, 0);
678 }
679}
680
681#ifdef VBOX
682/* Keep in sync with gen_check_external_event() */
683void helper_check_external_event()
684{
685 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
686 | CPU_INTERRUPT_EXTERNAL_TIMER
687 | CPU_INTERRUPT_EXTERNAL_DMA))
688 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
689 && (env->eflags & IF_MASK)
690 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
691 {
692 helper_external_event();
693 }
694
695}
696
697void helper_sync_seg(uint32_t reg)
698{
699 if (env->segs[reg].newselector)
700 sync_seg(env, reg, env->segs[reg].newselector);
701}
702#endif
703
704void helper_check_iob(uint32_t t0)
705{
706 check_io(t0, 1);
707}
708
709void helper_check_iow(uint32_t t0)
710{
711 check_io(t0, 2);
712}
713
714void helper_check_iol(uint32_t t0)
715{
716 check_io(t0, 4);
717}
718
719void helper_outb(uint32_t port, uint32_t data)
720{
721 cpu_outb(env, port, data & 0xff);
722}
723
724target_ulong helper_inb(uint32_t port)
725{
726 return cpu_inb(env, port);
727}
728
729void helper_outw(uint32_t port, uint32_t data)
730{
731 cpu_outw(env, port, data & 0xffff);
732}
733
734target_ulong helper_inw(uint32_t port)
735{
736 return cpu_inw(env, port);
737}
738
739void helper_outl(uint32_t port, uint32_t data)
740{
741 cpu_outl(env, port, data);
742}
743
744target_ulong helper_inl(uint32_t port)
745{
746 return cpu_inl(env, port);
747}
748
749#ifndef VBOX
750static inline unsigned int get_sp_mask(unsigned int e2)
751#else /* VBOX */
752DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
753#endif /* VBOX */
754{
755 if (e2 & DESC_B_MASK)
756 return 0xffffffff;
757 else
758 return 0xffff;
759}
760
761#ifdef TARGET_X86_64
762#define SET_ESP(val, sp_mask)\
763do {\
764 if ((sp_mask) == 0xffff)\
765 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
766 else if ((sp_mask) == 0xffffffffLL)\
767 ESP = (uint32_t)(val);\
768 else\
769 ESP = (val);\
770} while (0)
771#else
772#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
773#endif
774
775/* in 64-bit machines, this can overflow. So this segment addition macro
776 * can be used to trim the value to 32-bit whenever needed */
777#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
778
 779/* XXX: add an is_user flag to have proper security support */
780#define PUSHW(ssp, sp, sp_mask, val)\
781{\
782 sp -= 2;\
783 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
784}
785
786#define PUSHL(ssp, sp, sp_mask, val)\
787{\
788 sp -= 4;\
789 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
790}
791
792#define POPW(ssp, sp, sp_mask, val)\
793{\
794 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
795 sp += 2;\
796}
797
798#define POPL(ssp, sp, sp_mask, val)\
799{\
800 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
801 sp += 4;\
802}
803
804/* protected mode interrupt */
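/* Outline: fetch the gate descriptor from the IDT, check its type, DPL and
   present bit, then either perform a task switch (task gate) or load the
   target code segment. If the handler runs at an inner privilege level, the
   new SS:ESP is fetched from the TSS before the return frame (and optional
   error code) is pushed on the new stack. */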
805static void do_interrupt_protected(int intno, int is_int, int error_code,
806 unsigned int next_eip, int is_hw)
807{
808 SegmentCache *dt;
809 target_ulong ptr, ssp;
810 int type, dpl, selector, ss_dpl, cpl;
811 int has_error_code, new_stack, shift;
812 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
813 uint32_t old_eip, sp_mask;
814
815#ifdef VBOX
816 ss = ss_e1 = ss_e2 = 0;
817# ifdef VBOX_WITH_VMI
818 if ( intno == 6
819 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
820 {
821 env->exception_index = EXCP_PARAV_CALL;
822 cpu_loop_exit();
823 }
824# endif
825 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
826 cpu_loop_exit();
827#endif
828
829 has_error_code = 0;
830 if (!is_int && !is_hw) {
831 switch(intno) {
832 case 8:
833 case 10:
834 case 11:
835 case 12:
836 case 13:
837 case 14:
838 case 17:
839 has_error_code = 1;
840 break;
841 }
842 }
843 if (is_int)
844 old_eip = next_eip;
845 else
846 old_eip = env->eip;
847
848 dt = &env->idt;
849#ifndef VBOX
850 if (intno * 8 + 7 > dt->limit)
851#else
852 if ((unsigned)intno * 8 + 7 > dt->limit)
853#endif
854 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
855 ptr = dt->base + intno * 8;
856 e1 = ldl_kernel(ptr);
857 e2 = ldl_kernel(ptr + 4);
858 /* check gate type */
859 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
860 switch(type) {
861 case 5: /* task gate */
862 /* must do that check here to return the correct error code */
863 if (!(e2 & DESC_P_MASK))
864 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
865 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
866 if (has_error_code) {
867 int type;
868 uint32_t mask;
869 /* push the error code */
870 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
871 shift = type >> 3;
872 if (env->segs[R_SS].flags & DESC_B_MASK)
873 mask = 0xffffffff;
874 else
875 mask = 0xffff;
876 esp = (ESP - (2 << shift)) & mask;
877 ssp = env->segs[R_SS].base + esp;
878 if (shift)
879 stl_kernel(ssp, error_code);
880 else
881 stw_kernel(ssp, error_code);
882 SET_ESP(esp, mask);
883 }
884 return;
885 case 6: /* 286 interrupt gate */
886 case 7: /* 286 trap gate */
887 case 14: /* 386 interrupt gate */
888 case 15: /* 386 trap gate */
889 break;
890 default:
891 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
892 break;
893 }
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 cpl = env->hflags & HF_CPL_MASK;
896 /* check privilege if software int */
897 if (is_int && dpl < cpl)
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 /* check valid bit */
900 if (!(e2 & DESC_P_MASK))
901 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
902 selector = e1 >> 16;
903 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
904 if ((selector & 0xfffc) == 0)
905 raise_exception_err(EXCP0D_GPF, 0);
906
907 if (load_segment(&e1, &e2, selector) != 0)
908 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 if (dpl > cpl)
913 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
916 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
917 /* to inner privilege */
918 get_ss_esp_from_tss(&ss, &esp, dpl);
919 if ((ss & 0xfffc) == 0)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 if ((ss & 3) != dpl)
922 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
923 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
926 if (ss_dpl != dpl)
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if (!(ss_e2 & DESC_S_MASK) ||
929 (ss_e2 & DESC_CS_MASK) ||
930 !(ss_e2 & DESC_W_MASK))
931 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
932 if (!(ss_e2 & DESC_P_MASK))
933#ifdef VBOX /* See page 3-477 of 253666.pdf */
934 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
935#else
936 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
937#endif
938 new_stack = 1;
939 sp_mask = get_sp_mask(ss_e2);
940 ssp = get_seg_base(ss_e1, ss_e2);
941#if defined(VBOX) && defined(DEBUG)
942 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
943#endif
944 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
945 /* to same privilege */
946 if (env->eflags & VM_MASK)
947 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948 new_stack = 0;
949 sp_mask = get_sp_mask(env->segs[R_SS].flags);
950 ssp = env->segs[R_SS].base;
951 esp = ESP;
952 dpl = cpl;
953 } else {
954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
955 new_stack = 0; /* avoid warning */
956 sp_mask = 0; /* avoid warning */
957 ssp = 0; /* avoid warning */
958 esp = 0; /* avoid warning */
959 }
960
961 shift = type >> 3;
962
963#if 0
964 /* XXX: check that enough room is available */
965 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
966 if (env->eflags & VM_MASK)
967 push_size += 8;
968 push_size <<= shift;
969#endif
970 if (shift == 1) {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHL(ssp, esp, sp_mask, ESP);
980 }
981 PUSHL(ssp, esp, sp_mask, compute_eflags());
982 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHL(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHL(ssp, esp, sp_mask, error_code);
986 }
987 } else {
988 if (new_stack) {
989 if (env->eflags & VM_MASK) {
990 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
991 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
992 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
993 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
994 }
995 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
996 PUSHW(ssp, esp, sp_mask, ESP);
997 }
998 PUSHW(ssp, esp, sp_mask, compute_eflags());
999 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1000 PUSHW(ssp, esp, sp_mask, old_eip);
1001 if (has_error_code) {
1002 PUSHW(ssp, esp, sp_mask, error_code);
1003 }
1004 }
1005
1006 if (new_stack) {
1007 if (env->eflags & VM_MASK) {
1008 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1009 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1010 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1011 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1012 }
1013 ss = (ss & ~3) | dpl;
1014 cpu_x86_load_seg_cache(env, R_SS, ss,
1015 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1016 }
1017 SET_ESP(esp, sp_mask);
1018
1019 selector = (selector & ~3) | dpl;
1020 cpu_x86_load_seg_cache(env, R_CS, selector,
1021 get_seg_base(e1, e2),
1022 get_seg_limit(e1, e2),
1023 e2);
1024 cpu_x86_set_cpl(env, dpl);
1025 env->eip = offset;
1026
 1027 /* interrupt gates (unlike trap gates) clear IF */
1028 if ((type & 1) == 0) {
1029 env->eflags &= ~IF_MASK;
1030 }
1031#ifndef VBOX
1032 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1033#else
1034 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1035#endif
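/* The extra VIF/VIP clearing in the VBOX branch is the r17044 change noted
   in the commit message above. */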
1036}
1037#ifdef VBOX
1038
1039/* check if VME interrupt redirection is enabled in TSS */
1040DECLINLINE(bool) is_vme_irq_redirected(int intno)
1041{
1042 unsigned int io_offset, intredir_offset;
1043 unsigned char val, mask;
1044
1045 /* TSS must be a valid 32 bit one */
1046 if (!(env->tr.flags & DESC_P_MASK) ||
1047 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1048 env->tr.limit < 103)
1049 goto fail;
1050 io_offset = lduw_kernel(env->tr.base + 0x66);
1051 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1052 if (io_offset < 0x68 + 0x20)
1053 io_offset = 0x68 + 0x20;
1054 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1055 intredir_offset = io_offset - 0x20;
1056
1057 intredir_offset += (intno >> 3);
1058 if ((intredir_offset) > env->tr.limit)
1059 goto fail;
1060
1061 val = ldub_kernel(env->tr.base + intredir_offset);
1062 mask = 1 << (unsigned char)(intno & 7);
1063
1064 /* bit set means no redirection. */
1065 if ((val & mask) != 0) {
1066 return false;
1067 }
1068 return true;
1069
1070fail:
1071 raise_exception_err(EXCP0D_GPF, 0);
1072 return true;
1073}
1074
1075/* V86 mode software interrupt with CR4.VME=1 */
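/* With CR4.VME set, a software INT n in vm86 mode first consults the
   interrupt redirection bitmap in the TSS: redirected vectors are dispatched
   through the real-mode style IVT at linear address 0, with VIF standing in
   for IF when IOPL < 3; non-redirected vectors go through the protected mode
   path when IOPL == 3 and raise #GP otherwise. */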
1076static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1077{
1078 target_ulong ptr, ssp;
1079 int selector;
1080 uint32_t offset, esp;
1081 uint32_t old_cs, old_eflags;
1082 uint32_t iopl;
1083
1084 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1085
1086 if (!is_vme_irq_redirected(intno))
1087 {
1088 if (iopl == 3)
1089 {
1090 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1091 return;
1092 }
1093 else
1094 raise_exception_err(EXCP0D_GPF, 0);
1095 }
1096
1097 /* virtual mode idt is at linear address 0 */
1098 ptr = 0 + intno * 4;
1099 offset = lduw_kernel(ptr);
1100 selector = lduw_kernel(ptr + 2);
1101 esp = ESP;
1102 ssp = env->segs[R_SS].base;
1103 old_cs = env->segs[R_CS].selector;
1104
1105 old_eflags = compute_eflags();
1106 if (iopl < 3)
1107 {
1108 /* copy VIF into IF and set IOPL to 3 */
1109 if (env->eflags & VIF_MASK)
1110 old_eflags |= IF_MASK;
1111 else
1112 old_eflags &= ~IF_MASK;
1113
1114 old_eflags |= (3 << IOPL_SHIFT);
1115 }
1116
1117 /* XXX: use SS segment size ? */
1118 PUSHW(ssp, esp, 0xffff, old_eflags);
1119 PUSHW(ssp, esp, 0xffff, old_cs);
1120 PUSHW(ssp, esp, 0xffff, next_eip);
1121
1122 /* update processor state */
1123 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1124 env->eip = offset;
1125 env->segs[R_CS].selector = selector;
1126 env->segs[R_CS].base = (selector << 4);
1127 env->eflags &= ~(TF_MASK | RF_MASK);
1128
1129 if (iopl < 3)
1130 env->eflags &= ~VIF_MASK;
1131 else
1132 env->eflags &= ~IF_MASK;
1133}
1134#endif /* VBOX */
1135
1136#ifdef TARGET_X86_64
1137
1138#define PUSHQ(sp, val)\
1139{\
1140 sp -= 8;\
1141 stq_kernel(sp, (val));\
1142}
1143
1144#define POPQ(sp, val)\
1145{\
1146 val = ldq_kernel(sp);\
1147 sp += 8;\
1148}
1149
1150#ifndef VBOX
1151static inline target_ulong get_rsp_from_tss(int level)
1152#else /* VBOX */
1153DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1154#endif /* VBOX */
1155{
1156 int index;
1157
1158#if 0
1159 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1160 env->tr.base, env->tr.limit);
1161#endif
1162
1163 if (!(env->tr.flags & DESC_P_MASK))
1164 cpu_abort(env, "invalid tss");
1165 index = 8 * level + 4;
1166 if ((index + 7) > env->tr.limit)
1167 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1168 return ldq_kernel(env->tr.base + index);
1169}
1170
1171/* 64 bit interrupt */
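/* In long mode IDT entries are 16 bytes and only 64-bit interrupt/trap gates
   are valid. A non-zero IST field picks one of the seven alternate stacks in
   the 64-bit TSS; otherwise a privilege change loads RSP from the TSS while a
   same-level interrupt keeps the current stack. The SS, RSP, RFLAGS, CS, RIP
   frame is always pushed, even without a privilege change. */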
1172static void do_interrupt64(int intno, int is_int, int error_code,
1173 target_ulong next_eip, int is_hw)
1174{
1175 SegmentCache *dt;
1176 target_ulong ptr;
1177 int type, dpl, selector, cpl, ist;
1178 int has_error_code, new_stack;
1179 uint32_t e1, e2, e3, ss;
1180 target_ulong old_eip, esp, offset;
1181
1182#ifdef VBOX
1183 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1184 cpu_loop_exit();
1185#endif
1186
1187 has_error_code = 0;
1188 if (!is_int && !is_hw) {
1189 switch(intno) {
1190 case 8:
1191 case 10:
1192 case 11:
1193 case 12:
1194 case 13:
1195 case 14:
1196 case 17:
1197 has_error_code = 1;
1198 break;
1199 }
1200 }
1201 if (is_int)
1202 old_eip = next_eip;
1203 else
1204 old_eip = env->eip;
1205
1206 dt = &env->idt;
1207 if (intno * 16 + 15 > dt->limit)
1208 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1209 ptr = dt->base + intno * 16;
1210 e1 = ldl_kernel(ptr);
1211 e2 = ldl_kernel(ptr + 4);
1212 e3 = ldl_kernel(ptr + 8);
1213 /* check gate type */
1214 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1215 switch(type) {
1216 case 14: /* 386 interrupt gate */
1217 case 15: /* 386 trap gate */
1218 break;
1219 default:
1220 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1221 break;
1222 }
1223 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1224 cpl = env->hflags & HF_CPL_MASK;
1225 /* check privilege if software int */
1226 if (is_int && dpl < cpl)
1227 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1228 /* check valid bit */
1229 if (!(e2 & DESC_P_MASK))
1230 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1231 selector = e1 >> 16;
1232 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1233 ist = e2 & 7;
1234 if ((selector & 0xfffc) == 0)
1235 raise_exception_err(EXCP0D_GPF, 0);
1236
1237 if (load_segment(&e1, &e2, selector) != 0)
1238 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1239 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1240 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1241 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1242 if (dpl > cpl)
1243 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1244 if (!(e2 & DESC_P_MASK))
1245 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1246 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1247 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1248 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1249 /* to inner privilege */
1250 if (ist != 0)
1251 esp = get_rsp_from_tss(ist + 3);
1252 else
1253 esp = get_rsp_from_tss(dpl);
1254 esp &= ~0xfLL; /* align stack */
1255 ss = 0;
1256 new_stack = 1;
1257 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1258 /* to same privilege */
1259 if (env->eflags & VM_MASK)
1260 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1261 new_stack = 0;
1262 if (ist != 0)
1263 esp = get_rsp_from_tss(ist + 3);
1264 else
1265 esp = ESP;
1266 esp &= ~0xfLL; /* align stack */
1267 dpl = cpl;
1268 } else {
1269 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1270 new_stack = 0; /* avoid warning */
1271 esp = 0; /* avoid warning */
1272 }
1273
1274 PUSHQ(esp, env->segs[R_SS].selector);
1275 PUSHQ(esp, ESP);
1276 PUSHQ(esp, compute_eflags());
1277 PUSHQ(esp, env->segs[R_CS].selector);
1278 PUSHQ(esp, old_eip);
1279 if (has_error_code) {
1280 PUSHQ(esp, error_code);
1281 }
1282
1283 if (new_stack) {
1284 ss = 0 | dpl;
1285 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1286 }
1287 ESP = esp;
1288
1289 selector = (selector & ~3) | dpl;
1290 cpu_x86_load_seg_cache(env, R_CS, selector,
1291 get_seg_base(e1, e2),
1292 get_seg_limit(e1, e2),
1293 e2);
1294 cpu_x86_set_cpl(env, dpl);
1295 env->eip = offset;
1296
 1297 /* interrupt gates (unlike trap gates) clear IF */
1298 if ((type & 1) == 0) {
1299 env->eflags &= ~IF_MASK;
1300 }
1301
1302#ifndef VBOX
1303 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1304#else
1305 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1306#endif
1307}
1308#endif
1309
1310#if defined(CONFIG_USER_ONLY)
1311void helper_syscall(int next_eip_addend)
1312{
1313 env->exception_index = EXCP_SYSCALL;
1314 env->exception_next_eip = env->eip + next_eip_addend;
1315 cpu_loop_exit();
1316}
1317#else
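/* SYSCALL: the new CS selector is taken from STAR[47:32] and SS is CS+8.
   In long mode RCX receives the return RIP and R11 the current RFLAGS,
   RFLAGS is then masked with SFMASK (env->fmask) and RIP is loaded from
   LSTAR (or CSTAR for compatibility-mode callers); outside long mode EIP
   comes from the low 32 bits of STAR. */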
1318void helper_syscall(int next_eip_addend)
1319{
1320 int selector;
1321
1322 if (!(env->efer & MSR_EFER_SCE)) {
1323 raise_exception_err(EXCP06_ILLOP, 0);
1324 }
1325 selector = (env->star >> 32) & 0xffff;
1326#ifdef TARGET_X86_64
1327 if (env->hflags & HF_LMA_MASK) {
1328 int code64;
1329
1330 ECX = env->eip + next_eip_addend;
1331 env->regs[11] = compute_eflags();
1332
1333 code64 = env->hflags & HF_CS64_MASK;
1334
1335 cpu_x86_set_cpl(env, 0);
1336 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1337 0, 0xffffffff,
1338 DESC_G_MASK | DESC_P_MASK |
1339 DESC_S_MASK |
1340 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1341 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1342 0, 0xffffffff,
1343 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1344 DESC_S_MASK |
1345 DESC_W_MASK | DESC_A_MASK);
1346 env->eflags &= ~env->fmask;
1347 load_eflags(env->eflags, 0);
1348 if (code64)
1349 env->eip = env->lstar;
1350 else
1351 env->eip = env->cstar;
1352 } else
1353#endif
1354 {
1355 ECX = (uint32_t)(env->eip + next_eip_addend);
1356
1357 cpu_x86_set_cpl(env, 0);
1358 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1359 0, 0xffffffff,
1360 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1361 DESC_S_MASK |
1362 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1363 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1364 0, 0xffffffff,
1365 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1366 DESC_S_MASK |
1367 DESC_W_MASK | DESC_A_MASK);
1368 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1369 env->eip = (uint32_t)env->star;
1370 }
1371}
1372#endif
1373
1374void helper_sysret(int dflag)
1375{
1376 int cpl, selector;
1377
1378 if (!(env->efer & MSR_EFER_SCE)) {
1379 raise_exception_err(EXCP06_ILLOP, 0);
1380 }
1381 cpl = env->hflags & HF_CPL_MASK;
1382 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1383 raise_exception_err(EXCP0D_GPF, 0);
1384 }
1385 selector = (env->star >> 48) & 0xffff;
1386#ifdef TARGET_X86_64
1387 if (env->hflags & HF_LMA_MASK) {
1388 if (dflag == 2) {
1389 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1390 0, 0xffffffff,
1391 DESC_G_MASK | DESC_P_MASK |
1392 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1393 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1394 DESC_L_MASK);
1395 env->eip = ECX;
1396 } else {
1397 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1398 0, 0xffffffff,
1399 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1400 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1401 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1402 env->eip = (uint32_t)ECX;
1403 }
1404 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1405 0, 0xffffffff,
1406 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1407 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1408 DESC_W_MASK | DESC_A_MASK);
1409 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1410 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1411 cpu_x86_set_cpl(env, 3);
1412 } else
1413#endif
1414 {
1415 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1416 0, 0xffffffff,
1417 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1418 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1419 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1420 env->eip = (uint32_t)ECX;
1421 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1422 0, 0xffffffff,
1423 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1424 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1425 DESC_W_MASK | DESC_A_MASK);
1426 env->eflags |= IF_MASK;
1427 cpu_x86_set_cpl(env, 3);
1428 }
1429#ifdef USE_KQEMU
1430 if (kqemu_is_ok(env)) {
1431 if (env->hflags & HF_LMA_MASK)
1432 CC_OP = CC_OP_EFLAGS;
1433 env->exception_index = -1;
1434 cpu_loop_exit();
1435 }
1436#endif
1437}
1438
1439#ifdef VBOX
1440/**
1441 * Checks and processes external VMM events.
1442 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1443 */
1444void helper_external_event(void)
1445{
1446#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1447 uintptr_t uSP;
1448# ifdef RT_ARCH_AMD64
1449 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1450# else
1451 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1452# endif
1453 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1454#endif
1455 /* Keep in sync with flags checked by gen_check_external_event() */
1456 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1457 {
1458 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1459 ~CPU_INTERRUPT_EXTERNAL_HARD);
1460 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1461 }
1462 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1463 {
1464 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1465 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1466 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1467 }
1468 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1469 {
1470 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1471 ~CPU_INTERRUPT_EXTERNAL_DMA);
1472 remR3DmaRun(env);
1473 }
1474 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1475 {
1476 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1477 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1478 remR3TimersRun(env);
1479 }
1480}
1481/* helper for recording call instruction addresses for later scanning */
1482void helper_record_call()
1483{
1484 if ( !(env->state & CPU_RAW_RING0)
1485 && (env->cr[0] & CR0_PG_MASK)
1486 && !(env->eflags & X86_EFL_IF))
1487 remR3RecordCall(env);
1488}
1489#endif /* VBOX */
1490
1491/* real mode interrupt */
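/* Real mode dispatch: the vector table (env->idt, base 0 after reset) has
   4-byte entries holding offset:segment; FLAGS, CS and IP are pushed on the
   16-bit stack and IF, TF, AC and RF are cleared. */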
1492static void do_interrupt_real(int intno, int is_int, int error_code,
1493 unsigned int next_eip)
1494{
1495 SegmentCache *dt;
1496 target_ulong ptr, ssp;
1497 int selector;
1498 uint32_t offset, esp;
1499 uint32_t old_cs, old_eip;
1500
1501 /* real mode (simpler !) */
1502 dt = &env->idt;
1503#ifndef VBOX
1504 if (intno * 4 + 3 > dt->limit)
1505#else
1506 if ((unsigned)intno * 4 + 3 > dt->limit)
1507#endif
1508 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1509 ptr = dt->base + intno * 4;
1510 offset = lduw_kernel(ptr);
1511 selector = lduw_kernel(ptr + 2);
1512 esp = ESP;
1513 ssp = env->segs[R_SS].base;
1514 if (is_int)
1515 old_eip = next_eip;
1516 else
1517 old_eip = env->eip;
1518 old_cs = env->segs[R_CS].selector;
1519 /* XXX: use SS segment size ? */
1520 PUSHW(ssp, esp, 0xffff, compute_eflags());
1521 PUSHW(ssp, esp, 0xffff, old_cs);
1522 PUSHW(ssp, esp, 0xffff, old_eip);
1523
1524 /* update processor state */
1525 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1526 env->eip = offset;
1527 env->segs[R_CS].selector = selector;
1528 env->segs[R_CS].base = (selector << 4);
1529 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1530}
1531
1532/* fake user mode interrupt */
1533void do_interrupt_user(int intno, int is_int, int error_code,
1534 target_ulong next_eip)
1535{
1536 SegmentCache *dt;
1537 target_ulong ptr;
1538 int dpl, cpl, shift;
1539 uint32_t e2;
1540
1541 dt = &env->idt;
1542 if (env->hflags & HF_LMA_MASK) {
1543 shift = 4;
1544 } else {
1545 shift = 3;
1546 }
1547 ptr = dt->base + (intno << shift);
1548 e2 = ldl_kernel(ptr + 4);
1549
1550 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1551 cpl = env->hflags & HF_CPL_MASK;
1552 /* check privilege if software int */
1553 if (is_int && dpl < cpl)
1554 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1555
1556 /* Since we emulate only user space, we cannot do more than
 1557 exit the emulation with a suitable exception and error
1558 code */
1559 if (is_int)
1560 EIP = next_eip;
1561}
1562
1563/*
 1564 * Begin execution of an interrupt. is_int is TRUE if coming from
1565 * the int instruction. next_eip is the EIP value AFTER the interrupt
1566 * instruction. It is only relevant if is_int is TRUE.
1567 */
1568void do_interrupt(int intno, int is_int, int error_code,
1569 target_ulong next_eip, int is_hw)
1570{
1571 if (loglevel & CPU_LOG_INT) {
1572 if ((env->cr[0] & CR0_PE_MASK)) {
1573 static int count;
1574 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1575 count, intno, error_code, is_int,
1576 env->hflags & HF_CPL_MASK,
1577 env->segs[R_CS].selector, EIP,
1578 (int)env->segs[R_CS].base + EIP,
1579 env->segs[R_SS].selector, ESP);
1580 if (intno == 0x0e) {
1581 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1582 } else {
1583 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1584 }
1585 fprintf(logfile, "\n");
1586 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1587#if 0
1588 {
1589 int i;
1590 uint8_t *ptr;
1591 fprintf(logfile, " code=");
1592 ptr = env->segs[R_CS].base + env->eip;
1593 for(i = 0; i < 16; i++) {
1594 fprintf(logfile, " %02x", ldub(ptr + i));
1595 }
1596 fprintf(logfile, "\n");
1597 }
1598#endif
1599 count++;
1600 }
1601 }
1602 if (env->cr[0] & CR0_PE_MASK) {
1603#ifdef TARGET_X86_64
1604 if (env->hflags & HF_LMA_MASK) {
1605 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1606 } else
1607#endif
1608 {
1609#ifdef VBOX
1610 /* int xx *, v86 code and VME enabled? */
1611 if ( (env->eflags & VM_MASK)
1612 && (env->cr[4] & CR4_VME_MASK)
1613 && is_int
1614 && !is_hw
1615 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1616 )
1617 do_soft_interrupt_vme(intno, error_code, next_eip);
1618 else
1619#endif /* VBOX */
1620 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1621 }
1622 } else {
1623 do_interrupt_real(intno, is_int, error_code, next_eip);
1624 }
1625}
1626
1627/*
1628 * Check nested exceptions and change to double or triple fault if
 1629 * needed. It should only be called if this is not an interrupt.
1630 * Returns the new exception number.
1631 */
1632static int check_exception(int intno, int *error_code)
1633{
1634 int first_contributory = env->old_exception == 0 ||
1635 (env->old_exception >= 10 &&
1636 env->old_exception <= 13);
1637 int second_contributory = intno == 0 ||
1638 (intno >= 10 && intno <= 13);
1639
1640 if (loglevel & CPU_LOG_INT)
1641 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1642 env->old_exception, intno);
1643
1644 if (env->old_exception == EXCP08_DBLE)
1645 cpu_abort(env, "triple fault");
1646
1647 if ((first_contributory && second_contributory)
1648 || (env->old_exception == EXCP0E_PAGE &&
1649 (second_contributory || (intno == EXCP0E_PAGE)))) {
1650 intno = EXCP08_DBLE;
1651 *error_code = 0;
1652 }
1653
1654 if (second_contributory || (intno == EXCP0E_PAGE) ||
1655 (intno == EXCP08_DBLE))
1656 env->old_exception = intno;
1657
1658 return intno;
1659}
1660
1661/*
 1662 * Signal an interrupt. It is executed in the main CPU loop.
1663 * is_int is TRUE if coming from the int instruction. next_eip is the
1664 * EIP value AFTER the interrupt instruction. It is only relevant if
1665 * is_int is TRUE.
1666 */
1667void raise_interrupt(int intno, int is_int, int error_code,
1668 int next_eip_addend)
1669{
1670#if defined(VBOX) && defined(DEBUG)
1671 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1672#endif
1673 if (!is_int) {
1674 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1675 intno = check_exception(intno, &error_code);
1676 } else {
1677 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1678 }
1679
1680 env->exception_index = intno;
1681 env->error_code = error_code;
1682 env->exception_is_int = is_int;
1683 env->exception_next_eip = env->eip + next_eip_addend;
1684 cpu_loop_exit();
1685}
1686
1687/* shortcuts to generate exceptions */
1688
1689void (raise_exception_err)(int exception_index, int error_code)
1690{
1691 raise_interrupt(exception_index, 0, error_code, 0);
1692}
1693
1694void raise_exception(int exception_index)
1695{
1696 raise_interrupt(exception_index, 0, 0, 0);
1697}
1698
1699/* SMM support */
1700
1701#if defined(CONFIG_USER_ONLY)
1702
1703void do_smm_enter(void)
1704{
1705}
1706
1707void helper_rsm(void)
1708{
1709}
1710
1711#else
1712
1713#ifdef TARGET_X86_64
1714#define SMM_REVISION_ID 0x00020064
1715#else
1716#define SMM_REVISION_ID 0x00020000
1717#endif
1718
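/* On an SMI the CPU stores its state into the save area near the top of
   SMRAM (sm_state = smbase + 0x8000, slots at 0x7exx/0x7fxx) and resumes
   execution at smbase + 0x8000 in a real-mode like environment with flat
   4G segments, paging and protection disabled. */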
1719void do_smm_enter(void)
1720{
1721 target_ulong sm_state;
1722 SegmentCache *dt;
1723 int i, offset;
1724
1725 if (loglevel & CPU_LOG_INT) {
1726 fprintf(logfile, "SMM: enter\n");
1727 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1728 }
1729
1730 env->hflags |= HF_SMM_MASK;
1731 cpu_smm_update(env);
1732
1733 sm_state = env->smbase + 0x8000;
1734
1735#ifdef TARGET_X86_64
1736 for(i = 0; i < 6; i++) {
1737 dt = &env->segs[i];
1738 offset = 0x7e00 + i * 16;
1739 stw_phys(sm_state + offset, dt->selector);
1740 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1741 stl_phys(sm_state + offset + 4, dt->limit);
1742 stq_phys(sm_state + offset + 8, dt->base);
1743 }
1744
1745 stq_phys(sm_state + 0x7e68, env->gdt.base);
1746 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1747
1748 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1749 stq_phys(sm_state + 0x7e78, env->ldt.base);
1750 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1751 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1752
1753 stq_phys(sm_state + 0x7e88, env->idt.base);
1754 stl_phys(sm_state + 0x7e84, env->idt.limit);
1755
1756 stw_phys(sm_state + 0x7e90, env->tr.selector);
1757 stq_phys(sm_state + 0x7e98, env->tr.base);
1758 stl_phys(sm_state + 0x7e94, env->tr.limit);
1759 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1760
1761 stq_phys(sm_state + 0x7ed0, env->efer);
1762
1763 stq_phys(sm_state + 0x7ff8, EAX);
1764 stq_phys(sm_state + 0x7ff0, ECX);
1765 stq_phys(sm_state + 0x7fe8, EDX);
1766 stq_phys(sm_state + 0x7fe0, EBX);
1767 stq_phys(sm_state + 0x7fd8, ESP);
1768 stq_phys(sm_state + 0x7fd0, EBP);
1769 stq_phys(sm_state + 0x7fc8, ESI);
1770 stq_phys(sm_state + 0x7fc0, EDI);
1771 for(i = 8; i < 16; i++)
1772 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1773 stq_phys(sm_state + 0x7f78, env->eip);
1774 stl_phys(sm_state + 0x7f70, compute_eflags());
1775 stl_phys(sm_state + 0x7f68, env->dr[6]);
1776 stl_phys(sm_state + 0x7f60, env->dr[7]);
1777
1778 stl_phys(sm_state + 0x7f48, env->cr[4]);
1779 stl_phys(sm_state + 0x7f50, env->cr[3]);
1780 stl_phys(sm_state + 0x7f58, env->cr[0]);
1781
1782 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1783 stl_phys(sm_state + 0x7f00, env->smbase);
1784#else
1785 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1786 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1787 stl_phys(sm_state + 0x7ff4, compute_eflags());
1788 stl_phys(sm_state + 0x7ff0, env->eip);
1789 stl_phys(sm_state + 0x7fec, EDI);
1790 stl_phys(sm_state + 0x7fe8, ESI);
1791 stl_phys(sm_state + 0x7fe4, EBP);
1792 stl_phys(sm_state + 0x7fe0, ESP);
1793 stl_phys(sm_state + 0x7fdc, EBX);
1794 stl_phys(sm_state + 0x7fd8, EDX);
1795 stl_phys(sm_state + 0x7fd4, ECX);
1796 stl_phys(sm_state + 0x7fd0, EAX);
1797 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1798 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1799
1800 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1801 stl_phys(sm_state + 0x7f64, env->tr.base);
1802 stl_phys(sm_state + 0x7f60, env->tr.limit);
1803 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1804
1805 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1806 stl_phys(sm_state + 0x7f80, env->ldt.base);
1807 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1808 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1809
1810 stl_phys(sm_state + 0x7f74, env->gdt.base);
1811 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1812
1813 stl_phys(sm_state + 0x7f58, env->idt.base);
1814 stl_phys(sm_state + 0x7f54, env->idt.limit);
1815
1816 for(i = 0; i < 6; i++) {
1817 dt = &env->segs[i];
1818 if (i < 3)
1819 offset = 0x7f84 + i * 12;
1820 else
1821 offset = 0x7f2c + (i - 3) * 12;
1822 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1823 stl_phys(sm_state + offset + 8, dt->base);
1824 stl_phys(sm_state + offset + 4, dt->limit);
1825 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1826 }
1827 stl_phys(sm_state + 0x7f14, env->cr[4]);
1828
1829 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1830 stl_phys(sm_state + 0x7ef8, env->smbase);
1831#endif
1832 /* init SMM cpu state */
1833
1834#ifdef TARGET_X86_64
1835 cpu_load_efer(env, 0);
1836#endif
1837 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1838 env->eip = 0x00008000;
1839 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1840 0xffffffff, 0);
1841 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1842 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1843 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1844 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1845 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1846
1847 cpu_x86_update_cr0(env,
1848 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1849 cpu_x86_update_cr4(env, 0);
1850 env->dr[7] = 0x00000400;
1851 CC_OP = CC_OP_EFLAGS;
1852}
1853
1854void helper_rsm(void)
1855{
1856#ifdef VBOX
1857 cpu_abort(env, "helper_rsm");
1858#else /* !VBOX */
1860
1861 target_ulong sm_state;
1862 int i, offset;
1863 uint32_t val;
1864
1865 sm_state = env->smbase + 0x8000;
1866#ifdef TARGET_X86_64
1867 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1868
1869 for(i = 0; i < 6; i++) {
1870 offset = 0x7e00 + i * 16;
1871 cpu_x86_load_seg_cache(env, i,
1872 lduw_phys(sm_state + offset),
1873 ldq_phys(sm_state + offset + 8),
1874 ldl_phys(sm_state + offset + 4),
1875 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1876 }
1877
1878 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1879 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1880
1881 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1882 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1883 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1884 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1885
1886 env->idt.base = ldq_phys(sm_state + 0x7e88);
1887 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1888
1889 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1890 env->tr.base = ldq_phys(sm_state + 0x7e98);
1891 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1892 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1893
1894 EAX = ldq_phys(sm_state + 0x7ff8);
1895 ECX = ldq_phys(sm_state + 0x7ff0);
1896 EDX = ldq_phys(sm_state + 0x7fe8);
1897 EBX = ldq_phys(sm_state + 0x7fe0);
1898 ESP = ldq_phys(sm_state + 0x7fd8);
1899 EBP = ldq_phys(sm_state + 0x7fd0);
1900 ESI = ldq_phys(sm_state + 0x7fc8);
1901 EDI = ldq_phys(sm_state + 0x7fc0);
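    /* env->regs[8..15] are R8-R15; restore them from the 64-bit save area */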
1902 for(i = 8; i < 16; i++)
1903 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1904 env->eip = ldq_phys(sm_state + 0x7f78);
1905 load_eflags(ldl_phys(sm_state + 0x7f70),
1906 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1907 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1908 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1909
1910 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1911 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1912 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1913
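    /* bit 17 of the SMM revision ID signals SMBASE relocation support; if set, reload SMBASE from the save area */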
1914 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1915 if (val & 0x20000) {
1916 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1917 }
1918#else
1919 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1920 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1921 load_eflags(ldl_phys(sm_state + 0x7ff4),
1922 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1923 env->eip = ldl_phys(sm_state + 0x7ff0);
1924 EDI = ldl_phys(sm_state + 0x7fec);
1925 ESI = ldl_phys(sm_state + 0x7fe8);
1926 EBP = ldl_phys(sm_state + 0x7fe4);
1927 ESP = ldl_phys(sm_state + 0x7fe0);
1928 EBX = ldl_phys(sm_state + 0x7fdc);
1929 EDX = ldl_phys(sm_state + 0x7fd8);
1930 ECX = ldl_phys(sm_state + 0x7fd4);
1931 EAX = ldl_phys(sm_state + 0x7fd0);
1932 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1933 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1934
1935 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1936 env->tr.base = ldl_phys(sm_state + 0x7f64);
1937 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1938 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1939
1940 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1941 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1942 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1943 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1944
1945 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1946 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1947
1948 env->idt.base = ldl_phys(sm_state + 0x7f58);
1949 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1950
1951 for(i = 0; i < 6; i++) {
1952 if (i < 3)
1953 offset = 0x7f84 + i * 12;
1954 else
1955 offset = 0x7f2c + (i - 3) * 12;
1956 cpu_x86_load_seg_cache(env, i,
1957 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1958 ldl_phys(sm_state + offset + 8),
1959 ldl_phys(sm_state + offset + 4),
1960 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1961 }
1962 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1963
1964 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1965 if (val & 0x20000) {
1966 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1967 }
1968#endif
1969 CC_OP = CC_OP_EFLAGS;
1970 env->hflags &= ~HF_SMM_MASK;
1971 cpu_smm_update(env);
1972
1973 if (loglevel & CPU_LOG_INT) {
1974 fprintf(logfile, "SMM: after RSM\n");
1975 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1976 }
1977#endif /* !VBOX */
1978}
1979
1980#endif /* !CONFIG_USER_ONLY */
1981
1982
1983/* division, flags are undefined */
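/* Both a zero divisor and a quotient that does not fit in the destination register raise EXCP00_DIVZ (#DE), as on real hardware. */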
1984
1985void helper_divb_AL(target_ulong t0)
1986{
1987 unsigned int num, den, q, r;
1988
1989 num = (EAX & 0xffff);
1990 den = (t0 & 0xff);
1991 if (den == 0) {
1992 raise_exception(EXCP00_DIVZ);
1993 }
1994 q = (num / den);
1995 if (q > 0xff)
1996 raise_exception(EXCP00_DIVZ);
1997 q &= 0xff;
1998 r = (num % den) & 0xff;
1999 EAX = (EAX & ~0xffff) | (r << 8) | q;
2000}
2001
2002void helper_idivb_AL(target_ulong t0)
2003{
2004 int num, den, q, r;
2005
2006 num = (int16_t)EAX;
2007 den = (int8_t)t0;
2008 if (den == 0) {
2009 raise_exception(EXCP00_DIVZ);
2010 }
2011 q = (num / den);
2012 if (q != (int8_t)q)
2013 raise_exception(EXCP00_DIVZ);
2014 q &= 0xff;
2015 r = (num % den) & 0xff;
2016 EAX = (EAX & ~0xffff) | (r << 8) | q;
2017}
2018
2019void helper_divw_AX(target_ulong t0)
2020{
2021 unsigned int num, den, q, r;
2022
2023 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2024 den = (t0 & 0xffff);
2025 if (den == 0) {
2026 raise_exception(EXCP00_DIVZ);
2027 }
2028 q = (num / den);
2029 if (q > 0xffff)
2030 raise_exception(EXCP00_DIVZ);
2031 q &= 0xffff;
2032 r = (num % den) & 0xffff;
2033 EAX = (EAX & ~0xffff) | q;
2034 EDX = (EDX & ~0xffff) | r;
2035}
2036
2037void helper_idivw_AX(target_ulong t0)
2038{
2039 int num, den, q, r;
2040
2041 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2042 den = (int16_t)t0;
2043 if (den == 0) {
2044 raise_exception(EXCP00_DIVZ);
2045 }
2046 q = (num / den);
2047 if (q != (int16_t)q)
2048 raise_exception(EXCP00_DIVZ);
2049 q &= 0xffff;
2050 r = (num % den) & 0xffff;
2051 EAX = (EAX & ~0xffff) | q;
2052 EDX = (EDX & ~0xffff) | r;
2053}
2054
2055void helper_divl_EAX(target_ulong t0)
2056{
2057 unsigned int den, r;
2058 uint64_t num, q;
2059
2060 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2061 den = t0;
2062 if (den == 0) {
2063 raise_exception(EXCP00_DIVZ);
2064 }
2065 q = (num / den);
2066 r = (num % den);
2067 if (q > 0xffffffff)
2068 raise_exception(EXCP00_DIVZ);
2069 EAX = (uint32_t)q;
2070 EDX = (uint32_t)r;
2071}
2072
2073void helper_idivl_EAX(target_ulong t0)
2074{
2075 int den, r;
2076 int64_t num, q;
2077
2078 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2079 den = t0;
2080 if (den == 0) {
2081 raise_exception(EXCP00_DIVZ);
2082 }
2083 q = (num / den);
2084 r = (num % den);
2085 if (q != (int32_t)q)
2086 raise_exception(EXCP00_DIVZ);
2087 EAX = (uint32_t)q;
2088 EDX = (uint32_t)r;
2089}
2090
2091/* bcd */
2092
2093/* XXX: should raise #DE (divide error) when the immediate base is zero */
2094void helper_aam(int base)
2095{
2096 int al, ah;
2097 al = EAX & 0xff;
2098 ah = al / base;
2099 al = al % base;
2100 EAX = (EAX & ~0xffff) | al | (ah << 8);
2101 CC_DST = al;
2102}
2103
2104void helper_aad(int base)
2105{
2106 int al, ah;
2107 al = EAX & 0xff;
2108 ah = (EAX >> 8) & 0xff;
2109 al = ((ah * base) + al) & 0xff;
2110 EAX = (EAX & ~0xffff) | al;
2111 CC_DST = al;
2112}
2113
2114void helper_aaa(void)
2115{
2116 int icarry;
2117 int al, ah, af;
2118 int eflags;
2119
2120 eflags = cc_table[CC_OP].compute_all();
2121 af = eflags & CC_A;
2122 al = EAX & 0xff;
2123 ah = (EAX >> 8) & 0xff;
2124
2125 icarry = (al > 0xf9);
2126 if (((al & 0x0f) > 9 ) || af) {
2127 al = (al + 6) & 0x0f;
2128 ah = (ah + 1 + icarry) & 0xff;
2129 eflags |= CC_C | CC_A;
2130 } else {
2131 eflags &= ~(CC_C | CC_A);
2132 al &= 0x0f;
2133 }
2134 EAX = (EAX & ~0xffff) | al | (ah << 8);
2135 CC_SRC = eflags;
2136 FORCE_RET();
2137}
2138
2139void helper_aas(void)
2140{
2141 int icarry;
2142 int al, ah, af;
2143 int eflags;
2144
2145 eflags = cc_table[CC_OP].compute_all();
2146 af = eflags & CC_A;
2147 al = EAX & 0xff;
2148 ah = (EAX >> 8) & 0xff;
2149
2150 icarry = (al < 6);
2151 if (((al & 0x0f) > 9 ) || af) {
2152 al = (al - 6) & 0x0f;
2153 ah = (ah - 1 - icarry) & 0xff;
2154 eflags |= CC_C | CC_A;
2155 } else {
2156 eflags &= ~(CC_C | CC_A);
2157 al &= 0x0f;
2158 }
2159 EAX = (EAX & ~0xffff) | al | (ah << 8);
2160 CC_SRC = eflags;
2161 FORCE_RET();
2162}
2163
2164void helper_daa(void)
2165{
2166 int al, af, cf;
2167 int eflags;
2168
2169 eflags = cc_table[CC_OP].compute_all();
2170 cf = eflags & CC_C;
2171 af = eflags & CC_A;
2172 al = EAX & 0xff;
2173
2174 eflags = 0;
2175 if (((al & 0x0f) > 9 ) || af) {
2176 al = (al + 6) & 0xff;
2177 eflags |= CC_A;
2178 }
2179 if ((al > 0x9f) || cf) {
2180 al = (al + 0x60) & 0xff;
2181 eflags |= CC_C;
2182 }
2183 EAX = (EAX & ~0xff) | al;
2184 /* well, speed is not an issue here, so we compute the flags by hand */
2185 eflags |= (al == 0) << 6; /* zf */
2186 eflags |= parity_table[al]; /* pf */
2187 eflags |= (al & 0x80); /* sf */
2188 CC_SRC = eflags;
2189 FORCE_RET();
2190}
2191
2192void helper_das(void)
2193{
2194 int al, al1, af, cf;
2195 int eflags;
2196
2197 eflags = cc_table[CC_OP].compute_all();
2198 cf = eflags & CC_C;
2199 af = eflags & CC_A;
2200 al = EAX & 0xff;
2201
2202 eflags = 0;
2203 al1 = al;
2204 if (((al & 0x0f) > 9 ) || af) {
2205 eflags |= CC_A;
2206 if (al < 6 || cf)
2207 eflags |= CC_C;
2208 al = (al - 6) & 0xff;
2209 }
2210 if ((al1 > 0x99) || cf) {
2211 al = (al - 0x60) & 0xff;
2212 eflags |= CC_C;
2213 }
2214 EAX = (EAX & ~0xff) | al;
2215 /* well, speed is not an issue here, so we compute the flags by hand */
2216 eflags |= (al == 0) << 6; /* zf */
2217 eflags |= parity_table[al]; /* pf */
2218 eflags |= (al & 0x80); /* sf */
2219 CC_SRC = eflags;
2220 FORCE_RET();
2221}
2222
2223void helper_into(int next_eip_addend)
2224{
2225 int eflags;
2226 eflags = cc_table[CC_OP].compute_all();
2227 if (eflags & CC_O) {
2228 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2229 }
2230}
2231
2232void helper_cmpxchg8b(target_ulong a0)
2233{
2234 uint64_t d;
2235 int eflags;
2236
2237 eflags = cc_table[CC_OP].compute_all();
2238 d = ldq(a0);
2239 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2240 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2241 eflags |= CC_Z;
2242 } else {
2243 /* always do the store */
2244 stq(a0, d);
2245 EDX = (uint32_t)(d >> 32);
2246 EAX = (uint32_t)d;
2247 eflags &= ~CC_Z;
2248 }
2249 CC_SRC = eflags;
2250}
2251
2252#ifdef TARGET_X86_64
2253void helper_cmpxchg16b(target_ulong a0)
2254{
2255 uint64_t d0, d1;
2256 int eflags;
2257
2258 if ((a0 & 0xf) != 0)
2259 raise_exception(EXCP0D_GPF);
2260 eflags = cc_table[CC_OP].compute_all();
2261 d0 = ldq(a0);
2262 d1 = ldq(a0 + 8);
2263 if (d0 == EAX && d1 == EDX) {
2264 stq(a0, EBX);
2265 stq(a0 + 8, ECX);
2266 eflags |= CC_Z;
2267 } else {
2268 /* always do the store */
2269 stq(a0, d0);
2270 stq(a0 + 8, d1);
2271 EDX = d1;
2272 EAX = d0;
2273 eflags &= ~CC_Z;
2274 }
2275 CC_SRC = eflags;
2276}
2277#endif
2278
2279void helper_single_step(void)
2280{
2281 env->dr[6] |= 0x4000;
2282 raise_exception(EXCP01_SSTP);
2283}
2284
2285void helper_cpuid(void)
2286{
2287#ifndef VBOX
2288 uint32_t index;
2289
2290 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2291
2292 index = (uint32_t)EAX;
2293 /* test if maximum index reached */
2294 if (index & 0x80000000) {
2295 if (index > env->cpuid_xlevel)
2296 index = env->cpuid_level;
2297 } else {
2298 if (index > env->cpuid_level)
2299 index = env->cpuid_level;
2300 }
2301
2302 switch(index) {
2303 case 0:
2304 EAX = env->cpuid_level;
2305 EBX = env->cpuid_vendor1;
2306 EDX = env->cpuid_vendor2;
2307 ECX = env->cpuid_vendor3;
2308 break;
2309 case 1:
2310 EAX = env->cpuid_version;
2311 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size in quad words (8 => 64 bytes); Linux wants it. */
2312 ECX = env->cpuid_ext_features;
2313 EDX = env->cpuid_features;
2314 break;
2315 case 2:
2316 /* cache info: needed for Pentium Pro compatibility */
2317 EAX = 1;
2318 EBX = 0;
2319 ECX = 0;
2320 EDX = 0x2c307d;
2321 break;
2322 case 4:
2323 /* cache info: needed for Core compatibility */
2324 switch (ECX) {
2325 case 0: /* L1 dcache info */
2326 EAX = 0x0000121;
2327 EBX = 0x1c0003f;
2328 ECX = 0x000003f;
2329 EDX = 0x0000001;
2330 break;
2331 case 1: /* L1 icache info */
2332 EAX = 0x0000122;
2333 EBX = 0x1c0003f;
2334 ECX = 0x000003f;
2335 EDX = 0x0000001;
2336 break;
2337 case 2: /* L2 cache info */
2338 EAX = 0x0000143;
2339 EBX = 0x3c0003f;
2340 ECX = 0x0000fff;
2341 EDX = 0x0000001;
2342 break;
2343 default: /* end of info */
2344 EAX = 0;
2345 EBX = 0;
2346 ECX = 0;
2347 EDX = 0;
2348 break;
2349 }
2350
2351 break;
2352 case 5:
2353 /* mwait info: needed for Core compatibility */
2354 EAX = 0; /* Smallest monitor-line size in bytes */
2355 EBX = 0; /* Largest monitor-line size in bytes */
2356 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2357 EDX = 0;
2358 break;
2359 case 6:
2360 /* Thermal and Power Leaf */
2361 EAX = 0;
2362 EBX = 0;
2363 ECX = 0;
2364 EDX = 0;
2365 break;
2366 case 9:
2367 /* Direct Cache Access Information Leaf */
2368 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2369 EBX = 0;
2370 ECX = 0;
2371 EDX = 0;
2372 break;
2373 case 0xA:
2374 /* Architectural Performance Monitoring Leaf */
2375 EAX = 0;
2376 EBX = 0;
2377 ECX = 0;
2378 EDX = 0;
2379 break;
2380 case 0x80000000:
2381 EAX = env->cpuid_xlevel;
2382 EBX = env->cpuid_vendor1;
2383 EDX = env->cpuid_vendor2;
2384 ECX = env->cpuid_vendor3;
2385 break;
2386 case 0x80000001:
2387 EAX = env->cpuid_features;
2388 EBX = 0;
2389 ECX = env->cpuid_ext3_features;
2390 EDX = env->cpuid_ext2_features;
2391 break;
2392 case 0x80000002:
2393 case 0x80000003:
2394 case 0x80000004:
2395 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2396 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2397 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2398 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2399 break;
2400 case 0x80000005:
2401 /* cache info (L1 cache) */
2402 EAX = 0x01ff01ff;
2403 EBX = 0x01ff01ff;
2404 ECX = 0x40020140;
2405 EDX = 0x40020140;
2406 break;
2407 case 0x80000006:
2408 /* cache info (L2 cache) */
2409 EAX = 0;
2410 EBX = 0x42004200;
2411 ECX = 0x02008140;
2412 EDX = 0;
2413 break;
2414 case 0x80000008:
2415 /* virtual & phys address size in low 2 bytes. */
2416/* XXX: This value must match the one used in the MMU code. */
2417 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2418 /* 64 bit processor */
2419#if defined(USE_KQEMU)
2420 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2421#else
2422/* XXX: The physical address space is limited to 42 bits in exec.c. */
2423 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2424#endif
2425 } else {
2426#if defined(USE_KQEMU)
2427 EAX = 0x00000020; /* 32 bits physical */
2428#else
2429 if (env->cpuid_features & CPUID_PSE36)
2430 EAX = 0x00000024; /* 36 bits physical */
2431 else
2432 EAX = 0x00000020; /* 32 bits physical */
2433#endif
2434 }
2435 EBX = 0;
2436 ECX = 0;
2437 EDX = 0;
2438 break;
2439 case 0x8000000A:
2440 EAX = 0x00000001;
2441 EBX = 0;
2442 ECX = 0;
2443 EDX = 0;
2444 break;
2445 default:
2446 /* reserved values: zero */
2447 EAX = 0;
2448 EBX = 0;
2449 ECX = 0;
2450 EDX = 0;
2451 break;
2452 }
2453#else /* VBOX */
2454 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2455#endif /* VBOX */
2456}
2457
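/* ENTER with a non-zero nesting level: copy level-1 frame pointers from the caller's frame, then store the new frame pointer (t1) at the bottom of the display. */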
2458void helper_enter_level(int level, int data32, target_ulong t1)
2459{
2460 target_ulong ssp;
2461 uint32_t esp_mask, esp, ebp;
2462
2463 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2464 ssp = env->segs[R_SS].base;
2465 ebp = EBP;
2466 esp = ESP;
2467 if (data32) {
2468 /* 32 bit */
2469 esp -= 4;
2470 while (--level) {
2471 esp -= 4;
2472 ebp -= 4;
2473 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2474 }
2475 esp -= 4;
2476 stl(ssp + (esp & esp_mask), t1);
2477 } else {
2478 /* 16 bit */
2479 esp -= 2;
2480 while (--level) {
2481 esp -= 2;
2482 ebp -= 2;
2483 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2484 }
2485 esp -= 2;
2486 stw(ssp + (esp & esp_mask), t1);
2487 }
2488}
2489
2490#ifdef TARGET_X86_64
2491void helper_enter64_level(int level, int data64, target_ulong t1)
2492{
2493 target_ulong esp, ebp;
2494 ebp = EBP;
2495 esp = ESP;
2496
2497 if (data64) {
2498 /* 64 bit */
2499 esp -= 8;
2500 while (--level) {
2501 esp -= 8;
2502 ebp -= 8;
2503 stq(esp, ldq(ebp));
2504 }
2505 esp -= 8;
2506 stq(esp, t1);
2507 } else {
2508 /* 16 bit */
2509 esp -= 2;
2510 while (--level) {
2511 esp -= 2;
2512 ebp -= 2;
2513 stw(esp, lduw(ebp));
2514 }
2515 esp -= 2;
2516 stw(esp, t1);
2517 }
2518}
2519#endif
2520
2521void helper_lldt(int selector)
2522{
2523 SegmentCache *dt;
2524 uint32_t e1, e2;
2525#ifndef VBOX
2526 int index, entry_limit;
2527#else
2528 unsigned int index, entry_limit;
2529#endif
2530 target_ulong ptr;
2531
2532#ifdef VBOX
2533 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2534 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2535#endif
2536
2537 selector &= 0xffff;
2538 if ((selector & 0xfffc) == 0) {
2539 /* XXX: NULL selector case: invalid LDT */
2540 env->ldt.base = 0;
2541 env->ldt.limit = 0;
2542 } else {
2543 if (selector & 0x4)
2544 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2545 dt = &env->gdt;
2546 index = selector & ~7;
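    /* LDT descriptors are 16 bytes in long mode, 8 bytes otherwise */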
2547#ifdef TARGET_X86_64
2548 if (env->hflags & HF_LMA_MASK)
2549 entry_limit = 15;
2550 else
2551#endif
2552 entry_limit = 7;
2553 if ((index + entry_limit) > dt->limit)
2554 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2555 ptr = dt->base + index;
2556 e1 = ldl_kernel(ptr);
2557 e2 = ldl_kernel(ptr + 4);
2558 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2559 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2560 if (!(e2 & DESC_P_MASK))
2561 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2562#ifdef TARGET_X86_64
2563 if (env->hflags & HF_LMA_MASK) {
2564 uint32_t e3;
2565 e3 = ldl_kernel(ptr + 8);
2566 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2567 env->ldt.base |= (target_ulong)e3 << 32;
2568 } else
2569#endif
2570 {
2571 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2572 }
2573 }
2574 env->ldt.selector = selector;
2575#ifdef VBOX
2576 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2577 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2578#endif
2579}
2580
2581void helper_ltr(int selector)
2582{
2583 SegmentCache *dt;
2584 uint32_t e1, e2;
2585#ifndef VBOX
2586 int index, type, entry_limit;
2587#else
2588 unsigned int index;
2589 int type, entry_limit;
2590#endif
2591 target_ulong ptr;
2592
2593#ifdef VBOX
2594 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2595 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2596 env->tr.flags, (RTSEL)(selector & 0xffff)));
2597#endif
2598 selector &= 0xffff;
2599 if ((selector & 0xfffc) == 0) {
2600 /* NULL selector case: invalid TR */
2601 env->tr.base = 0;
2602 env->tr.limit = 0;
2603 env->tr.flags = 0;
2604 } else {
2605 if (selector & 0x4)
2606 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2607 dt = &env->gdt;
2608 index = selector & ~7;
2609#ifdef TARGET_X86_64
2610 if (env->hflags & HF_LMA_MASK)
2611 entry_limit = 15;
2612 else
2613#endif
2614 entry_limit = 7;
2615 if ((index + entry_limit) > dt->limit)
2616 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2617 ptr = dt->base + index;
2618 e1 = ldl_kernel(ptr);
2619 e2 = ldl_kernel(ptr + 4);
2620 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2621 if ((e2 & DESC_S_MASK) ||
2622 (type != 1 && type != 9))
2623 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2624 if (!(e2 & DESC_P_MASK))
2625 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2626#ifdef TARGET_X86_64
2627 if (env->hflags & HF_LMA_MASK) {
2628 uint32_t e3, e4;
2629 e3 = ldl_kernel(ptr + 8);
2630 e4 = ldl_kernel(ptr + 12);
2631 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2632 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2633 load_seg_cache_raw_dt(&env->tr, e1, e2);
2634 env->tr.base |= (target_ulong)e3 << 32;
2635 } else
2636#endif
2637 {
2638 load_seg_cache_raw_dt(&env->tr, e1, e2);
2639 }
2640 e2 |= DESC_TSS_BUSY_MASK;
2641 stl_kernel(ptr + 4, e2);
2642 }
2643 env->tr.selector = selector;
2644#ifdef VBOX
2645 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2646 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2647 env->tr.flags, (RTSEL)(selector & 0xffff)));
2648#endif
2649}
2650
2651/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2652void helper_load_seg(int seg_reg, int selector)
2653{
2654 uint32_t e1, e2;
2655 int cpl, dpl, rpl;
2656 SegmentCache *dt;
2657#ifndef VBOX
2658 int index;
2659#else
2660 unsigned int index;
2661#endif
2662 target_ulong ptr;
2663
2664 selector &= 0xffff;
2665 cpl = env->hflags & HF_CPL_MASK;
2666
2667#ifdef VBOX
2668 /* Trying to load a selector with CPL=1? */
2669 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2670 {
2671 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2672 selector = selector & 0xfffc;
2673 }
2674#endif
2675 if ((selector & 0xfffc) == 0) {
2676 /* null selector case */
2677 if (seg_reg == R_SS
2678#ifdef TARGET_X86_64
2679 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2680#endif
2681 )
2682 raise_exception_err(EXCP0D_GPF, 0);
2683 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2684 } else {
2685
2686 if (selector & 0x4)
2687 dt = &env->ldt;
2688 else
2689 dt = &env->gdt;
2690 index = selector & ~7;
2691 if ((index + 7) > dt->limit)
2692 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2693 ptr = dt->base + index;
2694 e1 = ldl_kernel(ptr);
2695 e2 = ldl_kernel(ptr + 4);
2696
2697 if (!(e2 & DESC_S_MASK))
2698 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2699 rpl = selector & 3;
2700 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2701 if (seg_reg == R_SS) {
2702 /* must be writable segment */
2703 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2705 if (rpl != cpl || dpl != cpl)
2706 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2707 } else {
2708 /* must be readable segment */
2709 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2710 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2711
2712 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2713 /* if not conforming code, test rights */
2714 if (dpl < cpl || dpl < rpl)
2715 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2716 }
2717 }
2718
2719 if (!(e2 & DESC_P_MASK)) {
2720 if (seg_reg == R_SS)
2721 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2722 else
2723 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2724 }
2725
2726 /* set the access bit if not already set */
2727 if (!(e2 & DESC_A_MASK)) {
2728 e2 |= DESC_A_MASK;
2729 stl_kernel(ptr + 4, e2);
2730 }
2731
2732 cpu_x86_load_seg_cache(env, seg_reg, selector,
2733 get_seg_base(e1, e2),
2734 get_seg_limit(e1, e2),
2735 e2);
2736#if 0
2737 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2738 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2739#endif
2740 }
2741}
2742
2743/* protected mode jump */
2744void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2745 int next_eip_addend)
2746{
2747 int gate_cs, type;
2748 uint32_t e1, e2, cpl, dpl, rpl, limit;
2749 target_ulong next_eip;
2750
2751#ifdef VBOX
2752 e1 = e2 = 0;
2753#endif
2754 if ((new_cs & 0xfffc) == 0)
2755 raise_exception_err(EXCP0D_GPF, 0);
2756 if (load_segment(&e1, &e2, new_cs) != 0)
2757 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2758 cpl = env->hflags & HF_CPL_MASK;
2759 if (e2 & DESC_S_MASK) {
2760 if (!(e2 & DESC_CS_MASK))
2761 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2762 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2763 if (e2 & DESC_C_MASK) {
2764 /* conforming code segment */
2765 if (dpl > cpl)
2766 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2767 } else {
2768 /* non conforming code segment */
2769 rpl = new_cs & 3;
2770 if (rpl > cpl)
2771 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2772 if (dpl != cpl)
2773 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2774 }
2775 if (!(e2 & DESC_P_MASK))
2776 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2777 limit = get_seg_limit(e1, e2);
2778 if (new_eip > limit &&
2779 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2780 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2781 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2782 get_seg_base(e1, e2), limit, e2);
2783 EIP = new_eip;
2784 } else {
2785 /* jump to call or task gate */
2786 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2787 rpl = new_cs & 3;
2788 cpl = env->hflags & HF_CPL_MASK;
2789 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2790 switch(type) {
2791 case 1: /* 286 TSS */
2792 case 9: /* 386 TSS */
2793 case 5: /* task gate */
2794 if (dpl < cpl || dpl < rpl)
2795 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2796 next_eip = env->eip + next_eip_addend;
2797 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2798 CC_OP = CC_OP_EFLAGS;
2799 break;
2800 case 4: /* 286 call gate */
2801 case 12: /* 386 call gate */
2802 if ((dpl < cpl) || (dpl < rpl))
2803 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2804 if (!(e2 & DESC_P_MASK))
2805 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2806 gate_cs = e1 >> 16;
2807 new_eip = (e1 & 0xffff);
2808 if (type == 12)
2809 new_eip |= (e2 & 0xffff0000);
2810 if (load_segment(&e1, &e2, gate_cs) != 0)
2811 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2812 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2813 /* must be code segment */
2814 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2815 (DESC_S_MASK | DESC_CS_MASK)))
2816 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2817 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2818 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2819 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2820 if (!(e2 & DESC_P_MASK))
2821#ifdef VBOX /* See page 3-514 of 253666.pdf */
2822 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2823#else
2824 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2825#endif
2826 limit = get_seg_limit(e1, e2);
2827 if (new_eip > limit)
2828 raise_exception_err(EXCP0D_GPF, 0);
2829 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2830 get_seg_base(e1, e2), limit, e2);
2831 EIP = new_eip;
2832 break;
2833 default:
2834 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2835 break;
2836 }
2837 }
2838}
2839
2840/* real mode call */
2841void helper_lcall_real(int new_cs, target_ulong new_eip1,
2842 int shift, int next_eip)
2843{
2844 int new_eip;
2845 uint32_t esp, esp_mask;
2846 target_ulong ssp;
2847
2848 new_eip = new_eip1;
2849 esp = ESP;
2850 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2851 ssp = env->segs[R_SS].base;
2852 if (shift) {
2853 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2854 PUSHL(ssp, esp, esp_mask, next_eip);
2855 } else {
2856 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2857 PUSHW(ssp, esp, esp_mask, next_eip);
2858 }
2859
2860 SET_ESP(esp, esp_mask);
2861 env->eip = new_eip;
2862 env->segs[R_CS].selector = new_cs;
2863 env->segs[R_CS].base = (new_cs << 4);
2864}
2865
2866/* protected mode call */
2867void helper_lcall_protected(int new_cs, target_ulong new_eip,
2868 int shift, int next_eip_addend)
2869{
2870 int new_stack, i;
2871 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2872 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2873 uint32_t val, limit, old_sp_mask;
2874 target_ulong ssp, old_ssp, next_eip;
2875
2876#ifdef VBOX
2877 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2878#endif
2879 next_eip = env->eip + next_eip_addend;
2880#ifdef DEBUG_PCALL
2881 if (loglevel & CPU_LOG_PCALL) {
2882 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2883 new_cs, (uint32_t)new_eip, shift);
2884 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2885 }
2886#endif
2887 if ((new_cs & 0xfffc) == 0)
2888 raise_exception_err(EXCP0D_GPF, 0);
2889 if (load_segment(&e1, &e2, new_cs) != 0)
2890 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2891 cpl = env->hflags & HF_CPL_MASK;
2892#ifdef DEBUG_PCALL
2893 if (loglevel & CPU_LOG_PCALL) {
2894 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2895 }
2896#endif
2897 if (e2 & DESC_S_MASK) {
2898 if (!(e2 & DESC_CS_MASK))
2899 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2900 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2901 if (e2 & DESC_C_MASK) {
2902 /* conforming code segment */
2903 if (dpl > cpl)
2904 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2905 } else {
2906 /* non conforming code segment */
2907 rpl = new_cs & 3;
2908 if (rpl > cpl)
2909 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2910 if (dpl != cpl)
2911 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2912 }
2913 if (!(e2 & DESC_P_MASK))
2914 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2915
2916#ifdef TARGET_X86_64
2917 /* XXX: check 16/32 bit cases in long mode */
2918 if (shift == 2) {
2919 target_ulong rsp;
2920 /* 64 bit case */
2921 rsp = ESP;
2922 PUSHQ(rsp, env->segs[R_CS].selector);
2923 PUSHQ(rsp, next_eip);
2924 /* from this point, not restartable */
2925 ESP = rsp;
2926 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2927 get_seg_base(e1, e2),
2928 get_seg_limit(e1, e2), e2);
2929 EIP = new_eip;
2930 } else
2931#endif
2932 {
2933 sp = ESP;
2934 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2935 ssp = env->segs[R_SS].base;
2936 if (shift) {
2937 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2938 PUSHL(ssp, sp, sp_mask, next_eip);
2939 } else {
2940 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2941 PUSHW(ssp, sp, sp_mask, next_eip);
2942 }
2943
2944 limit = get_seg_limit(e1, e2);
2945 if (new_eip > limit)
2946 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2947 /* from this point, not restartable */
2948 SET_ESP(sp, sp_mask);
2949 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2950 get_seg_base(e1, e2), limit, e2);
2951 EIP = new_eip;
2952 }
2953 } else {
2954 /* check gate type */
2955 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2956 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2957 rpl = new_cs & 3;
2958 switch(type) {
2959 case 1: /* available 286 TSS */
2960 case 9: /* available 386 TSS */
2961 case 5: /* task gate */
2962 if (dpl < cpl || dpl < rpl)
2963 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2964 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2965 CC_OP = CC_OP_EFLAGS;
2966 return;
2967 case 4: /* 286 call gate */
2968 case 12: /* 386 call gate */
2969 break;
2970 default:
2971 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2972 break;
2973 }
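    /* type 4 (286 call gate) => shift 0 (16-bit operands), type 12 (386 call gate) => shift 1 (32-bit operands) */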
2974 shift = type >> 3;
2975
2976 if (dpl < cpl || dpl < rpl)
2977 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2978 /* check valid bit */
2979 if (!(e2 & DESC_P_MASK))
2980 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2981 selector = e1 >> 16;
2982 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2983 param_count = e2 & 0x1f;
2984 if ((selector & 0xfffc) == 0)
2985 raise_exception_err(EXCP0D_GPF, 0);
2986
2987 if (load_segment(&e1, &e2, selector) != 0)
2988 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2989 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2990 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2991 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2992 if (dpl > cpl)
2993 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2994 if (!(e2 & DESC_P_MASK))
2995 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2996
2997 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2998 /* to inner privilege */
2999 get_ss_esp_from_tss(&ss, &sp, dpl);
3000#ifdef DEBUG_PCALL
3001 if (loglevel & CPU_LOG_PCALL)
3002 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3003 ss, sp, param_count, ESP);
3004#endif
3005 if ((ss & 0xfffc) == 0)
3006 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3007 if ((ss & 3) != dpl)
3008 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3009 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3010 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3011 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3012 if (ss_dpl != dpl)
3013 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3014 if (!(ss_e2 & DESC_S_MASK) ||
3015 (ss_e2 & DESC_CS_MASK) ||
3016 !(ss_e2 & DESC_W_MASK))
3017 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3018 if (!(ss_e2 & DESC_P_MASK))
3019#ifdef VBOX /* See page 3-99 of 253666.pdf */
3020 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3021#else
3022 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3023#endif
3024
3025 // push_size = ((param_count * 2) + 8) << shift;
3026
3027 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3028 old_ssp = env->segs[R_SS].base;
3029
3030 sp_mask = get_sp_mask(ss_e2);
3031 ssp = get_seg_base(ss_e1, ss_e2);
3032 if (shift) {
3033 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3034 PUSHL(ssp, sp, sp_mask, ESP);
3035 for(i = param_count - 1; i >= 0; i--) {
3036 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3037 PUSHL(ssp, sp, sp_mask, val);
3038 }
3039 } else {
3040 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3041 PUSHW(ssp, sp, sp_mask, ESP);
3042 for(i = param_count - 1; i >= 0; i--) {
3043 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3044 PUSHW(ssp, sp, sp_mask, val);
3045 }
3046 }
3047 new_stack = 1;
3048 } else {
3049 /* to same privilege */
3050 sp = ESP;
3051 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3052 ssp = env->segs[R_SS].base;
3053 // push_size = (4 << shift);
3054 new_stack = 0;
3055 }
3056
3057 if (shift) {
3058 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3059 PUSHL(ssp, sp, sp_mask, next_eip);
3060 } else {
3061 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3062 PUSHW(ssp, sp, sp_mask, next_eip);
3063 }
3064
3065 /* from this point, not restartable */
3066
3067 if (new_stack) {
3068 ss = (ss & ~3) | dpl;
3069 cpu_x86_load_seg_cache(env, R_SS, ss,
3070 ssp,
3071 get_seg_limit(ss_e1, ss_e2),
3072 ss_e2);
3073 }
3074
3075 selector = (selector & ~3) | dpl;
3076 cpu_x86_load_seg_cache(env, R_CS, selector,
3077 get_seg_base(e1, e2),
3078 get_seg_limit(e1, e2),
3079 e2);
3080 cpu_x86_set_cpl(env, dpl);
3081 SET_ESP(sp, sp_mask);
3082 EIP = offset;
3083 }
3084#ifdef USE_KQEMU
3085 if (kqemu_is_ok(env)) {
3086 env->exception_index = -1;
3087 cpu_loop_exit();
3088 }
3089#endif
3090}
3091
3092/* real and vm86 mode iret */
3093void helper_iret_real(int shift)
3094{
3095 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3096 target_ulong ssp;
3097 int eflags_mask;
3098#ifdef VBOX
3099 bool fVME = false;
3100
3101 remR3TrapClear(env->pVM);
3102#endif /* VBOX */
3103
3104 sp_mask = 0xffff; /* XXX: use SS segment size? */
3105 sp = ESP;
3106 ssp = env->segs[R_SS].base;
3107 if (shift == 1) {
3108 /* 32 bits */
3109 POPL(ssp, sp, sp_mask, new_eip);
3110 POPL(ssp, sp, sp_mask, new_cs);
3111 new_cs &= 0xffff;
3112 POPL(ssp, sp, sp_mask, new_eflags);
3113 } else {
3114 /* 16 bits */
3115 POPW(ssp, sp, sp_mask, new_eip);
3116 POPW(ssp, sp, sp_mask, new_cs);
3117 POPW(ssp, sp, sp_mask, new_eflags);
3118 }
3119#ifdef VBOX
3120 if ( (env->eflags & VM_MASK)
3121 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3122 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3123 {
3124 fVME = true;
3125 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3126 /* if TF will be set -> #GP */
3127 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3128 || (new_eflags & TF_MASK))
3129 raise_exception(EXCP0D_GPF);
3130 }
3131#endif /* VBOX */
3132 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3133 env->segs[R_CS].selector = new_cs;
3134 env->segs[R_CS].base = (new_cs << 4);
3135 env->eip = new_eip;
3136#ifdef VBOX
3137 if (fVME)
3138 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3139 else
3140#endif
3141 if (env->eflags & VM_MASK)
3142 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3143 else
3144 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3145 if (shift == 0)
3146 eflags_mask &= 0xffff;
3147 load_eflags(new_eflags, eflags_mask);
3148 env->hflags2 &= ~HF2_NMI_MASK;
3149#ifdef VBOX
3150 if (fVME)
3151 {
3152 if (new_eflags & IF_MASK)
3153 env->eflags |= VIF_MASK;
3154 else
3155 env->eflags &= ~VIF_MASK;
3156 }
3157#endif /* VBOX */
3158}
3159
3160#ifndef VBOX
3161static inline void validate_seg(int seg_reg, int cpl)
3162#else /* VBOX */
3163DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3164#endif /* VBOX */
3165{
3166 int dpl;
3167 uint32_t e2;
3168
3169 /* XXX: on x86_64, we do not want to nullify FS and GS because
3170 they may still contain a valid base. I would be interested to
3171 know how a real x86_64 CPU behaves */
3172 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3173 (env->segs[seg_reg].selector & 0xfffc) == 0)
3174 return;
3175
3176 e2 = env->segs[seg_reg].flags;
3177 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3178 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3179 /* data or non conforming code segment */
3180 if (dpl < cpl) {
3181 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3182 }
3183 }
3184}
3185
3186/* protected mode iret */
3187#ifndef VBOX
3188static inline void helper_ret_protected(int shift, int is_iret, int addend)
3189#else /* VBOX */
3190DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3191#endif /* VBOX */
3192{
3193 uint32_t new_cs, new_eflags, new_ss;
3194 uint32_t new_es, new_ds, new_fs, new_gs;
3195 uint32_t e1, e2, ss_e1, ss_e2;
3196 int cpl, dpl, rpl, eflags_mask, iopl;
3197 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3198
3199#ifdef VBOX
3200 ss_e1 = ss_e2 = e1 = e2 = 0;
3201#endif
3202
3203#ifdef TARGET_X86_64
3204 if (shift == 2)
3205 sp_mask = -1;
3206 else
3207#endif
3208 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3209 sp = ESP;
3210 ssp = env->segs[R_SS].base;
3211 new_eflags = 0; /* avoid warning */
3212#ifdef TARGET_X86_64
3213 if (shift == 2) {
3214 POPQ(sp, new_eip);
3215 POPQ(sp, new_cs);
3216 new_cs &= 0xffff;
3217 if (is_iret) {
3218 POPQ(sp, new_eflags);
3219 }
3220 } else
3221#endif
3222 if (shift == 1) {
3223 /* 32 bits */
3224 POPL(ssp, sp, sp_mask, new_eip);
3225 POPL(ssp, sp, sp_mask, new_cs);
3226 new_cs &= 0xffff;
3227 if (is_iret) {
3228 POPL(ssp, sp, sp_mask, new_eflags);
3229#if defined(VBOX) && defined(DEBUG)
3230 printf("iret: new CS %04X\n", new_cs);
3231 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3232 printf("iret: new EFLAGS %08X\n", new_eflags);
3233 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3234#endif
3235 if (new_eflags & VM_MASK)
3236 goto return_to_vm86;
3237 }
3238#ifdef VBOX
3239 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3240 {
3241#ifdef DEBUG
3242 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3243#endif
3244 new_cs = new_cs & 0xfffc;
3245 }
3246#endif
3247 } else {
3248 /* 16 bits */
3249 POPW(ssp, sp, sp_mask, new_eip);
3250 POPW(ssp, sp, sp_mask, new_cs);
3251 if (is_iret)
3252 POPW(ssp, sp, sp_mask, new_eflags);
3253 }
3254#ifdef DEBUG_PCALL
3255 if (loglevel & CPU_LOG_PCALL) {
3256 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3257 new_cs, new_eip, shift, addend);
3258 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3259 }
3260#endif
3261 if ((new_cs & 0xfffc) == 0)
3262 {
3263#if defined(VBOX) && defined(DEBUG)
3264 printf("(new_cs & 0xfffc) == 0\n");
3265#endif
3266 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3267 }
3268 if (load_segment(&e1, &e2, new_cs) != 0)
3269 {
3270#if defined(VBOX) && defined(DEBUG)
3271 printf("load_segment failed\n");
3272#endif
3273 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3274 }
3275 if (!(e2 & DESC_S_MASK) ||
3276 !(e2 & DESC_CS_MASK))
3277 {
3278#if defined(VBOX) && defined(DEBUG)
3279 printf("e2 mask %08x\n", e2);
3280#endif
3281 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3282 }
3283 cpl = env->hflags & HF_CPL_MASK;
3284 rpl = new_cs & 3;
3285 if (rpl < cpl)
3286 {
3287#if defined(VBOX) && defined(DEBUG)
3288 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3289#endif
3290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3291 }
3292 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3293 if (e2 & DESC_C_MASK) {
3294 if (dpl > rpl)
3295 {
3296#if defined(VBOX) && defined(DEBUG)
3297 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3298#endif
3299 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3300 }
3301 } else {
3302 if (dpl != rpl)
3303 {
3304#if defined(VBOX) && defined(DEBUG)
3305 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3306#endif
3307 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3308 }
3309 }
3310 if (!(e2 & DESC_P_MASK))
3311 {
3312#if defined(VBOX) && defined(DEBUG)
3313 printf("DESC_P_MASK e2=%08x\n", e2);
3314#endif
3315 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3316 }
3317
3318 sp += addend;
3319 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3320 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3321 /* return to same privilege level */
3322 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3323 get_seg_base(e1, e2),
3324 get_seg_limit(e1, e2),
3325 e2);
3326 } else {
3327 /* return to different privilege level */
3328#ifdef TARGET_X86_64
3329 if (shift == 2) {
3330 POPQ(sp, new_esp);
3331 POPQ(sp, new_ss);
3332 new_ss &= 0xffff;
3333 } else
3334#endif
3335 if (shift == 1) {
3336 /* 32 bits */
3337 POPL(ssp, sp, sp_mask, new_esp);
3338 POPL(ssp, sp, sp_mask, new_ss);
3339 new_ss &= 0xffff;
3340 } else {
3341 /* 16 bits */
3342 POPW(ssp, sp, sp_mask, new_esp);
3343 POPW(ssp, sp, sp_mask, new_ss);
3344 }
3345#ifdef DEBUG_PCALL
3346 if (loglevel & CPU_LOG_PCALL) {
3347 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3348 new_ss, new_esp);
3349 }
3350#endif
3351 if ((new_ss & 0xfffc) == 0) {
3352#ifdef TARGET_X86_64
3353 /* NULL ss is allowed in long mode if cpl != 3 */
3354 /* XXX: test CS64 ? */
3355 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3356 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3357 0, 0xffffffff,
3358 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3359 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3360 DESC_W_MASK | DESC_A_MASK);
3361 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3362 } else
3363#endif
3364 {
3365 raise_exception_err(EXCP0D_GPF, 0);
3366 }
3367 } else {
3368 if ((new_ss & 3) != rpl)
3369 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3370 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3371 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3372 if (!(ss_e2 & DESC_S_MASK) ||
3373 (ss_e2 & DESC_CS_MASK) ||
3374 !(ss_e2 & DESC_W_MASK))
3375 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3376 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3377 if (dpl != rpl)
3378 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3379 if (!(ss_e2 & DESC_P_MASK))
3380 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3381 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3382 get_seg_base(ss_e1, ss_e2),
3383 get_seg_limit(ss_e1, ss_e2),
3384 ss_e2);
3385 }
3386
3387 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3388 get_seg_base(e1, e2),
3389 get_seg_limit(e1, e2),
3390 e2);
3391 cpu_x86_set_cpl(env, rpl);
3392 sp = new_esp;
3393#ifdef TARGET_X86_64
3394 if (env->hflags & HF_CS64_MASK)
3395 sp_mask = -1;
3396 else
3397#endif
3398 sp_mask = get_sp_mask(ss_e2);
3399
3400 /* validate data segments */
3401 validate_seg(R_ES, rpl);
3402 validate_seg(R_DS, rpl);
3403 validate_seg(R_FS, rpl);
3404 validate_seg(R_GS, rpl);
3405
3406 sp += addend;
3407 }
3408 SET_ESP(sp, sp_mask);
3409 env->eip = new_eip;
3410 if (is_iret) {
3411 /* NOTE: 'cpl' is the _old_ CPL */
3412 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3413 if (cpl == 0)
3414#ifdef VBOX
3415 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3416#else
3417 eflags_mask |= IOPL_MASK;
3418#endif
3419 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3420 if (cpl <= iopl)
3421 eflags_mask |= IF_MASK;
3422 if (shift == 0)
3423 eflags_mask &= 0xffff;
3424 load_eflags(new_eflags, eflags_mask);
3425 }
3426 return;
3427
3428 return_to_vm86:
3429 POPL(ssp, sp, sp_mask, new_esp);
3430 POPL(ssp, sp, sp_mask, new_ss);
3431 POPL(ssp, sp, sp_mask, new_es);
3432 POPL(ssp, sp, sp_mask, new_ds);
3433 POPL(ssp, sp, sp_mask, new_fs);
3434 POPL(ssp, sp, sp_mask, new_gs);
3435
3436 /* modify processor state */
3437 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3438 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3439 load_seg_vm(R_CS, new_cs & 0xffff);
3440 cpu_x86_set_cpl(env, 3);
3441 load_seg_vm(R_SS, new_ss & 0xffff);
3442 load_seg_vm(R_ES, new_es & 0xffff);
3443 load_seg_vm(R_DS, new_ds & 0xffff);
3444 load_seg_vm(R_FS, new_fs & 0xffff);
3445 load_seg_vm(R_GS, new_gs & 0xffff);
3446
3447 env->eip = new_eip & 0xffff;
3448 ESP = new_esp;
3449}
3450
3451void helper_iret_protected(int shift, int next_eip)
3452{
3453 int tss_selector, type;
3454 uint32_t e1, e2;
3455
3456#ifdef VBOX
3457 e1 = e2 = 0;
3458 remR3TrapClear(env->pVM);
3459#endif
3460
3461 /* specific case for TSS */
3462 if (env->eflags & NT_MASK) {
3463#ifdef TARGET_X86_64
3464 if (env->hflags & HF_LMA_MASK)
3465 raise_exception_err(EXCP0D_GPF, 0);
3466#endif
3467 tss_selector = lduw_kernel(env->tr.base + 0);
3468 if (tss_selector & 4)
3469 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3470 if (load_segment(&e1, &e2, tss_selector) != 0)
3471 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3472 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3473 /* NOTE: we check both segment and busy TSS */
3474 if (type != 3)
3475 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3476 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3477 } else {
3478 helper_ret_protected(shift, 1, 0);
3479 }
3480 env->hflags2 &= ~HF2_NMI_MASK;
3481#ifdef USE_KQEMU
3482 if (kqemu_is_ok(env)) {
3483 CC_OP = CC_OP_EFLAGS;
3484 env->exception_index = -1;
3485 cpu_loop_exit();
3486 }
3487#endif
3488}
3489
3490void helper_lret_protected(int shift, int addend)
3491{
3492 helper_ret_protected(shift, 0, addend);
3493#ifdef USE_KQEMU
3494 if (kqemu_is_ok(env)) {
3495 env->exception_index = -1;
3496 cpu_loop_exit();
3497 }
3498#endif
3499}
3500
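/* SYSENTER: switch to CPL 0 with a flat code segment from IA32_SYSENTER_CS and a flat stack segment at selector IA32_SYSENTER_CS + 8; EIP and ESP come from IA32_SYSENTER_EIP/ESP. */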
3501void helper_sysenter(void)
3502{
3503 if (env->sysenter_cs == 0) {
3504 raise_exception_err(EXCP0D_GPF, 0);
3505 }
3506 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3507 cpu_x86_set_cpl(env, 0);
3508
3509#ifdef TARGET_X86_64
3510 if (env->hflags & HF_LMA_MASK) {
3511 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3512 0, 0xffffffff,
3513 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3514 DESC_S_MASK |
3515 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3516 } else
3517#endif
3518 {
3519 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3520 0, 0xffffffff,
3521 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3522 DESC_S_MASK |
3523 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3524 }
3525 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3526 0, 0xffffffff,
3527 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3528 DESC_S_MASK |
3529 DESC_W_MASK | DESC_A_MASK);
3530 ESP = env->sysenter_esp;
3531 EIP = env->sysenter_eip;
3532}
3533
3534void helper_sysexit(int dflag)
3535{
3536 int cpl;
3537
3538 cpl = env->hflags & HF_CPL_MASK;
3539 if (env->sysenter_cs == 0 || cpl != 0) {
3540 raise_exception_err(EXCP0D_GPF, 0);
3541 }
3542 cpu_x86_set_cpl(env, 3);
3543#ifdef TARGET_X86_64
3544 if (dflag == 2) {
3545 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3546 0, 0xffffffff,
3547 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3548 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3549 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3550 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3551 0, 0xffffffff,
3552 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3553 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3554 DESC_W_MASK | DESC_A_MASK);
3555 } else
3556#endif
3557 {
3558 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3559 0, 0xffffffff,
3560 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3561 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3562 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3563 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3564 0, 0xffffffff,
3565 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3566 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3567 DESC_W_MASK | DESC_A_MASK);
3568 }
3569 ESP = ECX;
3570 EIP = EDX;
3571#ifdef USE_KQEMU
3572 if (kqemu_is_ok(env)) {
3573 env->exception_index = -1;
3574 cpu_loop_exit();
3575 }
3576#endif
3577}
3578
3579#if defined(CONFIG_USER_ONLY)
3580target_ulong helper_read_crN(int reg)
3581{
3582 return 0;
3583}
3584
3585void helper_write_crN(int reg, target_ulong t0)
3586{
3587}
3588#else
3589target_ulong helper_read_crN(int reg)
3590{
3591 target_ulong val;
3592
3593 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3594 switch(reg) {
3595 default:
3596 val = env->cr[reg];
3597 break;
3598 case 8:
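    /* CR8 is the task-priority register: read it from the APIC unless SVM virtual interrupt masking is active, in which case the shadow v_tpr is returned */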
3599 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3600 val = cpu_get_apic_tpr(env);
3601 } else {
3602 val = env->v_tpr;
3603 }
3604 break;
3605 }
3606 return val;
3607}
3608
3609void helper_write_crN(int reg, target_ulong t0)
3610{
3611 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3612 switch(reg) {
3613 case 0:
3614 cpu_x86_update_cr0(env, t0);
3615 break;
3616 case 3:
3617 cpu_x86_update_cr3(env, t0);
3618 break;
3619 case 4:
3620 cpu_x86_update_cr4(env, t0);
3621 break;
3622 case 8:
3623 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3624 cpu_set_apic_tpr(env, t0);
3625 }
3626 env->v_tpr = t0 & 0x0f;
3627 break;
3628 default:
3629 env->cr[reg] = t0;
3630 break;
3631 }
3632}
3633#endif
3634
3635void helper_lmsw(target_ulong t0)
3636{
3637 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3638 if already set to one. */
3639 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3640 helper_write_crN(0, t0);
3641}
3642
3643void helper_clts(void)
3644{
3645 env->cr[0] &= ~CR0_TS_MASK;
3646 env->hflags &= ~HF_TS_MASK;
3647}
3648
3649/* XXX: do more */
3650void helper_movl_drN_T0(int reg, target_ulong t0)
3651{
3652 env->dr[reg] = t0;
3653}
3654
3655void helper_invlpg(target_ulong addr)
3656{
3657 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3658 tlb_flush_page(env, addr);
3659}
3660
3661void helper_rdtsc(void)
3662{
3663 uint64_t val;
3664
3665 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3666 raise_exception(EXCP0D_GPF);
3667 }
3668 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3669
3670 val = cpu_get_tsc(env) + env->tsc_offset;
3671 EAX = (uint32_t)(val);
3672 EDX = (uint32_t)(val >> 32);
3673}
3674
3675#ifdef VBOX
3676void helper_rdtscp(void)
3677{
3678 uint64_t val;
3679 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3680 raise_exception(EXCP0D_GPF);
3681 }
3682
3683 val = cpu_get_tsc(env);
3684 EAX = (uint32_t)(val);
3685 EDX = (uint32_t)(val >> 32);
3686 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3687}
3688#endif
3689
3690void helper_rdpmc(void)
3691{
3692 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3693 raise_exception(EXCP0D_GPF);
3694 }
3695 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3696
3697 /* currently unimplemented */
3698 raise_exception_err(EXCP06_ILLOP, 0);
3699}
3700
3701#if defined(CONFIG_USER_ONLY)
3702void helper_wrmsr(void)
3703{
3704}
3705
3706void helper_rdmsr(void)
3707{
3708}
3709#else
3710void helper_wrmsr(void)
3711{
3712 uint64_t val;
3713
3714 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3715
3716 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3717
3718 switch((uint32_t)ECX) {
3719 case MSR_IA32_SYSENTER_CS:
3720 env->sysenter_cs = val & 0xffff;
3721 break;
3722 case MSR_IA32_SYSENTER_ESP:
3723 env->sysenter_esp = val;
3724 break;
3725 case MSR_IA32_SYSENTER_EIP:
3726 env->sysenter_eip = val;
3727 break;
3728 case MSR_IA32_APICBASE:
3729 cpu_set_apic_base(env, val);
3730 break;
3731 case MSR_EFER:
3732 {
3733 uint64_t update_mask;
3734 update_mask = 0;
3735 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3736 update_mask |= MSR_EFER_SCE;
3737 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3738 update_mask |= MSR_EFER_LME;
3739 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3740 update_mask |= MSR_EFER_FFXSR;
3741 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3742 update_mask |= MSR_EFER_NXE;
3743 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3744 update_mask |= MSR_EFER_SVME;
3745 cpu_load_efer(env, (env->efer & ~update_mask) |
3746 (val & update_mask));
3747 }
3748 break;
3749 case MSR_STAR:
3750 env->star = val;
3751 break;
3752 case MSR_PAT:
3753 env->pat = val;
3754 break;
3755 case MSR_VM_HSAVE_PA:
3756 env->vm_hsave = val;
3757 break;
3758#ifdef TARGET_X86_64
3759 case MSR_LSTAR:
3760 env->lstar = val;
3761 break;
3762 case MSR_CSTAR:
3763 env->cstar = val;
3764 break;
3765 case MSR_FMASK:
3766 env->fmask = val;
3767 break;
3768 case MSR_FSBASE:
3769 env->segs[R_FS].base = val;
3770 break;
3771 case MSR_GSBASE:
3772 env->segs[R_GS].base = val;
3773 break;
3774 case MSR_KERNELGSBASE:
3775 env->kernelgsbase = val;
3776 break;
3777#endif
3778 default:
3779#ifndef VBOX
3780 /* XXX: exception ? */
3781 break;
3782#else /* VBOX */
3783 {
3784 uint32_t ecx = (uint32_t)ECX;
3785 /* In X2APIC specification this range is reserved for APIC control. */
3786 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3787 cpu_apic_wrmsr(env, ecx, val);
3788 /** @todo else exception? */
3789 break;
3790 }
3791 case MSR_K8_TSC_AUX:
3792 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3793 break;
3794#endif /* VBOX */
3795 }
3796}
3797
3798void helper_rdmsr(void)
3799{
3800 uint64_t val;
3801
3802 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3803
3804 switch((uint32_t)ECX) {
3805 case MSR_IA32_SYSENTER_CS:
3806 val = env->sysenter_cs;
3807 break;
3808 case MSR_IA32_SYSENTER_ESP:
3809 val = env->sysenter_esp;
3810 break;
3811 case MSR_IA32_SYSENTER_EIP:
3812 val = env->sysenter_eip;
3813 break;
3814 case MSR_IA32_APICBASE:
3815 val = cpu_get_apic_base(env);
3816 break;
3817 case MSR_EFER:
3818 val = env->efer;
3819 break;
3820 case MSR_STAR:
3821 val = env->star;
3822 break;
3823 case MSR_PAT:
3824 val = env->pat;
3825 break;
3826 case MSR_VM_HSAVE_PA:
3827 val = env->vm_hsave;
3828 break;
3829 case MSR_IA32_PERF_STATUS:
3830 /* tsc_increment_by_tick */
3831 val = 1000ULL;
3832 /* CPU multiplier */
3833 val |= (((uint64_t)4ULL) << 40);
3834 break;
3835#ifdef TARGET_X86_64
3836 case MSR_LSTAR:
3837 val = env->lstar;
3838 break;
3839 case MSR_CSTAR:
3840 val = env->cstar;
3841 break;
3842 case MSR_FMASK:
3843 val = env->fmask;
3844 break;
3845 case MSR_FSBASE:
3846 val = env->segs[R_FS].base;
3847 break;
3848 case MSR_GSBASE:
3849 val = env->segs[R_GS].base;
3850 break;
3851 case MSR_KERNELGSBASE:
3852 val = env->kernelgsbase;
3853 break;
3854#endif
3855#ifdef USE_KQEMU
3856 case MSR_QPI_COMMBASE:
3857 if (env->kqemu_enabled) {
3858 val = kqemu_comm_base;
3859 } else {
3860 val = 0;
3861 }
3862 break;
3863#endif
3864 default:
3865#ifndef VBOX
3866 /* XXX: exception ? */
3867 val = 0;
3868 break;
3869#else /* VBOX */
3870 {
3871 uint32_t ecx = (uint32_t)ECX;
3872 /* In X2APIC specification this range is reserved for APIC control. */
3873 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3874 val = cpu_apic_rdmsr(env, ecx);
3875 else
3876 val = 0; /** @todo else exception? */
3877 break;
3878 }
3879 case MSR_K8_TSC_AUX:
3880 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3881 break;
3882#endif /* VBOX */
3883 }
3884 EAX = (uint32_t)(val);
3885 EDX = (uint32_t)(val >> 32);
3886}
3887#endif
3888
3889target_ulong helper_lsl(target_ulong selector1)
3890{
3891 unsigned int limit;
3892 uint32_t e1, e2, eflags, selector;
3893 int rpl, dpl, cpl, type;
3894
3895 selector = selector1 & 0xffff;
3896 eflags = cc_table[CC_OP].compute_all();
3897 if (load_segment(&e1, &e2, selector) != 0)
3898 goto fail;
3899 rpl = selector & 3;
3900 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3901 cpl = env->hflags & HF_CPL_MASK;
3902 if (e2 & DESC_S_MASK) {
3903 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3904 /* conforming */
3905 } else {
3906 if (dpl < cpl || dpl < rpl)
3907 goto fail;
3908 }
3909 } else {
3910 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3911 switch(type) {
3912 case 1:
3913 case 2:
3914 case 3:
3915 case 9:
3916 case 11:
3917 break;
3918 default:
3919 goto fail;
3920 }
3921 if (dpl < cpl || dpl < rpl) {
3922 fail:
3923 CC_SRC = eflags & ~CC_Z;
3924 return 0;
3925 }
3926 }
3927 limit = get_seg_limit(e1, e2);
3928 CC_SRC = eflags | CC_Z;
3929 return limit;
3930}
3931
3932target_ulong helper_lar(target_ulong selector1)
3933{
3934 uint32_t e1, e2, eflags, selector;
3935 int rpl, dpl, cpl, type;
3936
3937 selector = selector1 & 0xffff;
3938 eflags = cc_table[CC_OP].compute_all();
3939 if ((selector & 0xfffc) == 0)
3940 goto fail;
3941 if (load_segment(&e1, &e2, selector) != 0)
3942 goto fail;
3943 rpl = selector & 3;
3944 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3945 cpl = env->hflags & HF_CPL_MASK;
3946 if (e2 & DESC_S_MASK) {
3947 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3948 /* conforming */
3949 } else {
3950 if (dpl < cpl || dpl < rpl)
3951 goto fail;
3952 }
3953 } else {
3954 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3955 switch(type) {
3956 case 1:
3957 case 2:
3958 case 3:
3959 case 4:
3960 case 5:
3961 case 9:
3962 case 11:
3963 case 12:
3964 break;
3965 default:
3966 goto fail;
3967 }
3968 if (dpl < cpl || dpl < rpl) {
3969 fail:
3970 CC_SRC = eflags & ~CC_Z;
3971 return 0;
3972 }
3973 }
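    /* LAR returns the descriptor's access-rights bytes (type, S, DPL, P and the AVL/L/D/G bits) */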
3974 CC_SRC = eflags | CC_Z;
3975 return e2 & 0x00f0ff00;
3976}
3977
3978void helper_verr(target_ulong selector1)
3979{
3980 uint32_t e1, e2, eflags, selector;
3981 int rpl, dpl, cpl;
3982
3983 selector = selector1 & 0xffff;
3984 eflags = cc_table[CC_OP].compute_all();
3985 if ((selector & 0xfffc) == 0)
3986 goto fail;
3987 if (load_segment(&e1, &e2, selector) != 0)
3988 goto fail;
3989 if (!(e2 & DESC_S_MASK))
3990 goto fail;
3991 rpl = selector & 3;
3992 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3993 cpl = env->hflags & HF_CPL_MASK;
3994 if (e2 & DESC_CS_MASK) {
3995 if (!(e2 & DESC_R_MASK))
3996 goto fail;
3997 if (!(e2 & DESC_C_MASK)) {
3998 if (dpl < cpl || dpl < rpl)
3999 goto fail;
4000 }
4001 } else {
4002 if (dpl < cpl || dpl < rpl) {
4003 fail:
4004 CC_SRC = eflags & ~CC_Z;
4005 return;
4006 }
4007 }
4008 CC_SRC = eflags | CC_Z;
4009}
4010
4011void helper_verw(target_ulong selector1)
4012{
4013 uint32_t e1, e2, eflags, selector;
4014 int rpl, dpl, cpl;
4015
4016 selector = selector1 & 0xffff;
4017 eflags = cc_table[CC_OP].compute_all();
4018 if ((selector & 0xfffc) == 0)
4019 goto fail;
4020 if (load_segment(&e1, &e2, selector) != 0)
4021 goto fail;
4022 if (!(e2 & DESC_S_MASK))
4023 goto fail;
4024 rpl = selector & 3;
4025 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4026 cpl = env->hflags & HF_CPL_MASK;
4027 if (e2 & DESC_CS_MASK) {
4028 goto fail;
4029 } else {
4030 if (dpl < cpl || dpl < rpl)
4031 goto fail;
4032 if (!(e2 & DESC_W_MASK)) {
4033 fail:
4034 CC_SRC = eflags & ~CC_Z;
4035 return;
4036 }
4037 }
4038 CC_SRC = eflags | CC_Z;
4039}
4040
4041/* x87 FPU helpers */
4042
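/* Sets exception bits in the FPU status word; if any set exception is unmasked in the control
   word, the error-summary and busy flags are raised as well. */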
4043static void fpu_set_exception(int mask)
4044{
4045 env->fpus |= mask;
4046 if (env->fpus & (~env->fpuc & FPUC_EM))
4047 env->fpus |= FPUS_SE | FPUS_B;
4048}
4049
4050#ifndef VBOX
4051static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4052#else /* VBOX */
4053DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4054#endif /* VBOX */
4055{
4056 if (b == 0.0)
4057 fpu_set_exception(FPUS_ZE);
4058 return a / b;
4059}
4060
4061void fpu_raise_exception(void)
4062{
4063 if (env->cr[0] & CR0_NE_MASK) {
4064 raise_exception(EXCP10_COPR);
4065 }
4066#if !defined(CONFIG_USER_ONLY)
4067 else {
4068 cpu_set_ferr(env);
4069 }
4070#endif
4071}
4072
4073void helper_flds_FT0(uint32_t val)
4074{
4075 union {
4076 float32 f;
4077 uint32_t i;
4078 } u;
4079 u.i = val;
4080 FT0 = float32_to_floatx(u.f, &env->fp_status);
4081}
4082
4083void helper_fldl_FT0(uint64_t val)
4084{
4085 union {
4086 float64 f;
4087 uint64_t i;
4088 } u;
4089 u.i = val;
4090 FT0 = float64_to_floatx(u.f, &env->fp_status);
4091}
4092
4093void helper_fildl_FT0(int32_t val)
4094{
4095 FT0 = int32_to_floatx(val, &env->fp_status);
4096}
4097
4098void helper_flds_ST0(uint32_t val)
4099{
4100 int new_fpstt;
4101 union {
4102 float32 f;
4103 uint32_t i;
4104 } u;
4105 new_fpstt = (env->fpstt - 1) & 7;
4106 u.i = val;
4107 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4108 env->fpstt = new_fpstt;
4109 env->fptags[new_fpstt] = 0; /* validate stack entry */
4110}
4111
4112void helper_fldl_ST0(uint64_t val)
4113{
4114 int new_fpstt;
4115 union {
4116 float64 f;
4117 uint64_t i;
4118 } u;
4119 new_fpstt = (env->fpstt - 1) & 7;
4120 u.i = val;
4121 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4122 env->fpstt = new_fpstt;
4123 env->fptags[new_fpstt] = 0; /* validate stack entry */
4124}
4125
4126void helper_fildl_ST0(int32_t val)
4127{
4128 int new_fpstt;
4129 new_fpstt = (env->fpstt - 1) & 7;
4130 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4131 env->fpstt = new_fpstt;
4132 env->fptags[new_fpstt] = 0; /* validate stack entry */
4133}
4134
4135void helper_fildll_ST0(int64_t val)
4136{
4137 int new_fpstt;
4138 new_fpstt = (env->fpstt - 1) & 7;
4139 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4140 env->fpstt = new_fpstt;
4141 env->fptags[new_fpstt] = 0; /* validate stack entry */
4142}
4143
4144#ifndef VBOX
4145uint32_t helper_fsts_ST0(void)
4146#else
4147RTCCUINTREG helper_fsts_ST0(void)
4148#endif
4149{
4150 union {
4151 float32 f;
4152 uint32_t i;
4153 } u;
4154 u.f = floatx_to_float32(ST0, &env->fp_status);
4155 return u.i;
4156}
4157
4158uint64_t helper_fstl_ST0(void)
4159{
4160 union {
4161 float64 f;
4162 uint64_t i;
4163 } u;
4164 u.f = floatx_to_float64(ST0, &env->fp_status);
4165 return u.i;
4166}
4167#ifndef VBOX
4168int32_t helper_fist_ST0(void)
4169#else
4170RTCCINTREG helper_fist_ST0(void)
4171#endif
4172{
4173 int32_t val;
4174 val = floatx_to_int32(ST0, &env->fp_status);
4175 if (val != (int16_t)val)
4176 val = -32768;
4177 return val;
4178}
4179
4180#ifndef VBOX
4181int32_t helper_fistl_ST0(void)
4182#else
4183RTCCINTREG helper_fistl_ST0(void)
4184#endif
4185{
4186 int32_t val;
4187 val = floatx_to_int32(ST0, &env->fp_status);
4188 return val;
4189}
4190
4191int64_t helper_fistll_ST0(void)
4192{
4193 int64_t val;
4194 val = floatx_to_int64(ST0, &env->fp_status);
4195 return val;
4196}
4197
4198#ifndef VBOX
4199int32_t helper_fistt_ST0(void)
4200#else
4201RTCCINTREG helper_fistt_ST0(void)
4202#endif
4203{
4204 int32_t val;
4205 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4206 if (val != (int16_t)val)
4207 val = -32768;
4208 return val;
4209}
4210
4211#ifndef VBOX
4212int32_t helper_fisttl_ST0(void)
4213#else
4214RTCCINTREG helper_fisttl_ST0(void)
4215#endif
4216{
4217 int32_t val;
4218 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4219 return val;
4220}
4221
4222int64_t helper_fisttll_ST0(void)
4223{
4224 int64_t val;
4225 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4226 return val;
4227}
4228
4229void helper_fldt_ST0(target_ulong ptr)
4230{
4231 int new_fpstt;
4232 new_fpstt = (env->fpstt - 1) & 7;
4233 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4234 env->fpstt = new_fpstt;
4235 env->fptags[new_fpstt] = 0; /* validate stack entry */
4236}
4237
4238void helper_fstt_ST0(target_ulong ptr)
4239{
4240 helper_fstt(ST0, ptr);
4241}
4242
4243void helper_fpush(void)
4244{
4245 fpush();
4246}
4247
4248void helper_fpop(void)
4249{
4250 fpop();
4251}
4252
4253void helper_fdecstp(void)
4254{
4255 env->fpstt = (env->fpstt - 1) & 7;
4256 env->fpus &= (~0x4700);
4257}
4258
4259void helper_fincstp(void)
4260{
4261 env->fpstt = (env->fpstt + 1) & 7;
4262 env->fpus &= (~0x4700);
4263}
4264
4265/* FPU move */
4266
4267void helper_ffree_STN(int st_index)
4268{
4269 env->fptags[(env->fpstt + st_index) & 7] = 1;
4270}
4271
4272void helper_fmov_ST0_FT0(void)
4273{
4274 ST0 = FT0;
4275}
4276
4277void helper_fmov_FT0_STN(int st_index)
4278{
4279 FT0 = ST(st_index);
4280}
4281
4282void helper_fmov_ST0_STN(int st_index)
4283{
4284 ST0 = ST(st_index);
4285}
4286
4287void helper_fmov_STN_ST0(int st_index)
4288{
4289 ST(st_index) = ST0;
4290}
4291
4292void helper_fxchg_ST0_STN(int st_index)
4293{
4294 CPU86_LDouble tmp;
4295 tmp = ST(st_index);
4296 ST(st_index) = ST0;
4297 ST0 = tmp;
4298}
4299
4300/* FPU operations */
4301
4302static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4303
4304void helper_fcom_ST0_FT0(void)
4305{
4306 int ret;
4307
4308 ret = floatx_compare(ST0, FT0, &env->fp_status);
4309 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4310 FORCE_RET();
4311}
4312
4313void helper_fucom_ST0_FT0(void)
4314{
4315 int ret;
4316
4317 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4318 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4319 FORCE_RET();
4320}
4321
4322static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4323
4324void helper_fcomi_ST0_FT0(void)
4325{
4326 int eflags;
4327 int ret;
4328
4329 ret = floatx_compare(ST0, FT0, &env->fp_status);
4330 eflags = cc_table[CC_OP].compute_all();
4331 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4332 CC_SRC = eflags;
4333 FORCE_RET();
4334}
4335
4336void helper_fucomi_ST0_FT0(void)
4337{
4338 int eflags;
4339 int ret;
4340
4341 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4342 eflags = cc_table[CC_OP].compute_all();
4343 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4344 CC_SRC = eflags;
4345 FORCE_RET();
4346}
4347
4348void helper_fadd_ST0_FT0(void)
4349{
4350 ST0 += FT0;
4351}
4352
4353void helper_fmul_ST0_FT0(void)
4354{
4355 ST0 *= FT0;
4356}
4357
4358void helper_fsub_ST0_FT0(void)
4359{
4360 ST0 -= FT0;
4361}
4362
4363void helper_fsubr_ST0_FT0(void)
4364{
4365 ST0 = FT0 - ST0;
4366}
4367
4368void helper_fdiv_ST0_FT0(void)
4369{
4370 ST0 = helper_fdiv(ST0, FT0);
4371}
4372
4373void helper_fdivr_ST0_FT0(void)
4374{
4375 ST0 = helper_fdiv(FT0, ST0);
4376}
4377
4378/* fp operations between STN and ST0 */
4379
4380void helper_fadd_STN_ST0(int st_index)
4381{
4382 ST(st_index) += ST0;
4383}
4384
4385void helper_fmul_STN_ST0(int st_index)
4386{
4387 ST(st_index) *= ST0;
4388}
4389
4390void helper_fsub_STN_ST0(int st_index)
4391{
4392 ST(st_index) -= ST0;
4393}
4394
4395void helper_fsubr_STN_ST0(int st_index)
4396{
4397 CPU86_LDouble *p;
4398 p = &ST(st_index);
4399 *p = ST0 - *p;
4400}
4401
4402void helper_fdiv_STN_ST0(int st_index)
4403{
4404 CPU86_LDouble *p;
4405 p = &ST(st_index);
4406 *p = helper_fdiv(*p, ST0);
4407}
4408
4409void helper_fdivr_STN_ST0(int st_index)
4410{
4411 CPU86_LDouble *p;
4412 p = &ST(st_index);
4413 *p = helper_fdiv(ST0, *p);
4414}
4415
4416/* misc FPU operations */
4417void helper_fchs_ST0(void)
4418{
4419 ST0 = floatx_chs(ST0);
4420}
4421
4422void helper_fabs_ST0(void)
4423{
4424 ST0 = floatx_abs(ST0);
4425}
4426
4427void helper_fld1_ST0(void)
4428{
4429 ST0 = f15rk[1];
4430}
4431
4432void helper_fldl2t_ST0(void)
4433{
4434 ST0 = f15rk[6];
4435}
4436
4437void helper_fldl2e_ST0(void)
4438{
4439 ST0 = f15rk[5];
4440}
4441
4442void helper_fldpi_ST0(void)
4443{
4444 ST0 = f15rk[2];
4445}
4446
4447void helper_fldlg2_ST0(void)
4448{
4449 ST0 = f15rk[3];
4450}
4451
4452void helper_fldln2_ST0(void)
4453{
4454 ST0 = f15rk[4];
4455}
4456
4457void helper_fldz_ST0(void)
4458{
4459 ST0 = f15rk[0];
4460}
4461
4462void helper_fldz_FT0(void)
4463{
4464 FT0 = f15rk[0];
4465}
4466
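/* FNSTSW: return the status word with the current top-of-stack index merged into bits 11-13. */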
4467#ifndef VBOX
4468uint32_t helper_fnstsw(void)
4469#else
4470RTCCUINTREG helper_fnstsw(void)
4471#endif
4472{
4473 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4474}
4475
4476#ifndef VBOX
4477uint32_t helper_fnstcw(void)
4478#else
4479RTCCUINTREG helper_fnstcw(void)
4480#endif
4481{
4482 return env->fpuc;
4483}
4484
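/* Propagates the rounding control (and, with FLOATX80, the precision control) bits of the FPU
   control word into the softfloat status. */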
4485static void update_fp_status(void)
4486{
4487 int rnd_type;
4488
4489 /* set rounding mode */
4490 switch(env->fpuc & RC_MASK) {
4491 default:
4492 case RC_NEAR:
4493 rnd_type = float_round_nearest_even;
4494 break;
4495 case RC_DOWN:
4496 rnd_type = float_round_down;
4497 break;
4498 case RC_UP:
4499 rnd_type = float_round_up;
4500 break;
4501 case RC_CHOP:
4502 rnd_type = float_round_to_zero;
4503 break;
4504 }
4505 set_float_rounding_mode(rnd_type, &env->fp_status);
4506#ifdef FLOATX80
4507 switch((env->fpuc >> 8) & 3) {
4508 case 0:
4509 rnd_type = 32;
4510 break;
4511 case 2:
4512 rnd_type = 64;
4513 break;
4514 case 3:
4515 default:
4516 rnd_type = 80;
4517 break;
4518 }
4519 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4520#endif
4521}
4522
4523void helper_fldcw(uint32_t val)
4524{
4525 env->fpuc = val;
4526 update_fp_status();
4527}
4528
4529void helper_fclex(void)
4530{
4531 env->fpus &= 0x7f00;
4532}
4533
4534void helper_fwait(void)
4535{
4536 if (env->fpus & FPUS_SE)
4537 fpu_raise_exception();
4538 FORCE_RET();
4539}
4540
4541void helper_fninit(void)
4542{
4543 env->fpus = 0;
4544 env->fpstt = 0;
4545 env->fpuc = 0x37f;
4546 env->fptags[0] = 1;
4547 env->fptags[1] = 1;
4548 env->fptags[2] = 1;
4549 env->fptags[3] = 1;
4550 env->fptags[4] = 1;
4551 env->fptags[5] = 1;
4552 env->fptags[6] = 1;
4553 env->fptags[7] = 1;
4554}
4555
4556/* BCD ops */
4557
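/* FBLD: load a packed BCD value (9 digit bytes plus a sign byte at offset 9) and push it onto
   the FPU stack. */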
4558void helper_fbld_ST0(target_ulong ptr)
4559{
4560 CPU86_LDouble tmp;
4561 uint64_t val;
4562 unsigned int v;
4563 int i;
4564
4565 val = 0;
4566 for(i = 8; i >= 0; i--) {
4567 v = ldub(ptr + i);
4568 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4569 }
4570 tmp = val;
4571 if (ldub(ptr + 9) & 0x80)
4572 tmp = -tmp;
4573 fpush();
4574 ST0 = tmp;
4575}
4576
4577void helper_fbst_ST0(target_ulong ptr)
4578{
4579 int v;
4580 target_ulong mem_ref, mem_end;
4581 int64_t val;
4582
4583 val = floatx_to_int64(ST0, &env->fp_status);
4584 mem_ref = ptr;
4585 mem_end = mem_ref + 9;
4586 if (val < 0) {
4587 stb(mem_end, 0x80);
4588 val = -val;
4589 } else {
4590 stb(mem_end, 0x00);
4591 }
4592 while (mem_ref < mem_end) {
4593 if (val == 0)
4594 break;
4595 v = val % 100;
4596 val = val / 100;
4597 v = ((v / 10) << 4) | (v % 10);
4598 stb(mem_ref++, v);
4599 }
4600 while (mem_ref < mem_end) {
4601 stb(mem_ref++, 0);
4602 }
4603}
4604
4605void helper_f2xm1(void)
4606{
4607 ST0 = pow(2.0,ST0) - 1.0;
4608}
4609
4610void helper_fyl2x(void)
4611{
4612 CPU86_LDouble fptemp;
4613
4614 fptemp = ST0;
4615 if (fptemp>0.0){
4616 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4617 ST1 *= fptemp;
4618 fpop();
4619 } else {
4620 env->fpus &= (~0x4700);
4621 env->fpus |= 0x400;
4622 }
4623}
4624
4625void helper_fptan(void)
4626{
4627 CPU86_LDouble fptemp;
4628
4629 fptemp = ST0;
4630 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4631 env->fpus |= 0x400;
4632 } else {
4633 ST0 = tan(fptemp);
4634 fpush();
4635 ST0 = 1.0;
4636 env->fpus &= (~0x400); /* C2 <-- 0 */
4637 /* the above code is for |arg| < 2**52 only */
4638 }
4639}
4640
4641void helper_fpatan(void)
4642{
4643 CPU86_LDouble fptemp, fpsrcop;
4644
4645 fpsrcop = ST1;
4646 fptemp = ST0;
4647 ST1 = atan2(fpsrcop,fptemp);
4648 fpop();
4649}
4650
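/* FXTRACT: replace ST0 with its unbiased exponent and push the significand (exponent rebased
   to the bias) on top. */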
4651void helper_fxtract(void)
4652{
4653 CPU86_LDoubleU temp;
4654 unsigned int expdif;
4655
4656 temp.d = ST0;
4657 expdif = EXPD(temp) - EXPBIAS;
4658 /*DP exponent bias*/
4659 ST0 = expdif;
4660 fpush();
4661 BIASEXPONENT(temp);
4662 ST0 = temp.d;
4663}
4664
4665#ifdef VBOX
4666#ifdef _MSC_VER
4667/* MSC cannot divide by zero */
4668extern double _Nan;
4669#define NaN _Nan
4670#else
4671#define NaN (0.0 / 0.0)
4672#endif
4673#endif /* VBOX */
4674
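/* FPREM1: IEEE partial remainder of ST0 by ST1; C2 is set while the reduction is incomplete
   (exponent difference >= 53), otherwise C0/C3/C1 receive the low quotient bits. */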
4675void helper_fprem1(void)
4676{
4677 CPU86_LDouble dblq, fpsrcop, fptemp;
4678 CPU86_LDoubleU fpsrcop1, fptemp1;
4679 int expdif;
4680 signed long long int q;
4681
4682#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4683 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4684#else
4685 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4686#endif
4687 ST0 = 0.0 / 0.0; /* NaN */
4688 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4689 return;
4690 }
4691
4692 fpsrcop = ST0;
4693 fptemp = ST1;
4694 fpsrcop1.d = fpsrcop;
4695 fptemp1.d = fptemp;
4696 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4697
4698 if (expdif < 0) {
4699 /* optimisation? taken from the AMD docs */
4700 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4701 /* ST0 is unchanged */
4702 return;
4703 }
4704
4705 if (expdif < 53) {
4706 dblq = fpsrcop / fptemp;
4707 /* round dblq towards nearest integer */
4708 dblq = rint(dblq);
4709 ST0 = fpsrcop - fptemp * dblq;
4710
4711 /* convert dblq to q by truncating towards zero */
4712 if (dblq < 0.0)
4713 q = (signed long long int)(-dblq);
4714 else
4715 q = (signed long long int)dblq;
4716
4717 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4718 /* (C0,C3,C1) <-- (q2,q1,q0) */
4719 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4720 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4721 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4722 } else {
4723 env->fpus |= 0x400; /* C2 <-- 1 */
4724 fptemp = pow(2.0, expdif - 50);
4725 fpsrcop = (ST0 / ST1) / fptemp;
4726 /* fpsrcop = integer obtained by chopping */
4727 fpsrcop = (fpsrcop < 0.0) ?
4728 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4729 ST0 -= (ST1 * fpsrcop * fptemp);
4730 }
4731}
4732
4733void helper_fprem(void)
4734{
4735 CPU86_LDouble dblq, fpsrcop, fptemp;
4736 CPU86_LDoubleU fpsrcop1, fptemp1;
4737 int expdif;
4738 signed long long int q;
4739
4740#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4741 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4742#else
4743 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4744#endif
4745 ST0 = 0.0 / 0.0; /* NaN */
4746 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4747 return;
4748 }
4749
4750 fpsrcop = (CPU86_LDouble)ST0;
4751 fptemp = (CPU86_LDouble)ST1;
4752 fpsrcop1.d = fpsrcop;
4753 fptemp1.d = fptemp;
4754 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4755
4756 if (expdif < 0) {
4757 /* optimisation? taken from the AMD docs */
4758 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4759 /* ST0 is unchanged */
4760 return;
4761 }
4762
4763 if ( expdif < 53 ) {
4764 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4765 /* round dblq towards zero */
4766 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4767 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4768
4769 /* convert dblq to q by truncating towards zero */
4770 if (dblq < 0.0)
4771 q = (signed long long int)(-dblq);
4772 else
4773 q = (signed long long int)dblq;
4774
4775 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4776 /* (C0,C3,C1) <-- (q2,q1,q0) */
4777 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4778 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4779 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4780 } else {
4781 int N = 32 + (expdif % 32); /* as per AMD docs */
4782 env->fpus |= 0x400; /* C2 <-- 1 */
4783 fptemp = pow(2.0, (double)(expdif - N));
4784 fpsrcop = (ST0 / ST1) / fptemp;
4785 /* fpsrcop = integer obtained by chopping */
4786 fpsrcop = (fpsrcop < 0.0) ?
4787 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4788 ST0 -= (ST1 * fpsrcop * fptemp);
4789 }
4790}
4791
4792void helper_fyl2xp1(void)
4793{
4794 CPU86_LDouble fptemp;
4795
4796 fptemp = ST0;
4797 if ((fptemp+1.0)>0.0) {
4798 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4799 ST1 *= fptemp;
4800 fpop();
4801 } else {
4802 env->fpus &= (~0x4700);
4803 env->fpus |= 0x400;
4804 }
4805}
4806
4807void helper_fsqrt(void)
4808{
4809 CPU86_LDouble fptemp;
4810
4811 fptemp = ST0;
4812 if (fptemp<0.0) {
4813 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4814 env->fpus |= 0x400;
4815 }
4816 ST0 = sqrt(fptemp);
4817}
4818
4819void helper_fsincos(void)
4820{
4821 CPU86_LDouble fptemp;
4822
4823 fptemp = ST0;
4824 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4825 env->fpus |= 0x400;
4826 } else {
4827 ST0 = sin(fptemp);
4828 fpush();
4829 ST0 = cos(fptemp);
4830 env->fpus &= (~0x400); /* C2 <-- 0 */
4831 /* the above code is for |arg| < 2**63 only */
4832 }
4833}
4834
4835void helper_frndint(void)
4836{
4837 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4838}
4839
4840void helper_fscale(void)
4841{
4842 ST0 = ldexp (ST0, (int)(ST1));
4843}
4844
4845void helper_fsin(void)
4846{
4847 CPU86_LDouble fptemp;
4848
4849 fptemp = ST0;
4850 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4851 env->fpus |= 0x400;
4852 } else {
4853 ST0 = sin(fptemp);
4854 env->fpus &= (~0x400); /* C2 <-- 0 */
4855 /* the above code is for |arg| < 2**53 only */
4856 }
4857}
4858
4859void helper_fcos(void)
4860{
4861 CPU86_LDouble fptemp;
4862
4863 fptemp = ST0;
4864 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4865 env->fpus |= 0x400;
4866 } else {
4867 ST0 = cos(fptemp);
4868 env->fpus &= (~0x400); /* C2 <-- 0 */
4869         /* the above code is for |arg| < 2**63 only */
4870 }
4871}
4872
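/* FXAM: classify ST0 (NaN, infinity, zero, denormal or normal) into the C3/C2/C0 condition
   bits, with C1 holding the sign. */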
4873void helper_fxam_ST0(void)
4874{
4875 CPU86_LDoubleU temp;
4876 int expdif;
4877
4878 temp.d = ST0;
4879
4880 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4881 if (SIGND(temp))
4882 env->fpus |= 0x200; /* C1 <-- 1 */
4883
4884 /* XXX: test fptags too */
4885 expdif = EXPD(temp);
4886 if (expdif == MAXEXPD) {
4887#ifdef USE_X86LDOUBLE
4888 if (MANTD(temp) == 0x8000000000000000ULL)
4889#else
4890 if (MANTD(temp) == 0)
4891#endif
4892 env->fpus |= 0x500 /*Infinity*/;
4893 else
4894 env->fpus |= 0x100 /*NaN*/;
4895 } else if (expdif == 0) {
4896 if (MANTD(temp) == 0)
4897 env->fpus |= 0x4000 /*Zero*/;
4898 else
4899 env->fpus |= 0x4400 /*Denormal*/;
4900 } else {
4901 env->fpus |= 0x400;
4902 }
4903}
4904
4905void helper_fstenv(target_ulong ptr, int data32)
4906{
4907 int fpus, fptag, exp, i;
4908 uint64_t mant;
4909 CPU86_LDoubleU tmp;
4910
4911 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4912 fptag = 0;
4913 for (i=7; i>=0; i--) {
4914 fptag <<= 2;
4915 if (env->fptags[i]) {
4916 fptag |= 3;
4917 } else {
4918 tmp.d = env->fpregs[i].d;
4919 exp = EXPD(tmp);
4920 mant = MANTD(tmp);
4921 if (exp == 0 && mant == 0) {
4922 /* zero */
4923 fptag |= 1;
4924 } else if (exp == 0 || exp == MAXEXPD
4925#ifdef USE_X86LDOUBLE
4926 || (mant & (1LL << 63)) == 0
4927#endif
4928 ) {
4929 /* NaNs, infinity, denormal */
4930 fptag |= 2;
4931 }
4932 }
4933 }
4934 if (data32) {
4935 /* 32 bit */
4936 stl(ptr, env->fpuc);
4937 stl(ptr + 4, fpus);
4938 stl(ptr + 8, fptag);
4939 stl(ptr + 12, 0); /* fpip */
4940 stl(ptr + 16, 0); /* fpcs */
4941 stl(ptr + 20, 0); /* fpoo */
4942 stl(ptr + 24, 0); /* fpos */
4943 } else {
4944 /* 16 bit */
4945 stw(ptr, env->fpuc);
4946 stw(ptr + 2, fpus);
4947 stw(ptr + 4, fptag);
4948 stw(ptr + 6, 0);
4949 stw(ptr + 8, 0);
4950 stw(ptr + 10, 0);
4951 stw(ptr + 12, 0);
4952 }
4953}
4954
4955void helper_fldenv(target_ulong ptr, int data32)
4956{
4957 int i, fpus, fptag;
4958
4959 if (data32) {
4960 env->fpuc = lduw(ptr);
4961 fpus = lduw(ptr + 4);
4962 fptag = lduw(ptr + 8);
4963 }
4964 else {
4965 env->fpuc = lduw(ptr);
4966 fpus = lduw(ptr + 2);
4967 fptag = lduw(ptr + 4);
4968 }
4969 env->fpstt = (fpus >> 11) & 7;
4970 env->fpus = fpus & ~0x3800;
4971 for(i = 0;i < 8; i++) {
4972 env->fptags[i] = ((fptag & 3) == 3);
4973 fptag >>= 2;
4974 }
4975}
4976
4977void helper_fsave(target_ulong ptr, int data32)
4978{
4979 CPU86_LDouble tmp;
4980 int i;
4981
4982 helper_fstenv(ptr, data32);
4983
4984 ptr += (14 << data32);
4985 for(i = 0;i < 8; i++) {
4986 tmp = ST(i);
4987 helper_fstt(tmp, ptr);
4988 ptr += 10;
4989 }
4990
4991 /* fninit */
4992 env->fpus = 0;
4993 env->fpstt = 0;
4994 env->fpuc = 0x37f;
4995 env->fptags[0] = 1;
4996 env->fptags[1] = 1;
4997 env->fptags[2] = 1;
4998 env->fptags[3] = 1;
4999 env->fptags[4] = 1;
5000 env->fptags[5] = 1;
5001 env->fptags[6] = 1;
5002 env->fptags[7] = 1;
5003}
5004
5005void helper_frstor(target_ulong ptr, int data32)
5006{
5007 CPU86_LDouble tmp;
5008 int i;
5009
5010 helper_fldenv(ptr, data32);
5011 ptr += (14 << data32);
5012
5013 for(i = 0;i < 8; i++) {
5014 tmp = helper_fldt(ptr);
5015 ST(i) = tmp;
5016 ptr += 10;
5017 }
5018}
5019
5020void helper_fxsave(target_ulong ptr, int data64)
5021{
5022 int fpus, fptag, i, nb_xmm_regs;
5023 CPU86_LDouble tmp;
5024 target_ulong addr;
5025
5026 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5027 fptag = 0;
5028 for(i = 0; i < 8; i++) {
5029 fptag |= (env->fptags[i] << i);
5030 }
5031 stw(ptr, env->fpuc);
5032 stw(ptr + 2, fpus);
5033 stw(ptr + 4, fptag ^ 0xff);
5034#ifdef TARGET_X86_64
5035 if (data64) {
5036 stq(ptr + 0x08, 0); /* rip */
5037 stq(ptr + 0x10, 0); /* rdp */
5038 } else
5039#endif
5040 {
5041 stl(ptr + 0x08, 0); /* eip */
5042 stl(ptr + 0x0c, 0); /* sel */
5043 stl(ptr + 0x10, 0); /* dp */
5044 stl(ptr + 0x14, 0); /* sel */
5045 }
5046
5047 addr = ptr + 0x20;
5048 for(i = 0;i < 8; i++) {
5049 tmp = ST(i);
5050 helper_fstt(tmp, addr);
5051 addr += 16;
5052 }
5053
5054 if (env->cr[4] & CR4_OSFXSR_MASK) {
5055 /* XXX: finish it */
5056 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5057 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5058 if (env->hflags & HF_CS64_MASK)
5059 nb_xmm_regs = 16;
5060 else
5061 nb_xmm_regs = 8;
5062 addr = ptr + 0xa0;
5063 for(i = 0; i < nb_xmm_regs; i++) {
5064 stq(addr, env->xmm_regs[i].XMM_Q(0));
5065 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5066 addr += 16;
5067 }
5068 }
5069}
5070
5071void helper_fxrstor(target_ulong ptr, int data64)
5072{
5073 int i, fpus, fptag, nb_xmm_regs;
5074 CPU86_LDouble tmp;
5075 target_ulong addr;
5076
5077 env->fpuc = lduw(ptr);
5078 fpus = lduw(ptr + 2);
5079 fptag = lduw(ptr + 4);
5080 env->fpstt = (fpus >> 11) & 7;
5081 env->fpus = fpus & ~0x3800;
5082 fptag ^= 0xff;
5083 for(i = 0;i < 8; i++) {
5084 env->fptags[i] = ((fptag >> i) & 1);
5085 }
5086
5087 addr = ptr + 0x20;
5088 for(i = 0;i < 8; i++) {
5089 tmp = helper_fldt(addr);
5090 ST(i) = tmp;
5091 addr += 16;
5092 }
5093
5094 if (env->cr[4] & CR4_OSFXSR_MASK) {
5095 /* XXX: finish it */
5096 env->mxcsr = ldl(ptr + 0x18);
5097 //ldl(ptr + 0x1c);
5098 if (env->hflags & HF_CS64_MASK)
5099 nb_xmm_regs = 16;
5100 else
5101 nb_xmm_regs = 8;
5102 addr = ptr + 0xa0;
5103 for(i = 0; i < nb_xmm_regs; i++) {
5104#if !defined(VBOX) || __GNUC__ < 4
5105 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5106 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5107#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5108# if 1
5109 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5110 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5111 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5112 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5113# else
5114 /* this works fine on Mac OS X, gcc 4.0.1 */
5115 uint64_t u64 = ldq(addr);
5116            env->xmm_regs[i].XMM_Q(0) = u64;
5117            u64 = ldq(addr + 8);
5118 env->xmm_regs[i].XMM_Q(1) = u64;
5119# endif
5120#endif
5121 addr += 16;
5122 }
5123 }
5124}
5125
5126#ifndef USE_X86LDOUBLE
5127
5128void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5129{
5130 CPU86_LDoubleU temp;
5131 int e;
5132
5133 temp.d = f;
5134 /* mantissa */
5135 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5136 /* exponent + sign */
5137 e = EXPD(temp) - EXPBIAS + 16383;
5138 e |= SIGND(temp) >> 16;
5139 *pexp = e;
5140}
5141
5142CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5143{
5144 CPU86_LDoubleU temp;
5145 int e;
5146 uint64_t ll;
5147
5148 /* XXX: handle overflow ? */
5149 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5150 e |= (upper >> 4) & 0x800; /* sign */
5151 ll = (mant >> 11) & ((1LL << 52) - 1);
5152#ifdef __arm__
5153 temp.l.upper = (e << 20) | (ll >> 32);
5154 temp.l.lower = ll;
5155#else
5156 temp.ll = ll | ((uint64_t)e << 52);
5157#endif
5158 return temp.d;
5159}
5160
5161#else
5162
5163void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5164{
5165 CPU86_LDoubleU temp;
5166
5167 temp.d = f;
5168 *pmant = temp.l.lower;
5169 *pexp = temp.l.upper;
5170}
5171
5172CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5173{
5174 CPU86_LDoubleU temp;
5175
5176 temp.l.upper = upper;
5177 temp.l.lower = mant;
5178 return temp.d;
5179}
5180#endif
5181
5182#ifdef TARGET_X86_64
5183
5184//#define DEBUG_MULDIV
5185
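/* 128-bit arithmetic helpers for the 64-bit divide helpers below. */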
5186static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5187{
5188 *plow += a;
5189 /* carry test */
5190 if (*plow < a)
5191 (*phigh)++;
5192 *phigh += b;
5193}
5194
5195static void neg128(uint64_t *plow, uint64_t *phigh)
5196{
5197 *plow = ~ *plow;
5198 *phigh = ~ *phigh;
5199 add128(plow, phigh, 1, 0);
5200}
5201
5202/* return TRUE if overflow */
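/* Divides the 128-bit value in *plow (low) / *phigh (high) by b with a simple restoring
   shift-and-subtract loop; on success *plow holds the quotient and *phigh the remainder. */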
5203static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5204{
5205 uint64_t q, r, a1, a0;
5206 int i, qb, ab;
5207
5208 a0 = *plow;
5209 a1 = *phigh;
5210 if (a1 == 0) {
5211 q = a0 / b;
5212 r = a0 % b;
5213 *plow = q;
5214 *phigh = r;
5215 } else {
5216 if (a1 >= b)
5217 return 1;
5218 /* XXX: use a better algorithm */
5219 for(i = 0; i < 64; i++) {
5220 ab = a1 >> 63;
5221 a1 = (a1 << 1) | (a0 >> 63);
5222 if (ab || a1 >= b) {
5223 a1 -= b;
5224 qb = 1;
5225 } else {
5226 qb = 0;
5227 }
5228 a0 = (a0 << 1) | qb;
5229 }
5230#if defined(DEBUG_MULDIV)
5231 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5232 *phigh, *plow, b, a0, a1);
5233#endif
5234 *plow = a0;
5235 *phigh = a1;
5236 }
5237 return 0;
5238}
5239
5240/* return TRUE if overflow */
5241static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5242{
5243 int sa, sb;
5244 sa = ((int64_t)*phigh < 0);
5245 if (sa)
5246 neg128(plow, phigh);
5247 sb = (b < 0);
5248 if (sb)
5249 b = -b;
5250 if (div64(plow, phigh, b) != 0)
5251 return 1;
5252 if (sa ^ sb) {
5253 if (*plow > (1ULL << 63))
5254 return 1;
5255 *plow = - *plow;
5256 } else {
5257 if (*plow >= (1ULL << 63))
5258 return 1;
5259 }
5260 if (sa)
5261 *phigh = - *phigh;
5262 return 0;
5263}
5264
5265void helper_mulq_EAX_T0(target_ulong t0)
5266{
5267 uint64_t r0, r1;
5268
5269 mulu64(&r0, &r1, EAX, t0);
5270 EAX = r0;
5271 EDX = r1;
5272 CC_DST = r0;
5273 CC_SRC = r1;
5274}
5275
5276void helper_imulq_EAX_T0(target_ulong t0)
5277{
5278 uint64_t r0, r1;
5279
5280 muls64(&r0, &r1, EAX, t0);
5281 EAX = r0;
5282 EDX = r1;
5283 CC_DST = r0;
5284 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5285}
5286
5287target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5288{
5289 uint64_t r0, r1;
5290
5291 muls64(&r0, &r1, t0, t1);
5292 CC_DST = r0;
5293 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5294 return r0;
5295}
5296
5297void helper_divq_EAX(target_ulong t0)
5298{
5299 uint64_t r0, r1;
5300 if (t0 == 0) {
5301 raise_exception(EXCP00_DIVZ);
5302 }
5303 r0 = EAX;
5304 r1 = EDX;
5305 if (div64(&r0, &r1, t0))
5306 raise_exception(EXCP00_DIVZ);
5307 EAX = r0;
5308 EDX = r1;
5309}
5310
5311void helper_idivq_EAX(target_ulong t0)
5312{
5313 uint64_t r0, r1;
5314 if (t0 == 0) {
5315 raise_exception(EXCP00_DIVZ);
5316 }
5317 r0 = EAX;
5318 r1 = EDX;
5319 if (idiv64(&r0, &r1, t0))
5320 raise_exception(EXCP00_DIVZ);
5321 EAX = r0;
5322 EDX = r1;
5323}
5324#endif
5325
5326static void do_hlt(void)
5327{
5328 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5329 env->halted = 1;
5330 env->exception_index = EXCP_HLT;
5331 cpu_loop_exit();
5332}
5333
5334void helper_hlt(int next_eip_addend)
5335{
5336 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5337 EIP += next_eip_addend;
5338
5339 do_hlt();
5340}
5341
5342void helper_monitor(target_ulong ptr)
5343{
5344 if ((uint32_t)ECX != 0)
5345 raise_exception(EXCP0D_GPF);
5346 /* XXX: store address ? */
5347 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5348}
5349
5350void helper_mwait(int next_eip_addend)
5351{
5352 if ((uint32_t)ECX != 0)
5353 raise_exception(EXCP0D_GPF);
5354#ifdef VBOX
5355 helper_hlt(next_eip_addend);
5356#else
5357 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5358 EIP += next_eip_addend;
5359
5360 /* XXX: not complete but not completely erroneous */
5361 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5362 /* more than one CPU: do not sleep because another CPU may
5363 wake this one */
5364 } else {
5365 do_hlt();
5366 }
5367#endif
5368}
5369
5370void helper_debug(void)
5371{
5372 env->exception_index = EXCP_DEBUG;
5373 cpu_loop_exit();
5374}
5375
5376void helper_raise_interrupt(int intno, int next_eip_addend)
5377{
5378 raise_interrupt(intno, 1, 0, next_eip_addend);
5379}
5380
5381void helper_raise_exception(int exception_index)
5382{
5383 raise_exception(exception_index);
5384}
5385
5386void helper_cli(void)
5387{
5388 env->eflags &= ~IF_MASK;
5389}
5390
5391void helper_sti(void)
5392{
5393 env->eflags |= IF_MASK;
5394}
5395
5396#ifdef VBOX
5397void helper_cli_vme(void)
5398{
5399 env->eflags &= ~VIF_MASK;
5400}
5401
5402void helper_sti_vme(void)
5403{
5404 /* First check, then change eflags according to the AMD manual */
5405 if (env->eflags & VIP_MASK) {
5406 raise_exception(EXCP0D_GPF);
5407 }
5408 env->eflags |= VIF_MASK;
5409}
5410#endif
5411
5412#if 0
5413/* vm86plus instructions */
5414void helper_cli_vm(void)
5415{
5416 env->eflags &= ~VIF_MASK;
5417}
5418
5419void helper_sti_vm(void)
5420{
5421 env->eflags |= VIF_MASK;
5422 if (env->eflags & VIP_MASK) {
5423 raise_exception(EXCP0D_GPF);
5424 }
5425}
5426#endif
5427
5428void helper_set_inhibit_irq(void)
5429{
5430 env->hflags |= HF_INHIBIT_IRQ_MASK;
5431}
5432
5433void helper_reset_inhibit_irq(void)
5434{
5435 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5436}
5437
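/* BOUND: raise #BR (EXCP05) when the signed index is outside the [low, high] bounds pair read
   from memory. */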
5438void helper_boundw(target_ulong a0, int v)
5439{
5440 int low, high;
5441 low = ldsw(a0);
5442 high = ldsw(a0 + 2);
5443 v = (int16_t)v;
5444 if (v < low || v > high) {
5445 raise_exception(EXCP05_BOUND);
5446 }
5447 FORCE_RET();
5448}
5449
5450void helper_boundl(target_ulong a0, int v)
5451{
5452 int low, high;
5453 low = ldl(a0);
5454 high = ldl(a0 + 4);
5455 if (v < low || v > high) {
5456 raise_exception(EXCP05_BOUND);
5457 }
5458 FORCE_RET();
5459}
5460
5461static float approx_rsqrt(float a)
5462{
5463 return 1.0 / sqrt(a);
5464}
5465
5466static float approx_rcp(float a)
5467{
5468 return 1.0 / a;
5469}
5470
5471#if !defined(CONFIG_USER_ONLY)
5472
5473#define MMUSUFFIX _mmu
5474
5475#define SHIFT 0
5476#include "softmmu_template.h"
5477
5478#define SHIFT 1
5479#include "softmmu_template.h"
5480
5481#define SHIFT 2
5482#include "softmmu_template.h"
5483
5484#define SHIFT 3
5485#include "softmmu_template.h"
5486
5487#endif
5488
5489#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5490/* This code assumes that a real physical address always fits into a host CPU register,
5491   which is wrong in general but true for our current use cases. */
5492RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5493{
5494 return remR3PhysReadS8(addr);
5495}
5496RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5497{
5498 return remR3PhysReadU8(addr);
5499}
5500void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5501{
5502 remR3PhysWriteU8(addr, val);
5503}
5504RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5505{
5506 return remR3PhysReadS16(addr);
5507}
5508RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5509{
5510 return remR3PhysReadU16(addr);
5511}
5512void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5513{
5514 remR3PhysWriteU16(addr, val);
5515}
5516RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5517{
5518 return remR3PhysReadS32(addr);
5519}
5520RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5521{
5522 return remR3PhysReadU32(addr);
5523}
5524void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5525{
5526 remR3PhysWriteU32(addr, val);
5527}
5528uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5529{
5530 return remR3PhysReadU64(addr);
5531}
5532void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5533{
5534 remR3PhysWriteU64(addr, val);
5535}
5536#endif
5537
5538/* try to fill the TLB and return an exception if error. If retaddr is
5539 NULL, it means that the function was called in C code (i.e. not
5540 from generated code or from helper.c) */
5541/* XXX: fix it to restore all registers */
5542void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5543{
5544 TranslationBlock *tb;
5545 int ret;
5546 unsigned long pc;
5547 CPUX86State *saved_env;
5548
5549 /* XXX: hack to restore env in all cases, even if not called from
5550 generated code */
5551 saved_env = env;
5552 env = cpu_single_env;
5553
5554 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5555 if (ret) {
5556 if (retaddr) {
5557 /* now we have a real cpu fault */
5558 pc = (unsigned long)retaddr;
5559 tb = tb_find_pc(pc);
5560 if (tb) {
5561 /* the PC is inside the translated code. It means that we have
5562 a virtual CPU fault */
5563 cpu_restore_state(tb, env, pc, NULL);
5564 }
5565 }
5566 raise_exception_err(env->exception_index, env->error_code);
5567 }
5568 env = saved_env;
5569}
5570
5571#ifdef VBOX
5572
5573/**
5574 * Correctly computes the eflags.
5575 * @returns eflags.
5576 * @param env1 CPU environment.
5577 */
5578uint32_t raw_compute_eflags(CPUX86State *env1)
5579{
5580 CPUX86State *savedenv = env;
5581 uint32_t efl;
5582 env = env1;
5583 efl = compute_eflags();
5584 env = savedenv;
5585 return efl;
5586}
5587
5588/**
5589 * Reads byte from virtual address in guest memory area.
5590 * XXX: is it working for any addresses? swapped out pages?
5591 * @returns read data byte.
5592 * @param env1 CPU environment.
5593 * @param pvAddr GC Virtual address.
5594 */
5595uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5596{
5597 CPUX86State *savedenv = env;
5598 uint8_t u8;
5599 env = env1;
5600 u8 = ldub_kernel(addr);
5601 env = savedenv;
5602 return u8;
5603}
5604
5605/**
5606 * Reads word from virtual address in guest memory area.
5607 * XXX: is it working for any addresses? swapped out pages?
5608 * @returns read data word.
5609 * @param env1 CPU environment.
5610 * @param pvAddr GC Virtual address.
5611 */
5612uint16_t read_word(CPUX86State *env1, target_ulong addr)
5613{
5614 CPUX86State *savedenv = env;
5615 uint16_t u16;
5616 env = env1;
5617 u16 = lduw_kernel(addr);
5618 env = savedenv;
5619 return u16;
5620}
5621
5622/**
5623 * Reads dword from virtual address in guest memory area.
5624 * XXX: is it working for any addresses? swapped out pages?
5625 * @returns read data dword.
5626 * @param env1 CPU environment.
5627 * @param pvAddr GC Virtual address.
5628 */
5629uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5630{
5631 CPUX86State *savedenv = env;
5632 uint32_t u32;
5633 env = env1;
5634 u32 = ldl_kernel(addr);
5635 env = savedenv;
5636 return u32;
5637}
5638
5639/**
5640 * Writes byte to virtual address in guest memory area.
5641 * XXX: is it working for any addresses? swapped out pages?
5643 * @param env1 CPU environment.
5644 * @param pvAddr GC Virtual address.
5645 * @param val byte value
5646 */
5647void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5648{
5649 CPUX86State *savedenv = env;
5650 env = env1;
5651 stb(addr, val);
5652 env = savedenv;
5653}
5654
5655void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5656{
5657 CPUX86State *savedenv = env;
5658 env = env1;
5659 stw(addr, val);
5660 env = savedenv;
5661}
5662
5663void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5664{
5665 CPUX86State *savedenv = env;
5666 env = env1;
5667 stl(addr, val);
5668 env = savedenv;
5669}
5670
5671/**
5672 * Correctly loads selector into segment register with updating internal
5673 * qemu data/caches.
5674 * @param env1 CPU environment.
5675 * @param seg_reg Segment register.
5676 * @param selector Selector to load.
5677 */
5678void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5679{
5680 CPUX86State *savedenv = env;
5681 jmp_buf old_buf;
5682
5683 env = env1;
5684
5685 if ( env->eflags & X86_EFL_VM
5686 || !(env->cr[0] & X86_CR0_PE))
5687 {
5688 load_seg_vm(seg_reg, selector);
5689
5690 env = savedenv;
5691
5692 /* Successful sync. */
5693 env1->segs[seg_reg].newselector = 0;
5694 }
5695 else
5696 {
5697        /* For some reason this works even without saving and restoring the jump buffer; as this
5698           code is time critical, we normally don't do it. */
5699#ifdef FORCE_SEGMENT_SYNC
5700 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5701#endif
5702 if (setjmp(env1->jmp_env) == 0)
5703 {
5704 if (seg_reg == R_CS)
5705 {
5706 uint32_t e1, e2;
5707 e1 = e2 = 0;
5708 load_segment(&e1, &e2, selector);
5709 cpu_x86_load_seg_cache(env, R_CS, selector,
5710 get_seg_base(e1, e2),
5711 get_seg_limit(e1, e2),
5712 e2);
5713 }
5714 else
5715 helper_load_seg(seg_reg, selector);
5716            /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5717               loading 0 selectors, which in turn led to subtle problems like #3588 */
5718
5719 env = savedenv;
5720
5721 /* Successful sync. */
5722 env1->segs[seg_reg].newselector = 0;
5723 }
5724 else
5725 {
5726 env = savedenv;
5727
5728 /* Postpone sync until the guest uses the selector. */
5729 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5730 env1->segs[seg_reg].newselector = selector;
5731 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5732 env1->exception_index = -1;
5733 env1->error_code = 0;
5734 env1->old_exception = -1;
5735 }
5736#ifdef FORCE_SEGMENT_SYNC
5737 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5738#endif
5739 }
5740
5741}
5742
5743DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5744{
5745 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5746}
5747
5748
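/**
 * Translates and executes a single instruction at the current guest eip, then
 * invalidates and frees the temporary translation block again. When interrupts
 * are inhibited (instruction fusing), the following instruction is emulated too.
 *
 * @returns 0.
 * @param   env1    CPU environment.
 */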
5749int emulate_single_instr(CPUX86State *env1)
5750{
5751 TranslationBlock *tb;
5752 TranslationBlock *current;
5753 int flags;
5754 uint8_t *tc_ptr;
5755 target_ulong old_eip;
5756
5757 /* ensures env is loaded! */
5758 CPUX86State *savedenv = env;
5759 env = env1;
5760
5761 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5762
5763 current = env->current_tb;
5764 env->current_tb = NULL;
5765 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5766
5767 /*
5768 * Translate only one instruction.
5769 */
5770 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5771 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5772 env->segs[R_CS].base, flags, 0);
5773
5774 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5775
5776
5777 /* tb_link_phys: */
5778 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5779 tb->jmp_next[0] = NULL;
5780 tb->jmp_next[1] = NULL;
5781 Assert(tb->jmp_next[0] == NULL);
5782 Assert(tb->jmp_next[1] == NULL);
5783 if (tb->tb_next_offset[0] != 0xffff)
5784 tb_reset_jump(tb, 0);
5785 if (tb->tb_next_offset[1] != 0xffff)
5786 tb_reset_jump(tb, 1);
5787
5788 /*
5789 * Execute it using emulation
5790 */
5791 old_eip = env->eip;
5792 env->current_tb = tb;
5793
5794 /*
5795     * eip remains the same for repeated instructions; it is unclear why qemu does not emit a jump inside the generated code.
5796     * This is perhaps not a very safe hack.
5797 */
5798 while(old_eip == env->eip)
5799 {
5800 tc_ptr = tb->tc_ptr;
5801
5802#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5803 int fake_ret;
5804 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5805#else
5806 tcg_qemu_tb_exec(tc_ptr);
5807#endif
5808 /*
5809 * Exit once we detect an external interrupt and interrupts are enabled
5810 */
5811 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5812 ( (env->eflags & IF_MASK) &&
5813 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5814 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5815 {
5816 break;
5817 }
5818 }
5819 env->current_tb = current;
5820
5821 tb_phys_invalidate(tb, -1);
5822 tb_free(tb);
5823/*
5824 Assert(tb->tb_next_offset[0] == 0xffff);
5825 Assert(tb->tb_next_offset[1] == 0xffff);
5826 Assert(tb->tb_next[0] == 0xffff);
5827 Assert(tb->tb_next[1] == 0xffff);
5828 Assert(tb->jmp_next[0] == NULL);
5829 Assert(tb->jmp_next[1] == NULL);
5830 Assert(tb->jmp_first == NULL); */
5831
5832 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5833
5834 /*
5835 * Execute the next instruction when we encounter instruction fusing.
5836 */
5837 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5838 {
5839 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5840 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5841 emulate_single_instr(env);
5842 }
5843
5844 env = savedenv;
5845 return 0;
5846}
5847
5848/**
5849 * Correctly loads a new ldtr selector.
5850 *
5851 * @param env1 CPU environment.
5852 * @param selector Selector to load.
5853 */
5854void sync_ldtr(CPUX86State *env1, int selector)
5855{
5856 CPUX86State *saved_env = env;
5857 if (setjmp(env1->jmp_env) == 0)
5858 {
5859 env = env1;
5860 helper_lldt(selector);
5861 env = saved_env;
5862 }
5863 else
5864 {
5865 env = saved_env;
5866#ifdef VBOX_STRICT
5867 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5868#endif
5869 }
5870}
5871
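/**
 * Fetches the stack segment and stack pointer for the given privilege level
 * from the current TSS (16-bit or 32-bit layout, depending on the TSS type).
 *
 * @returns 1 on success, 0 if the entry lies outside the TSS limit.
 * @param   env1     CPU environment.
 * @param   ss_ptr   Where to store the stack selector.
 * @param   esp_ptr  Where to store the stack pointer.
 * @param   dpl      Privilege level to look up.
 */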
5872int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5873 uint32_t *esp_ptr, int dpl)
5874{
5875 int type, index, shift;
5876
5877 CPUX86State *savedenv = env;
5878 env = env1;
5879
5880 if (!(env->tr.flags & DESC_P_MASK))
5881 cpu_abort(env, "invalid tss");
5882 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5883 if ((type & 7) != 1)
5884 cpu_abort(env, "invalid tss type %d", type);
5885 shift = type >> 3;
5886 index = (dpl * 4 + 2) << shift;
5887 if (index + (4 << shift) - 1 > env->tr.limit)
5888 {
5889 env = savedenv;
5890 return 0;
5891 }
5892 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5893
5894 if (shift == 0) {
5895 *esp_ptr = lduw_kernel(env->tr.base + index);
5896 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5897 } else {
5898 *esp_ptr = ldl_kernel(env->tr.base + index);
5899 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5900 }
5901
5902 env = savedenv;
5903 return 1;
5904}
5905
5906//*****************************************************************************
5907// Needs to be at the bottom of the file (overriding macros)
5908
5909#ifndef VBOX
5910static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5911#else /* VBOX */
5912DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5913#endif /* VBOX */
5914{
5915 return *(CPU86_LDouble *)ptr;
5916}
5917
5918#ifndef VBOX
5919static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5920#else /* VBOX */
5921DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5922#endif /* VBOX */
5923{
5924 *(CPU86_LDouble *)ptr = f;
5925}
5926
5927#undef stw
5928#undef stl
5929#undef stq
5930#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5931#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5932#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5933
5934//*****************************************************************************
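/* Writes the guest FPU state (and, when CR4.OSFXSR is set, the SSE state) of env out to the
   raw-mode context at ptr, using the FXSAVE layout when the CPU reports FXSR and the legacy
   FSAVE layout otherwise. */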
5935void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5936{
5937 int fpus, fptag, i, nb_xmm_regs;
5938 CPU86_LDouble tmp;
5939 uint8_t *addr;
5940 int data64 = !!(env->hflags & HF_LMA_MASK);
5941
5942 if (env->cpuid_features & CPUID_FXSR)
5943 {
5944 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5945 fptag = 0;
5946 for(i = 0; i < 8; i++) {
5947 fptag |= (env->fptags[i] << i);
5948 }
5949 stw(ptr, env->fpuc);
5950 stw(ptr + 2, fpus);
5951 stw(ptr + 4, fptag ^ 0xff);
5952
5953 addr = ptr + 0x20;
5954 for(i = 0;i < 8; i++) {
5955 tmp = ST(i);
5956 helper_fstt_raw(tmp, addr);
5957 addr += 16;
5958 }
5959
5960 if (env->cr[4] & CR4_OSFXSR_MASK) {
5961 /* XXX: finish it */
5962 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5963 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5964 nb_xmm_regs = 8 << data64;
5965 addr = ptr + 0xa0;
5966 for(i = 0; i < nb_xmm_regs; i++) {
5967#if __GNUC__ < 4
5968 stq(addr, env->xmm_regs[i].XMM_Q(0));
5969 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5970#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5971 stl(addr, env->xmm_regs[i].XMM_L(0));
5972 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5973 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5974 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5975#endif
5976 addr += 16;
5977 }
5978 }
5979 }
5980 else
5981 {
5982 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5983 int fptag;
5984
5985 fp->FCW = env->fpuc;
5986 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5987 fptag = 0;
5988 for (i=7; i>=0; i--) {
5989 fptag <<= 2;
5990 if (env->fptags[i]) {
5991 fptag |= 3;
5992 } else {
5993 /* the FPU automatically computes it */
5994 }
5995 }
5996 fp->FTW = fptag;
5997
5998 for(i = 0;i < 8; i++) {
5999 tmp = ST(i);
6000 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6001 }
6002 }
6003}
6004
6005//*****************************************************************************
6006#undef lduw
6007#undef ldl
6008#undef ldq
6009#define lduw(a) *(uint16_t *)(a)
6010#define ldl(a) *(uint32_t *)(a)
6011#define ldq(a) *(uint64_t *)(a)
6012//*****************************************************************************
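/* Loads the guest FPU state (and, when CR4.OSFXSR is set, the SSE state) from the raw-mode
   context at ptr into env, expecting the FXSAVE layout when the CPU reports FXSR and the
   legacy FSAVE layout otherwise. */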
6013void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6014{
6015 int i, fpus, fptag, nb_xmm_regs;
6016 CPU86_LDouble tmp;
6017 uint8_t *addr;
6018 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6019
6020 if (env->cpuid_features & CPUID_FXSR)
6021 {
6022 env->fpuc = lduw(ptr);
6023 fpus = lduw(ptr + 2);
6024 fptag = lduw(ptr + 4);
6025 env->fpstt = (fpus >> 11) & 7;
6026 env->fpus = fpus & ~0x3800;
6027 fptag ^= 0xff;
6028 for(i = 0;i < 8; i++) {
6029 env->fptags[i] = ((fptag >> i) & 1);
6030 }
6031
6032 addr = ptr + 0x20;
6033 for(i = 0;i < 8; i++) {
6034 tmp = helper_fldt_raw(addr);
6035 ST(i) = tmp;
6036 addr += 16;
6037 }
6038
6039 if (env->cr[4] & CR4_OSFXSR_MASK) {
6040 /* XXX: finish it, endianness */
6041 env->mxcsr = ldl(ptr + 0x18);
6042 //ldl(ptr + 0x1c);
6043 nb_xmm_regs = 8 << data64;
6044 addr = ptr + 0xa0;
6045 for(i = 0; i < nb_xmm_regs; i++) {
6046#if HC_ARCH_BITS == 32
6047 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6048 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6049 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6050 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6051 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6052#else
6053 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6054 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6055#endif
6056 addr += 16;
6057 }
6058 }
6059 }
6060 else
6061 {
6062 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6063 int fptag, j;
6064
6065 env->fpuc = fp->FCW;
6066 env->fpstt = (fp->FSW >> 11) & 7;
6067 env->fpus = fp->FSW & ~0x3800;
6068 fptag = fp->FTW;
6069 for(i = 0;i < 8; i++) {
6070 env->fptags[i] = ((fptag & 3) == 3);
6071 fptag >>= 2;
6072 }
6073 j = env->fpstt;
6074 for(i = 0;i < 8; i++) {
6075 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6076 ST(i) = tmp;
6077 }
6078 }
6079}
6080//*****************************************************************************
6081//*****************************************************************************
6082
6083#endif /* VBOX */
6084
6085/* Secure Virtual Machine helpers */
6086
6087#if defined(CONFIG_USER_ONLY)
6088
6089void helper_vmrun(int aflag, int next_eip_addend)
6090{
6091}
6092void helper_vmmcall(void)
6093{
6094}
6095void helper_vmload(int aflag)
6096{
6097}
6098void helper_vmsave(int aflag)
6099{
6100}
6101void helper_stgi(void)
6102{
6103}
6104void helper_clgi(void)
6105{
6106}
6107void helper_skinit(void)
6108{
6109}
6110void helper_invlpga(int aflag)
6111{
6112}
6113void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6114{
6115}
6116void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6117{
6118}
6119
6120void helper_svm_check_io(uint32_t port, uint32_t param,
6121 uint32_t next_eip_addend)
6122{
6123}
6124#else
6125
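/* Stores a segment register (selector, base, limit and packed attributes) into the VMCB
   segment descriptor at the given physical address. */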
6126#ifndef VBOX
6127static inline void svm_save_seg(target_phys_addr_t addr,
6128#else /* VBOX */
6129DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6130#endif /* VBOX */
6131 const SegmentCache *sc)
6132{
6133 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6134 sc->selector);
6135 stq_phys(addr + offsetof(struct vmcb_seg, base),
6136 sc->base);
6137 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6138 sc->limit);
6139 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6140 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6141}
6142
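/* Loads a segment cache from the VMCB segment descriptor at the given physical address,
   expanding the packed attribute bits back into the internal flags format. */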
6143#ifndef VBOX
6144static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6145#else /* VBOX */
6146DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6147#endif /* VBOX */
6148{
6149 unsigned int flags;
6150
6151 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6152 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6153 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6154 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6155 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6156}
6157
6158#ifndef VBOX
6159static inline void svm_load_seg_cache(target_phys_addr_t addr,
6160#else /* VBOX */
6161DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6162#endif /* VBOX */
6163 CPUState *env, int seg_reg)
6164{
6165 SegmentCache sc1, *sc = &sc1;
6166 svm_load_seg(addr, sc);
6167 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6168 sc->base, sc->limit, sc->flags);
6169}
6170
6171void helper_vmrun(int aflag, int next_eip_addend)
6172{
6173 target_ulong addr;
6174 uint32_t event_inj;
6175 uint32_t int_ctl;
6176
6177 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6178
6179 if (aflag == 2)
6180 addr = EAX;
6181 else
6182 addr = (uint32_t)EAX;
6183
6184 if (loglevel & CPU_LOG_TB_IN_ASM)
6185 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6186
6187 env->vm_vmcb = addr;
6188
6189 /* save the current CPU state in the hsave page */
6190 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6191 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6192
6193 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6194 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6195
6196 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6197 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6198 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6199 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6200 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6201 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6202
6203 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6204 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6205
6206 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6207 &env->segs[R_ES]);
6208 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6209 &env->segs[R_CS]);
6210 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6211 &env->segs[R_SS]);
6212 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6213 &env->segs[R_DS]);
6214
6215 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6216 EIP + next_eip_addend);
6217 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6218 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6219
6220 /* load the interception bitmaps so we do not need to access the
6221 vmcb in svm mode */
6222 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6223 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6224 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6225 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6226 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6227 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6228
6229 /* enable intercepts */
6230 env->hflags |= HF_SVMI_MASK;
6231
6232 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6233
6234 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6235 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6236
6237 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6238 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6239
6240 /* clear exit_info_2 so we behave like the real hardware */
6241 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6242
6243 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6244 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6245 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6246 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6247 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6248 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6249 if (int_ctl & V_INTR_MASKING_MASK) {
6250 env->v_tpr = int_ctl & V_TPR_MASK;
6251 env->hflags2 |= HF2_VINTR_MASK;
6252 if (env->eflags & IF_MASK)
6253 env->hflags2 |= HF2_HIF_MASK;
6254 }
6255
6256 cpu_load_efer(env,
6257 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6258 env->eflags = 0;
6259 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6260 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6261 CC_OP = CC_OP_EFLAGS;
6262
6263 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6264 env, R_ES);
6265 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6266 env, R_CS);
6267 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6268 env, R_SS);
6269 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6270 env, R_DS);
6271
6272 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6273 env->eip = EIP;
6274 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6275 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6276 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6277 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6278 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6279
6280 /* FIXME: guest state consistency checks */
6281
6282 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6283 case TLB_CONTROL_DO_NOTHING:
6284 break;
6285 case TLB_CONTROL_FLUSH_ALL_ASID:
6286 /* FIXME: this is not 100% correct but should work for now */
6287 tlb_flush(env, 1);
6288 break;
6289 }
6290
6291 env->hflags2 |= HF2_GIF_MASK;
6292
6293 if (int_ctl & V_IRQ_MASK) {
6294 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6295 }
6296
6297 /* maybe we need to inject an event */
6298 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6299 if (event_inj & SVM_EVTINJ_VALID) {
6300 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6301 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6302 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6303 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6304
6305 if (loglevel & CPU_LOG_TB_IN_ASM)
6306 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6307 /* FIXME: need to implement valid_err */
6308 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6309 case SVM_EVTINJ_TYPE_INTR:
6310 env->exception_index = vector;
6311 env->error_code = event_inj_err;
6312 env->exception_is_int = 0;
6313 env->exception_next_eip = -1;
6314 if (loglevel & CPU_LOG_TB_IN_ASM)
6315 fprintf(logfile, "INTR");
6316 /* XXX: is this always correct? */
6317 do_interrupt(vector, 0, 0, 0, 1);
6318 break;
6319 case SVM_EVTINJ_TYPE_NMI:
6320 env->exception_index = EXCP02_NMI;
6321 env->error_code = event_inj_err;
6322 env->exception_is_int = 0;
6323 env->exception_next_eip = EIP;
6324 if (loglevel & CPU_LOG_TB_IN_ASM)
6325 fprintf(logfile, "NMI");
6326 cpu_loop_exit();
6327 break;
6328 case SVM_EVTINJ_TYPE_EXEPT:
6329 env->exception_index = vector;
6330 env->error_code = event_inj_err;
6331 env->exception_is_int = 0;
6332 env->exception_next_eip = -1;
6333 if (loglevel & CPU_LOG_TB_IN_ASM)
6334 fprintf(logfile, "EXEPT");
6335 cpu_loop_exit();
6336 break;
6337 case SVM_EVTINJ_TYPE_SOFT:
6338 env->exception_index = vector;
6339 env->error_code = event_inj_err;
6340 env->exception_is_int = 1;
6341 env->exception_next_eip = EIP;
6342 if (loglevel & CPU_LOG_TB_IN_ASM)
6343 fprintf(logfile, "SOFT");
6344 cpu_loop_exit();
6345 break;
6346 }
6347 if (loglevel & CPU_LOG_TB_IN_ASM)
6348 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6349 }
6350}
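/* Illustrative sketch (not compiled): the event-injection path above
   consumes the VMCB EVENTINJ field.  decode_event_inj() below is purely
   hypothetical; its masks restate the usual SVM layout (vector in bits 7:0,
   type in bits 10:8, EV in bit 11, V in bit 31) that the SVM_EVTINJ_*
   constants are assumed to encode. */
#if 0
#include <stdint.h>
#include <stdio.h>

static void decode_event_inj(uint32_t event_inj, uint32_t event_inj_err)
{
    unsigned vector, type, has_err;

    if (!(event_inj & (1u << 31)))      /* V bit clear: nothing to inject */
        return;
    vector  = event_inj & 0xff;         /* bits 7:0 */
    type    = (event_inj >> 8) & 7;     /* bits 10:8: 0=INTR 2=NMI 3=EXEPT 4=SOFT */
    has_err = (event_inj >> 11) & 1;    /* EV: error code valid */
    printf("inject vector=%u type=%u err=%s (%#x)\n",
           vector, type, has_err ? "valid" : "none", event_inj_err);
}
#endif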
6351
6352void helper_vmmcall(void)
6353{
6354 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6355 raise_exception(EXCP06_ILLOP);
6356}
6357
6358void helper_vmload(int aflag)
6359{
6360 target_ulong addr;
6361 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6362
6363 if (aflag == 2)
6364 addr = EAX;
6365 else
6366 addr = (uint32_t)EAX;
6367
6368 if (loglevel & CPU_LOG_TB_IN_ASM)
6369 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6370 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6371 env->segs[R_FS].base);
6372
6373 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6374 env, R_FS);
6375 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6376 env, R_GS);
6377 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6378 &env->tr);
6379 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6380 &env->ldt);
6381
6382#ifdef TARGET_X86_64
6383 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6384 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6385 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6386 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6387#endif
6388 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6389 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6390 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6391 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6392}
6393
6394void helper_vmsave(int aflag)
6395{
6396 target_ulong addr;
6397 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6398
6399 if (aflag == 2)
6400 addr = EAX;
6401 else
6402 addr = (uint32_t)EAX;
6403
6404 if (loglevel & CPU_LOG_TB_IN_ASM)
6405 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6406 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6407 env->segs[R_FS].base);
6408
6409 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6410 &env->segs[R_FS]);
6411 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6412 &env->segs[R_GS]);
6413 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6414 &env->tr);
6415 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6416 &env->ldt);
6417
6418#ifdef TARGET_X86_64
6419 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6420 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6421 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6422 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6423#endif
6424 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6425 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6426 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6427 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6428}
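/* Illustrative sketch (not compiled): helper_vmload and helper_vmsave above
   are exact mirrors over the same VMCB fields, and both take the VMCB
   physical address from rAX, truncated to 32 bits unless a 64-bit address
   size is in effect (aflag == 2).  The hypothetical svm_rax_address() shows
   that shared address handling, which helper_invlpga below repeats. */
#if 0
#include <stdint.h>

static uint64_t svm_rax_address(uint64_t rax, int aflag)
{
    return (aflag == 2) ? rax : (uint64_t)(uint32_t)rax;
}
#endif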
6429
6430void helper_stgi(void)
6431{
6432 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6433 env->hflags2 |= HF2_GIF_MASK;
6434}
6435
6436void helper_clgi(void)
6437{
6438 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6439 env->hflags2 &= ~HF2_GIF_MASK;
6440}
6441
6442void helper_skinit(void)
6443{
6444 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6445 /* XXX: not implemented */
6446 raise_exception(EXCP06_ILLOP);
6447}
6448
6449void helper_invlpga(int aflag)
6450{
6451 target_ulong addr;
6452 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6453
6454 if (aflag == 2)
6455 addr = EAX;
6456 else
6457 addr = (uint32_t)EAX;
6458
6459 /* XXX: could use the ASID to decide whether the flush is
6460 actually needed */
6461 tlb_flush_page(env, addr);
6462}
6463
6464void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6465{
6466 if (likely(!(env->hflags & HF_SVMI_MASK)))
6467 return;
6468#ifndef VBOX
6469 switch(type) {
6470#ifndef VBOX
6471 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6472#else
6473 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6474 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6475 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6476#endif
6477 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6478 helper_vmexit(type, param);
6479 }
6480 break;
6481#ifndef VBOX
6482 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6483#else
6484 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6485 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6486 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6487#endif
6488 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6489 helper_vmexit(type, param);
6490 }
6491 break;
6492 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6493 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6494 helper_vmexit(type, param);
6495 }
6496 break;
6497 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6498 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6499 helper_vmexit(type, param);
6500 }
6501 break;
6502 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6503 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6504 helper_vmexit(type, param);
6505 }
6506 break;
6507 case SVM_EXIT_MSR:
6508 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6509 /* FIXME: this should be read in at vmrun (faster this way?) */
6510 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6511 uint32_t t0, t1;
6512 switch((uint32_t)ECX) {
6513 case 0 ... 0x1fff:
6514 t0 = (ECX * 2) % 8;
6515 t1 = (ECX * 2) / 8; /* two bits per MSR */
6516 break;
6517 case 0xc0000000 ... 0xc0001fff:
6518 t0 = (8192 + ECX - 0xc0000000) * 2;
6519 t1 = (t0 / 8);
6520 t0 %= 8;
6521 break;
6522 case 0xc0010000 ... 0xc0011fff:
6523 t0 = (16384 + ECX - 0xc0010000) * 2;
6524 t1 = (t0 / 8);
6525 t0 %= 8;
6526 break;
6527 default:
6528 helper_vmexit(type, param);
6529 t0 = 0;
6530 t1 = 0;
6531 break;
6532 }
6533 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6534 helper_vmexit(type, param);
6535 }
6536 break;
6537 default:
6538 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6539 helper_vmexit(type, param);
6540 }
6541 break;
6542 }
6543#else
6544 AssertMsgFailed(("We shouldn't be here; HWACCM handles this differently!"));
6545#endif
6546}
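/* Illustrative sketch (not compiled): the MSR permission bitmap consulted
   above holds two bits per MSR (the low bit of each pair is assumed to gate
   RDMSR, the high bit WRMSR), laid out in three 2K regions covering the
   0x00000000, 0xc0000000 and 0xc0010000 MSR ranges.  The hypothetical
   msrpm_locate() repeats the same index arithmetic on plain integers. */
#if 0
#include <stdint.h>

/* Returns 0 and fills in (byte, bit) on success, -1 if the MSR is unmapped. */
static int msrpm_locate(uint32_t msr, uint32_t *byte_off, uint32_t *bit_off)
{
    uint32_t idx;

    if (msr <= 0x1fff)
        idx = msr;                          /* bytes 0x000..0x7ff */
    else if (msr - 0xc0000000 <= 0x1fff)
        idx = 0x2000 + (msr - 0xc0000000);  /* bytes 0x800..0xfff */
    else if (msr - 0xc0010000 <= 0x1fff)
        idx = 0x4000 + (msr - 0xc0010000);  /* bytes 0x1000..0x17ff */
    else
        return -1;                          /* unmapped MSRs take the vmexit above */
    *byte_off = (idx * 2) / 8;              /* two bits per MSR */
    *bit_off  = (idx * 2) % 8;              /* read bit; the write bit follows it */
    return 0;
}
#endif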
6547
6548void helper_svm_check_io(uint32_t port, uint32_t param,
6549 uint32_t next_eip_addend)
6550{
6551 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6552 /* FIXME: this should be read in at vmrun (faster this way?) */
6553 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6554 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6555 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6556 /* next EIP */
6557 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6558 env->eip + next_eip_addend);
6559 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6560 }
6561 }
6562}
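/* Illustrative sketch (not compiled): the I/O permission map checked above
   holds one intercept bit per port, and a multi-byte access has to test the
   bit of every port it touches; that is what the contiguous mask does, with
   the size taken from what are assumed to be the SZ8/SZ16/SZ32 bits (6:4)
   of the IOIO exit-information word.  iopm_intercepted() is hypothetical. */
#if 0
#include <stdint.h>

static int iopm_intercepted(const uint8_t *iopm, uint16_t port, unsigned size_bytes)
{
    unsigned bits = ((1u << size_bytes) - 1) << (port & 7);
    unsigned map  = iopm[port / 8] | ((unsigned)iopm[port / 8 + 1] << 8);

    return (map & bits) != 0;
}
#endif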
6563
6564/* Note: currently only 32 bits of exit_code are used */
6565void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6566{
6567 uint32_t int_ctl;
6568
6569 if (loglevel & CPU_LOG_TB_IN_ASM)
6570 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6571 exit_code, exit_info_1,
6572 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6573 EIP);
6574
6575 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6576 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6577 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6578 } else {
6579 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6580 }
6581
6582 /* Save the VM state in the vmcb */
6583 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6584 &env->segs[R_ES]);
6585 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6586 &env->segs[R_CS]);
6587 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6588 &env->segs[R_SS]);
6589 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6590 &env->segs[R_DS]);
6591
6592 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6593 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6594
6595 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6596 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6597
6598 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6599 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6600 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6601 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6602 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6603
6604 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6605 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6606 int_ctl |= env->v_tpr & V_TPR_MASK;
6607 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6608 int_ctl |= V_IRQ_MASK;
6609 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6610
6611 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6612 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6613 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6614 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6615 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6616 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6617 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6618
6619 /* Reload the host state from vm_hsave */
6620 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6621 env->hflags &= ~HF_SVMI_MASK;
6622 env->intercept = 0;
6623 env->intercept_exceptions = 0;
6624 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6625 env->tsc_offset = 0;
6626
6627 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6628 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6629
6630 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6631 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6632
6633 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6634 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6635 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6636 /* we need to set EFER after the CRs so the hidden flags get
6637 set properly */
6638 cpu_load_efer(env,
6639 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6640 env->eflags = 0;
6641 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6642 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6643 CC_OP = CC_OP_EFLAGS;
6644
6645 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6646 env, R_ES);
6647 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6648 env, R_CS);
6649 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6650 env, R_SS);
6651 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6652 env, R_DS);
6653
6654 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6655 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6656 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6657
6658 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6659 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6660
6661 /* other setups */
6662 cpu_x86_set_cpl(env, 0);
6663 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6664 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6665
6666 env->hflags2 &= ~HF2_GIF_MASK;
6667 /* FIXME: Resets the current ASID register to zero (host ASID). */
6668
6669 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6670
6671 /* Clears the TSC_OFFSET inside the processor. */
6672
6673 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6674 from the page table indicated by the host's CR3. If the PDPEs contain
6675 illegal state, the processor causes a shutdown. */
6676
6677 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6678 env->cr[0] |= CR0_PE_MASK;
6679 env->eflags &= ~VM_MASK;
6680
6681 /* Disables all breakpoints in the host DR7 register. */
6682
6683 /* Checks the reloaded host state for consistency. */
6684
6685 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6686 host's code segment or non-canonical (in the case of long mode), a
6687 #GP fault is delivered inside the host. */
6688
6689 /* remove any pending exception */
6690 env->exception_index = -1;
6691 env->error_code = 0;
6692 env->old_exception = -1;
6693
6694 cpu_loop_exit();
6695}
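/* Illustrative sketch (not compiled): the int_ctl write-back performed in
   helper_vmexit above, restated on plain integers by the hypothetical
   merge_int_ctl().  Bit positions follow the V_TPR_MASK (bits 3:0) and
   V_IRQ_MASK (bit 8) values this file relies on. */
#if 0
#include <stdint.h>

static uint32_t merge_int_ctl(uint32_t int_ctl, uint8_t v_tpr, int virq_pending)
{
    int_ctl &= ~(0x0fu | (1u << 8));    /* clear V_TPR and V_IRQ */
    int_ctl |= v_tpr & 0x0f;            /* current virtual TPR */
    if (virq_pending)                   /* CPU_INTERRUPT_VIRQ still pending */
        int_ctl |= 1u << 8;
    return int_ctl;
}
#endif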
6696
6697#endif
6698
6699/* MMX/SSE */
6700/* XXX: optimize by storing fptt and fptags in the static cpu state */
6701void helper_enter_mmx(void)
6702{
6703 env->fpstt = 0;
6704 *(uint32_t *)(env->fptags) = 0;
6705 *(uint32_t *)(env->fptags + 4) = 0;
6706}
6707
6708void helper_emms(void)
6709{
6710 /* set to empty state */
6711 *(uint32_t *)(env->fptags) = 0x01010101;
6712 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6713}
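/* Illustrative sketch (not compiled): env->fptags keeps one byte per x87
   register (0 = valid, 1 = empty), which is why the two 32-bit stores above
   tag all eight registers at once.  The hypothetical pack_tag_word() folds
   that into the architectural 2-bits-per-register tag word, treating every
   non-empty register as "valid", a simplification of what FSAVE/FXSAVE
   really compute. */
#if 0
#include <stdint.h>

static unsigned pack_tag_word(const uint8_t fptags[8])
{
    unsigned ftw = 0;
    int i;

    for (i = 0; i < 8; i++)
        ftw |= (fptags[i] ? 3u : 0u) << (i * 2);    /* 11b = empty, 00b = valid */
    return ftw;
}
#endif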
6714
6715 /* XXX: remove this helper; it is just a 64-bit move */
6716void helper_movq(uint64_t *d, uint64_t *s)
6717{
6718 *d = *s;
6719}
6720
6721#define SHIFT 0
6722#include "ops_sse.h"
6723
6724#define SHIFT 1
6725#include "ops_sse.h"
6726
6727#define SHIFT 0
6728#include "helper_template.h"
6729#undef SHIFT
6730
6731#define SHIFT 1
6732#include "helper_template.h"
6733#undef SHIFT
6734
6735#define SHIFT 2
6736#include "helper_template.h"
6737#undef SHIFT
6738
6739#ifdef TARGET_X86_64
6740
6741#define SHIFT 3
6742#include "helper_template.h"
6743#undef SHIFT
6744
6745#endif
6746
6747/* bit operations */
6748target_ulong helper_bsf(target_ulong t0)
6749{
6750 int count;
6751 target_ulong res;
6752
6753 res = t0;
6754 count = 0;
6755 while ((res & 1) == 0) {
6756 count++;
6757 res >>= 1;
6758 }
6759 return count;
6760}
6761
6762target_ulong helper_bsr(target_ulong t0)
6763{
6764 int count;
6765 target_ulong res, mask;
6766
6767 res = t0;
6768 count = TARGET_LONG_BITS - 1;
6769 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6770 while ((res & mask) == 0) {
6771 count--;
6772 res <<= 1;
6773 }
6774 return count;
6775}
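/* Illustrative sketch (not compiled): both loops above terminate only
   because the translator is expected to call them with a non-zero source
   (translate.c handles the zero operand itself and just sets ZF).  With GCC
   builtins the same results can be written without a loop for a 32-bit
   operand: */
#if 0
#include <stdint.h>

static int bsf32(uint32_t x) { return __builtin_ctz(x); }       /* undefined for x == 0 */
static int bsr32(uint32_t x) { return 31 - __builtin_clz(x); }  /* undefined for x == 0 */
#endif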
6776
6777
6778static int compute_all_eflags(void)
6779{
6780 return CC_SRC;
6781}
6782
6783static int compute_c_eflags(void)
6784{
6785 return CC_SRC & CC_C;
6786}
6787
6788#ifndef VBOX
6789CCTable cc_table[CC_OP_NB] = {
6790 [CC_OP_DYNAMIC] = { /* should never happen */ },
6791
6792 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6793
6794 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6795 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6796 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6797
6798 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6799 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6800 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6801
6802 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6803 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6804 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6805
6806 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6807 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6808 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6809
6810 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6811 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6812 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6813
6814 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6815 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6816 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6817
6818 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6819 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6820 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6821
6822 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6823 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6824 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6825
6826 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6827 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6828 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6829
6830 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6831 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6832 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6833
6834#ifdef TARGET_X86_64
6835 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6836
6837 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6838
6839 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6840
6841 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6842
6843 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6844
6845 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6846
6847 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6848
6849 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6850
6851 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6852
6853 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6854#endif
6855};
6856#else /* VBOX */
6857/* Sync carefully with cpu.h */
6858CCTable cc_table[CC_OP_NB] = {
6859 /* CC_OP_DYNAMIC */ { 0, 0 },
6860
6861 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6862
6863 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6864 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6865 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6866#ifdef TARGET_X86_64
6867 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6868#else
6869 /* CC_OP_MULQ */ { 0, 0 },
6870#endif
6871
6872 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6873 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6874 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6875#ifdef TARGET_X86_64
6876 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6877#else
6878 /* CC_OP_ADDQ */ { 0, 0 },
6879#endif
6880
6881 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6882 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6883 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6884#ifdef TARGET_X86_64
6885 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6886#else
6887 /* CC_OP_ADCQ */ { 0, 0 },
6888#endif
6889
6890 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6891 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6892 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6893#ifdef TARGET_X86_64
6894 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6895#else
6896 /* CC_OP_SUBQ */ { 0, 0 },
6897#endif
6898
6899 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6900 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6901 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6902#ifdef TARGET_X86_64
6903 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6904#else
6905 /* CC_OP_SBBQ */ { 0, 0 },
6906#endif
6907
6908 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6909 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6910 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6911#ifdef TARGET_X86_64
6912 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6913#else
6914 /* CC_OP_LOGICQ */ { 0, 0 },
6915#endif
6916
6917 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6918 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6919 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6920#ifdef TARGET_X86_64
6921 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6922#else
6923 /* CC_OP_INCQ */ { 0, 0 },
6924#endif
6925
6926 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6927 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6928 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6929#ifdef TARGET_X86_64
6930 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6931#else
6932 /* CC_OP_DECQ */ { 0, 0 },
6933#endif
6934
6935 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6936 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6937 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6938#ifdef TARGET_X86_64
6939 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6940#else
6941 /* CC_OP_SHLQ */ { 0, 0 },
6942#endif
6943
6944 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6945 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6946 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6947#ifdef TARGET_X86_64
6948 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6949#else
6950 /* CC_OP_SARQ */ { 0, 0 },
6951#endif
6952};
6953#endif /* VBOX */
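/* Illustrative sketch (not compiled): the table above implements lazy flag
   evaluation.  Each CC_OP entry points at one function that reconstructs
   all arithmetic flags from CC_SRC/CC_DST and one cheaper function that
   reconstructs only the carry flag; consumers such as the compute_eflags()
   used earlier in this file index the table with the current CC_OP.  The
   two hypothetical wrappers below assume CCTable's members are named
   compute_all and compute_c. */
#if 0
static int lazy_all_flags(int cc_op)  { return cc_table[cc_op].compute_all(); }
static int lazy_carry_flag(int cc_op) { return cc_table[cc_op].compute_c(); }
#endif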