VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c@ 18650

Last change on this file since 18650 was 18475, checked in by vboxsync, 16 years ago

REM/op_helper.c: Unused var caused by disabled code.

  • Property svn:eol-style set to native
File size: 194.2 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
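/* Editor's note (not part of the original file): parity_table[v] holds CC_P
 * exactly when the byte v contains an even number of set bits, which is the
 * x86 definition of the parity flag. A minimal sketch of how such a table
 * could be generated at run time, assuming a uint8_t table[256]:
 *
 *   int v, t, bits;
 *   for (v = 0; v < 256; v++) {
 *       for (bits = 0, t = v; t != 0; t >>= 1)
 *           bits += t & 1;
 *       table[v] = (bits & 1) ? 0 : CC_P;
 *   }
 */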
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
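/* Editor's note (not part of the original file): RCL rotates through CF, so a
 * 16-bit RCL effectively rotates a 17-bit quantity and the rotate count is
 * reduced modulo 17; the 8-bit form is reduced modulo 9. The two tables above
 * simply precompute count % 17 (resp. count % 9) for the raw 5-bit counts
 * 0..31, e.g. rclw_table[20] == 3 because 20 % 17 == 3.
 */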
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to AMD manual, should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
 180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
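/* Editor's note (not part of the original file): with CR4.VME set and IOPL < 3,
 * PUSHF/POPF executed in virtual-8086 mode operate on a shadow interrupt flag:
 * the guest-visible IF is taken from / stored into VIF, IOPL reads as 3, and a
 * POPF that would enable interrupts while VIP is pending raises #GP so the
 * monitor can deliver the virtual interrupt. The two helpers above implement
 * exactly that IF<->VIF translation for the 16-bit flag image.
 */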
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
196/* return non zero if error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with CPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
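/* Editor's note (not part of the original file): e1/e2 are the two 32-bit
 * halves of a GDT/LDT descriptor. Worked example, assuming a flat 4 GiB code
 * descriptor e1 = 0x0000ffff, e2 = 0x00cf9a00:
 *   get_seg_base()  -> (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000) = 0
 *   get_seg_limit() -> (0xffff | 0x000f0000) = 0xfffff; G bit set, so
 *                      (0xfffff << 12) | 0xfff = 0xffffffff
 */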
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
273
274 if (seg == R_CS)
275 flags |= DESC_CS_MASK;
276
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, flags);
279#else
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
282#endif
283}
284
285#ifndef VBOX
286static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
287#else /* VBOX */
288DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
289#endif /* VBOX */
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
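/* Editor's note (not part of the original file): for a 32-bit TSS (shift == 1)
 * the index computed above lands on the SS0/ESP0, SS1/ESP1, SS2/ESP2 pairs at
 * offsets 4/8, 12/16 and 20/24; for a 16-bit TSS (shift == 0) the pairs sit at
 * 2/4, 6/8 and 10/12. E.g. dpl == 0, 32-bit: index = (0*4+2) << 1 = 4, so ESP0
 * is read from tr.base + 4 and SS0 from tr.base + 8.
 */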
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0;
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with CPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
 371 /* if data or non-conforming code, check the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387#if 0
 388 /** @todo: we currently ignore loading null selectors here; check once what the correct behaviour is */
389 cpu_x86_load_seg_cache(env, seg_reg, selector,
390 0, 0, 0);
391#endif
392#endif
393 }
394}
395
396#define SWITCH_TSS_JMP 0
397#define SWITCH_TSS_IRET 1
398#define SWITCH_TSS_CALL 2
399
400/* XXX: restore CPU state in registers (PowerPC case) */
401static void switch_tss(int tss_selector,
402 uint32_t e1, uint32_t e2, int source,
403 uint32_t next_eip)
404{
405 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
406 target_ulong tss_base;
407 uint32_t new_regs[8], new_segs[6];
408 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
409 uint32_t old_eflags, eflags_mask;
410 SegmentCache *dt;
411#ifndef VBOX
412 int index;
413#else
414 unsigned int index;
415#endif
416 target_ulong ptr;
417
418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
419#ifdef DEBUG_PCALL
420 if (loglevel & CPU_LOG_PCALL)
421 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
422#endif
423
424#if defined(VBOX) && defined(DEBUG)
425 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
426#endif
427
428 /* if task gate, we read the TSS segment and we load it */
429 if (type == 5) {
430 if (!(e2 & DESC_P_MASK))
431 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
432 tss_selector = e1 >> 16;
433 if (tss_selector & 4)
434 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
435 if (load_segment(&e1, &e2, tss_selector) != 0)
436 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
437 if (e2 & DESC_S_MASK)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
440 if ((type & 7) != 1)
441 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
442 }
443
444 if (!(e2 & DESC_P_MASK))
445 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
446
447 if (type & 8)
448 tss_limit_max = 103;
449 else
450 tss_limit_max = 43;
451 tss_limit = get_seg_limit(e1, e2);
452 tss_base = get_seg_base(e1, e2);
453 if ((tss_selector & 4) != 0 ||
454 tss_limit < tss_limit_max)
455 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
456 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
457 if (old_type & 8)
458 old_tss_limit_max = 103;
459 else
460 old_tss_limit_max = 43;
461
462 /* read all the registers from the new TSS */
463 if (type & 8) {
464 /* 32 bit */
465 new_cr3 = ldl_kernel(tss_base + 0x1c);
466 new_eip = ldl_kernel(tss_base + 0x20);
467 new_eflags = ldl_kernel(tss_base + 0x24);
468 for(i = 0; i < 8; i++)
469 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
470 for(i = 0; i < 6; i++)
471 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
472 new_ldt = lduw_kernel(tss_base + 0x60);
473 new_trap = ldl_kernel(tss_base + 0x64);
474 } else {
475 /* 16 bit */
476 new_cr3 = 0;
477 new_eip = lduw_kernel(tss_base + 0x0e);
478 new_eflags = lduw_kernel(tss_base + 0x10);
479 for(i = 0; i < 8; i++)
480 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
481 for(i = 0; i < 4; i++)
482 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
483 new_ldt = lduw_kernel(tss_base + 0x2a);
484 new_segs[R_FS] = 0;
485 new_segs[R_GS] = 0;
486 new_trap = 0;
487 }
488
 489 /* NOTE: we must avoid memory exceptions during the task switch,
 490 so we make dummy accesses beforehand */
 491 /* XXX: it can still fail in some cases, so a bigger hack is
 492 necessary to validate the TLB after having done the accesses */
493
494 v1 = ldub_kernel(env->tr.base);
495 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
496 stb_kernel(env->tr.base, v1);
497 stb_kernel(env->tr.base + old_tss_limit_max, v2);
498
499 /* clear busy bit (it is restartable) */
500 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
501 target_ulong ptr;
502 uint32_t e2;
503 ptr = env->gdt.base + (env->tr.selector & ~7);
504 e2 = ldl_kernel(ptr + 4);
505 e2 &= ~DESC_TSS_BUSY_MASK;
506 stl_kernel(ptr + 4, e2);
507 }
508 old_eflags = compute_eflags();
509 if (source == SWITCH_TSS_IRET)
510 old_eflags &= ~NT_MASK;
511
512 /* save the current state in the old TSS */
513 if (type & 8) {
514 /* 32 bit */
515 stl_kernel(env->tr.base + 0x20, next_eip);
516 stl_kernel(env->tr.base + 0x24, old_eflags);
517 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
518 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
519 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
520 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
521 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
522 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
523 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
524 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
525 for(i = 0; i < 6; i++)
526 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
527#if defined(VBOX) && defined(DEBUG)
528 printf("TSS 32 bits switch\n");
529 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
530#endif
531 } else {
532 /* 16 bit */
533 stw_kernel(env->tr.base + 0x0e, next_eip);
534 stw_kernel(env->tr.base + 0x10, old_eflags);
535 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
536 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
537 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
538 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
539 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
540 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
541 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
542 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
543 for(i = 0; i < 4; i++)
544 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
545 }
546
 547 /* now if an exception occurs, it will occur in the next task
 548 context */
549
550 if (source == SWITCH_TSS_CALL) {
551 stw_kernel(tss_base, env->tr.selector);
552 new_eflags |= NT_MASK;
553 }
554
555 /* set busy bit */
556 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
557 target_ulong ptr;
558 uint32_t e2;
559 ptr = env->gdt.base + (tss_selector & ~7);
560 e2 = ldl_kernel(ptr + 4);
561 e2 |= DESC_TSS_BUSY_MASK;
562 stl_kernel(ptr + 4, e2);
563 }
564
565 /* set the new CPU state */
566 /* from this point, any exception which occurs can give problems */
567 env->cr[0] |= CR0_TS_MASK;
568 env->hflags |= HF_TS_MASK;
569 env->tr.selector = tss_selector;
570 env->tr.base = tss_base;
571 env->tr.limit = tss_limit;
572 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
573
574 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
575 cpu_x86_update_cr3(env, new_cr3);
576 }
577
578 /* load all registers without an exception, then reload them with
579 possible exception */
580 env->eip = new_eip;
581 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
582 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
583 if (!(type & 8))
584 eflags_mask &= 0xffff;
585 load_eflags(new_eflags, eflags_mask);
586 /* XXX: what to do in 16 bit case ? */
587 EAX = new_regs[0];
588 ECX = new_regs[1];
589 EDX = new_regs[2];
590 EBX = new_regs[3];
591 ESP = new_regs[4];
592 EBP = new_regs[5];
593 ESI = new_regs[6];
594 EDI = new_regs[7];
595 if (new_eflags & VM_MASK) {
596 for(i = 0; i < 6; i++)
597 load_seg_vm(i, new_segs[i]);
598 /* in vm86, CPL is always 3 */
599 cpu_x86_set_cpl(env, 3);
600 } else {
 601 /* CPL is set to the RPL of CS */
602 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
603 /* first just selectors as the rest may trigger exceptions */
604 for(i = 0; i < 6; i++)
605 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
606 }
607
608 env->ldt.selector = new_ldt & ~4;
609 env->ldt.base = 0;
610 env->ldt.limit = 0;
611 env->ldt.flags = 0;
612
613 /* load the LDT */
614 if (new_ldt & 4)
615 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
616
617 if ((new_ldt & 0xfffc) != 0) {
618 dt = &env->gdt;
619 index = new_ldt & ~7;
620 if ((index + 7) > dt->limit)
621 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
622 ptr = dt->base + index;
623 e1 = ldl_kernel(ptr);
624 e2 = ldl_kernel(ptr + 4);
625 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 if (!(e2 & DESC_P_MASK))
628 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
629 load_seg_cache_raw_dt(&env->ldt, e1, e2);
630 }
631
632 /* load the segments */
633 if (!(new_eflags & VM_MASK)) {
634 tss_load_seg(R_CS, new_segs[R_CS]);
635 tss_load_seg(R_SS, new_segs[R_SS]);
636 tss_load_seg(R_ES, new_segs[R_ES]);
637 tss_load_seg(R_DS, new_segs[R_DS]);
638 tss_load_seg(R_FS, new_segs[R_FS]);
639 tss_load_seg(R_GS, new_segs[R_GS]);
640 }
641
642 /* check that EIP is in the CS segment limits */
643 if (new_eip > env->segs[R_CS].limit) {
644 /* XXX: different exception if CALL ? */
645 raise_exception_err(EXCP0D_GPF, 0);
646 }
647}
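/* Editor's note (not part of the original file): switch_tss() follows the
 * hardware task-switch sequence: resolve a task gate to its TSS if needed,
 * validate the new TSS descriptor and limit, save the outgoing context into
 * the current TSS, clear/set the busy bits and NT as dictated by the source
 * (JMP, CALL or IRET), then load CR3, EFLAGS, the general registers, LDTR and
 * finally the segment registers from the incoming TSS, faulting with #TS/#GP
 * on any inconsistency.
 */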
648
649/* check if Port I/O is allowed in TSS */
650#ifndef VBOX
651static inline void check_io(int addr, int size)
652{
653 int io_offset, val, mask;
654
655#else /* VBOX */
656DECLINLINE(void) check_io(int addr, int size)
657{
658 int val, mask;
659 unsigned int io_offset;
660#endif /* VBOX */
661 /* TSS must be a valid 32 bit one */
662 if (!(env->tr.flags & DESC_P_MASK) ||
663 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
664 env->tr.limit < 103)
665 goto fail;
666 io_offset = lduw_kernel(env->tr.base + 0x66);
667 io_offset += (addr >> 3);
668 /* Note: the check needs two bytes */
669 if ((io_offset + 1) > env->tr.limit)
670 goto fail;
671 val = lduw_kernel(env->tr.base + io_offset);
672 val >>= (addr & 7);
673 mask = (1 << size) - 1;
674 /* all bits must be zero to allow the I/O */
675 if ((val & mask) != 0) {
676 fail:
677 raise_exception_err(EXCP0D_GPF, 0);
678 }
679}
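/* Editor's note (not part of the original file): worked example for the I/O
 * permission bitmap walk above, assuming check_io(0x3f9, 2) and an I/O map
 * base of 0x68 read from tr.base + 0x66: the word is fetched from
 * tr.base + 0x68 + (0x3f9 >> 3) = tr.base + 0xe7, shifted right by
 * (0x3f9 & 7) = 1, and the low (1 << 2) - 1 = 3 bits must all be clear for
 * the 2-byte access to be allowed.
 */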
680
681#ifdef VBOX
682/* Keep in sync with gen_check_external_event() */
683void helper_check_external_event()
684{
685 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
686 | CPU_INTERRUPT_EXTERNAL_TIMER
687 | CPU_INTERRUPT_EXTERNAL_DMA))
688 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
689 && (env->eflags & IF_MASK)
690 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
691 {
692 helper_external_event();
693 }
694
695}
696
697void helper_sync_seg(uint32_t reg)
698{
699 if (env->segs[reg].newselector)
700 sync_seg(env, reg, env->segs[reg].newselector);
701}
702#endif
703
704void helper_check_iob(uint32_t t0)
705{
706 check_io(t0, 1);
707}
708
709void helper_check_iow(uint32_t t0)
710{
711 check_io(t0, 2);
712}
713
714void helper_check_iol(uint32_t t0)
715{
716 check_io(t0, 4);
717}
718
719void helper_outb(uint32_t port, uint32_t data)
720{
721 cpu_outb(env, port, data & 0xff);
722}
723
724target_ulong helper_inb(uint32_t port)
725{
726 return cpu_inb(env, port);
727}
728
729void helper_outw(uint32_t port, uint32_t data)
730{
731 cpu_outw(env, port, data & 0xffff);
732}
733
734target_ulong helper_inw(uint32_t port)
735{
736 return cpu_inw(env, port);
737}
738
739void helper_outl(uint32_t port, uint32_t data)
740{
741 cpu_outl(env, port, data);
742}
743
744target_ulong helper_inl(uint32_t port)
745{
746 return cpu_inl(env, port);
747}
748
749#ifndef VBOX
750static inline unsigned int get_sp_mask(unsigned int e2)
751#else /* VBOX */
752DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
753#endif /* VBOX */
754{
755 if (e2 & DESC_B_MASK)
756 return 0xffffffff;
757 else
758 return 0xffff;
759}
760
761#ifdef TARGET_X86_64
762#define SET_ESP(val, sp_mask)\
763do {\
764 if ((sp_mask) == 0xffff)\
765 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
766 else if ((sp_mask) == 0xffffffffLL)\
767 ESP = (uint32_t)(val);\
768 else\
769 ESP = (val);\
770} while (0)
771#else
772#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
773#endif
774
775/* in 64-bit machines, this can overflow. So this segment addition macro
776 * can be used to trim the value to 32-bit whenever needed */
777#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
778
779/* XXX: add a is_user flag to have proper security support */
780#define PUSHW(ssp, sp, sp_mask, val)\
781{\
782 sp -= 2;\
783 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
784}
785
786#define PUSHL(ssp, sp, sp_mask, val)\
787{\
788 sp -= 4;\
789 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
790}
791
792#define POPW(ssp, sp, sp_mask, val)\
793{\
794 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
795 sp += 2;\
796}
797
798#define POPL(ssp, sp, sp_mask, val)\
799{\
800 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
801 sp += 4;\
802}
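/* Editor's note (not part of the original file): these macros deliberately take
 * sp and val as bare lvalue/expression arguments so that a sequence such as
 *
 *   esp = ESP;
 *   PUSHL(ssp, esp, sp_mask, error_code);
 *   SET_ESP(esp, sp_mask);
 *
 * (a minimal usage sketch, mirroring do_interrupt_protected() below) updates a
 * local copy of the stack pointer and only commits it back to ESP once all
 * pushes have completed without faulting.
 */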
803
804/* protected mode interrupt */
805static void do_interrupt_protected(int intno, int is_int, int error_code,
806 unsigned int next_eip, int is_hw)
807{
808 SegmentCache *dt;
809 target_ulong ptr, ssp;
810 int type, dpl, selector, ss_dpl, cpl;
811 int has_error_code, new_stack, shift;
812 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
813 uint32_t old_eip, sp_mask;
814
815#ifdef VBOX
816 ss = ss_e1 = ss_e2 = 0;
817# ifdef VBOX_WITH_VMI
818 if ( intno == 6
819 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
820 {
821 env->exception_index = EXCP_PARAV_CALL;
822 cpu_loop_exit();
823 }
824# endif
825 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
826 cpu_loop_exit();
827#endif
828
829 has_error_code = 0;
830 if (!is_int && !is_hw) {
831 switch(intno) {
832 case 8:
833 case 10:
834 case 11:
835 case 12:
836 case 13:
837 case 14:
838 case 17:
839 has_error_code = 1;
840 break;
841 }
842 }
843 if (is_int)
844 old_eip = next_eip;
845 else
846 old_eip = env->eip;
847
848 dt = &env->idt;
849#ifndef VBOX
850 if (intno * 8 + 7 > dt->limit)
851#else
852 if ((unsigned)intno * 8 + 7 > dt->limit)
853#endif
854 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
855 ptr = dt->base + intno * 8;
856 e1 = ldl_kernel(ptr);
857 e2 = ldl_kernel(ptr + 4);
858 /* check gate type */
859 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
860 switch(type) {
861 case 5: /* task gate */
862 /* must do that check here to return the correct error code */
863 if (!(e2 & DESC_P_MASK))
864 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
865 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
866 if (has_error_code) {
867 int type;
868 uint32_t mask;
869 /* push the error code */
870 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
871 shift = type >> 3;
872 if (env->segs[R_SS].flags & DESC_B_MASK)
873 mask = 0xffffffff;
874 else
875 mask = 0xffff;
876 esp = (ESP - (2 << shift)) & mask;
877 ssp = env->segs[R_SS].base + esp;
878 if (shift)
879 stl_kernel(ssp, error_code);
880 else
881 stw_kernel(ssp, error_code);
882 SET_ESP(esp, mask);
883 }
884 return;
885 case 6: /* 286 interrupt gate */
886 case 7: /* 286 trap gate */
887 case 14: /* 386 interrupt gate */
888 case 15: /* 386 trap gate */
889 break;
890 default:
891 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
892 break;
893 }
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 cpl = env->hflags & HF_CPL_MASK;
896 /* check privilege if software int */
897 if (is_int && dpl < cpl)
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 /* check valid bit */
900 if (!(e2 & DESC_P_MASK))
901 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
902 selector = e1 >> 16;
903 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
904 if ((selector & 0xfffc) == 0)
905 raise_exception_err(EXCP0D_GPF, 0);
906
907 if (load_segment(&e1, &e2, selector) != 0)
908 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 if (dpl > cpl)
913 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
916 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
917 /* to inner privilege */
918 get_ss_esp_from_tss(&ss, &esp, dpl);
919 if ((ss & 0xfffc) == 0)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 if ((ss & 3) != dpl)
922 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
923 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
926 if (ss_dpl != dpl)
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if (!(ss_e2 & DESC_S_MASK) ||
929 (ss_e2 & DESC_CS_MASK) ||
930 !(ss_e2 & DESC_W_MASK))
931 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
932 if (!(ss_e2 & DESC_P_MASK))
933#ifdef VBOX /* See page 3-477 of 253666.pdf */
934 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
935#else
936 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
937#endif
938 new_stack = 1;
939 sp_mask = get_sp_mask(ss_e2);
940 ssp = get_seg_base(ss_e1, ss_e2);
941#if defined(VBOX) && defined(DEBUG)
942 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
943#endif
944 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
945 /* to same privilege */
946 if (env->eflags & VM_MASK)
947 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948 new_stack = 0;
949 sp_mask = get_sp_mask(env->segs[R_SS].flags);
950 ssp = env->segs[R_SS].base;
951 esp = ESP;
952 dpl = cpl;
953 } else {
954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
955 new_stack = 0; /* avoid warning */
956 sp_mask = 0; /* avoid warning */
957 ssp = 0; /* avoid warning */
958 esp = 0; /* avoid warning */
959 }
960
961 shift = type >> 3;
962
963#if 0
964 /* XXX: check that enough room is available */
965 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
966 if (env->eflags & VM_MASK)
967 push_size += 8;
968 push_size <<= shift;
969#endif
970 if (shift == 1) {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHL(ssp, esp, sp_mask, ESP);
980 }
981 PUSHL(ssp, esp, sp_mask, compute_eflags());
982 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHL(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHL(ssp, esp, sp_mask, error_code);
986 }
987 } else {
988 if (new_stack) {
989 if (env->eflags & VM_MASK) {
990 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
991 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
992 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
993 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
994 }
995 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
996 PUSHW(ssp, esp, sp_mask, ESP);
997 }
998 PUSHW(ssp, esp, sp_mask, compute_eflags());
999 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1000 PUSHW(ssp, esp, sp_mask, old_eip);
1001 if (has_error_code) {
1002 PUSHW(ssp, esp, sp_mask, error_code);
1003 }
1004 }
1005
1006 if (new_stack) {
1007 if (env->eflags & VM_MASK) {
1008 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1009 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1010 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1011 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1012 }
1013 ss = (ss & ~3) | dpl;
1014 cpu_x86_load_seg_cache(env, R_SS, ss,
1015 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1016 }
1017 SET_ESP(esp, sp_mask);
1018
1019 selector = (selector & ~3) | dpl;
1020 cpu_x86_load_seg_cache(env, R_CS, selector,
1021 get_seg_base(e1, e2),
1022 get_seg_limit(e1, e2),
1023 e2);
1024 cpu_x86_set_cpl(env, dpl);
1025 env->eip = offset;
1026
1027 /* interrupt gate clear IF mask */
1028 if ((type & 1) == 0) {
1029 env->eflags &= ~IF_MASK;
1030 }
1031#ifndef VBOX
1032 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1033#else
1034 /*
1035 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
 1036 * gets confused by seemingly changed EFLAGS. See #3491 and
1037 * public bug #2341.
1038 */
1039 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1040#endif
1041}
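/* Editor's note (not part of the original file): summary of the frame built by
 * do_interrupt_protected() for a 32-bit interrupt/trap gate that switches to an
 * inner-privilege stack (values pushed in this order, highest address first):
 *   GS, FS, DS, ES          -- only when interrupting virtual-8086 mode
 *   SS, ESP                 -- old stack, only when a new stack is loaded
 *   EFLAGS, CS, EIP
 *   error code              -- only for exceptions that carry one
 */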
1042#ifdef VBOX
1043
1044/* check if VME interrupt redirection is enabled in TSS */
1045DECLINLINE(bool) is_vme_irq_redirected(int intno)
1046{
1047 unsigned int io_offset, intredir_offset;
1048 unsigned char val, mask;
1049
1050 /* TSS must be a valid 32 bit one */
1051 if (!(env->tr.flags & DESC_P_MASK) ||
1052 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1053 env->tr.limit < 103)
1054 goto fail;
1055 io_offset = lduw_kernel(env->tr.base + 0x66);
1056 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1057 if (io_offset < 0x68 + 0x20)
1058 io_offset = 0x68 + 0x20;
1059 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1060 intredir_offset = io_offset - 0x20;
1061
1062 intredir_offset += (intno >> 3);
1063 if ((intredir_offset) > env->tr.limit)
1064 goto fail;
1065
1066 val = ldub_kernel(env->tr.base + intredir_offset);
1067 mask = 1 << (unsigned char)(intno & 7);
1068
1069 /* bit set means no redirection. */
1070 if ((val & mask) != 0) {
1071 return false;
1072 }
1073 return true;
1074
1075fail:
1076 raise_exception_err(EXCP0D_GPF, 0);
1077 return true;
1078}
1079
1080/* V86 mode software interrupt with CR4.VME=1 */
1081static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1082{
1083 target_ulong ptr, ssp;
1084 int selector;
1085 uint32_t offset, esp;
1086 uint32_t old_cs, old_eflags;
1087 uint32_t iopl;
1088
1089 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1090
1091 if (!is_vme_irq_redirected(intno))
1092 {
1093 if (iopl == 3)
1094 {
1095 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1096 return;
1097 }
1098 else
1099 raise_exception_err(EXCP0D_GPF, 0);
1100 }
1101
1102 /* virtual mode idt is at linear address 0 */
1103 ptr = 0 + intno * 4;
1104 offset = lduw_kernel(ptr);
1105 selector = lduw_kernel(ptr + 2);
1106 esp = ESP;
1107 ssp = env->segs[R_SS].base;
1108 old_cs = env->segs[R_CS].selector;
1109
1110 old_eflags = compute_eflags();
1111 if (iopl < 3)
1112 {
1113 /* copy VIF into IF and set IOPL to 3 */
1114 if (env->eflags & VIF_MASK)
1115 old_eflags |= IF_MASK;
1116 else
1117 old_eflags &= ~IF_MASK;
1118
1119 old_eflags |= (3 << IOPL_SHIFT);
1120 }
1121
1122 /* XXX: use SS segment size ? */
1123 PUSHW(ssp, esp, 0xffff, old_eflags);
1124 PUSHW(ssp, esp, 0xffff, old_cs);
1125 PUSHW(ssp, esp, 0xffff, next_eip);
1126
1127 /* update processor state */
1128 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1129 env->eip = offset;
1130 env->segs[R_CS].selector = selector;
1131 env->segs[R_CS].base = (selector << 4);
1132 env->eflags &= ~(TF_MASK | RF_MASK);
1133
1134 if (iopl < 3)
1135 env->eflags &= ~VIF_MASK;
1136 else
1137 env->eflags &= ~IF_MASK;
1138}
1139#endif /* VBOX */
1140
1141#ifdef TARGET_X86_64
1142
1143#define PUSHQ(sp, val)\
1144{\
1145 sp -= 8;\
1146 stq_kernel(sp, (val));\
1147}
1148
1149#define POPQ(sp, val)\
1150{\
1151 val = ldq_kernel(sp);\
1152 sp += 8;\
1153}
1154
1155#ifndef VBOX
1156static inline target_ulong get_rsp_from_tss(int level)
1157#else /* VBOX */
1158DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1159#endif /* VBOX */
1160{
1161 int index;
1162
1163#if 0
1164 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1165 env->tr.base, env->tr.limit);
1166#endif
1167
1168 if (!(env->tr.flags & DESC_P_MASK))
1169 cpu_abort(env, "invalid tss");
1170 index = 8 * level + 4;
1171 if ((index + 7) > env->tr.limit)
1172 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1173 return ldq_kernel(env->tr.base + index);
1174}
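/* Editor's note (not part of the original file): in the 64-bit TSS, RSP0/1/2
 * live at offsets 4, 12 and 20 and IST1..IST7 at offsets 36..84, so the
 * "8 * level + 4" indexing above covers both cases: level 0..2 selects an
 * RSPn, while the ist != 0 callers below pass ist + 3 to land on ISTn.
 */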
1175
1176/* 64 bit interrupt */
1177static void do_interrupt64(int intno, int is_int, int error_code,
1178 target_ulong next_eip, int is_hw)
1179{
1180 SegmentCache *dt;
1181 target_ulong ptr;
1182 int type, dpl, selector, cpl, ist;
1183 int has_error_code, new_stack;
1184 uint32_t e1, e2, e3, ss;
1185 target_ulong old_eip, esp, offset;
1186
1187#ifdef VBOX
1188 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1189 cpu_loop_exit();
1190#endif
1191
1192 has_error_code = 0;
1193 if (!is_int && !is_hw) {
1194 switch(intno) {
1195 case 8:
1196 case 10:
1197 case 11:
1198 case 12:
1199 case 13:
1200 case 14:
1201 case 17:
1202 has_error_code = 1;
1203 break;
1204 }
1205 }
1206 if (is_int)
1207 old_eip = next_eip;
1208 else
1209 old_eip = env->eip;
1210
1211 dt = &env->idt;
1212 if (intno * 16 + 15 > dt->limit)
1213 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1214 ptr = dt->base + intno * 16;
1215 e1 = ldl_kernel(ptr);
1216 e2 = ldl_kernel(ptr + 4);
1217 e3 = ldl_kernel(ptr + 8);
1218 /* check gate type */
1219 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1220 switch(type) {
1221 case 14: /* 386 interrupt gate */
1222 case 15: /* 386 trap gate */
1223 break;
1224 default:
1225 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1226 break;
1227 }
1228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1229 cpl = env->hflags & HF_CPL_MASK;
1230 /* check privilege if software int */
1231 if (is_int && dpl < cpl)
1232 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1233 /* check valid bit */
1234 if (!(e2 & DESC_P_MASK))
1235 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1236 selector = e1 >> 16;
1237 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1238 ist = e2 & 7;
1239 if ((selector & 0xfffc) == 0)
1240 raise_exception_err(EXCP0D_GPF, 0);
1241
1242 if (load_segment(&e1, &e2, selector) != 0)
1243 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1244 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1246 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1247 if (dpl > cpl)
1248 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1249 if (!(e2 & DESC_P_MASK))
1250 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1251 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1252 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1253 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1254 /* to inner privilege */
1255 if (ist != 0)
1256 esp = get_rsp_from_tss(ist + 3);
1257 else
1258 esp = get_rsp_from_tss(dpl);
1259 esp &= ~0xfLL; /* align stack */
1260 ss = 0;
1261 new_stack = 1;
1262 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1263 /* to same privilege */
1264 if (env->eflags & VM_MASK)
1265 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1266 new_stack = 0;
1267 if (ist != 0)
1268 esp = get_rsp_from_tss(ist + 3);
1269 else
1270 esp = ESP;
1271 esp &= ~0xfLL; /* align stack */
1272 dpl = cpl;
1273 } else {
1274 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1275 new_stack = 0; /* avoid warning */
1276 esp = 0; /* avoid warning */
1277 }
1278
1279 PUSHQ(esp, env->segs[R_SS].selector);
1280 PUSHQ(esp, ESP);
1281 PUSHQ(esp, compute_eflags());
1282 PUSHQ(esp, env->segs[R_CS].selector);
1283 PUSHQ(esp, old_eip);
1284 if (has_error_code) {
1285 PUSHQ(esp, error_code);
1286 }
1287
1288 if (new_stack) {
1289 ss = 0 | dpl;
1290 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1291 }
1292 ESP = esp;
1293
1294 selector = (selector & ~3) | dpl;
1295 cpu_x86_load_seg_cache(env, R_CS, selector,
1296 get_seg_base(e1, e2),
1297 get_seg_limit(e1, e2),
1298 e2);
1299 cpu_x86_set_cpl(env, dpl);
1300 env->eip = offset;
1301
1302 /* interrupt gate clear IF mask */
1303 if ((type & 1) == 0) {
1304 env->eflags &= ~IF_MASK;
1305 }
1306
1307#ifndef VBOX
1308 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1309#else
1310 /*
1311 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
 1312 * gets confused by seemingly changed EFLAGS. See #3491 and
1313 * public bug #2341.
1314 */
1315 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1316#endif
1317}
1318#endif
1319
1320#if defined(CONFIG_USER_ONLY)
1321void helper_syscall(int next_eip_addend)
1322{
1323 env->exception_index = EXCP_SYSCALL;
1324 env->exception_next_eip = env->eip + next_eip_addend;
1325 cpu_loop_exit();
1326}
1327#else
1328void helper_syscall(int next_eip_addend)
1329{
1330 int selector;
1331
1332 if (!(env->efer & MSR_EFER_SCE)) {
1333 raise_exception_err(EXCP06_ILLOP, 0);
1334 }
1335 selector = (env->star >> 32) & 0xffff;
1336#ifdef TARGET_X86_64
1337 if (env->hflags & HF_LMA_MASK) {
1338 int code64;
1339
1340 ECX = env->eip + next_eip_addend;
1341 env->regs[11] = compute_eflags();
1342
1343 code64 = env->hflags & HF_CS64_MASK;
1344
1345 cpu_x86_set_cpl(env, 0);
1346 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1347 0, 0xffffffff,
1348 DESC_G_MASK | DESC_P_MASK |
1349 DESC_S_MASK |
1350 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1351 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1352 0, 0xffffffff,
1353 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1354 DESC_S_MASK |
1355 DESC_W_MASK | DESC_A_MASK);
1356 env->eflags &= ~env->fmask;
1357 load_eflags(env->eflags, 0);
1358 if (code64)
1359 env->eip = env->lstar;
1360 else
1361 env->eip = env->cstar;
1362 } else
1363#endif
1364 {
1365 ECX = (uint32_t)(env->eip + next_eip_addend);
1366
1367 cpu_x86_set_cpl(env, 0);
1368 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1369 0, 0xffffffff,
1370 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1371 DESC_S_MASK |
1372 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1373 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1374 0, 0xffffffff,
1375 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1376 DESC_S_MASK |
1377 DESC_W_MASK | DESC_A_MASK);
1378 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1379 env->eip = (uint32_t)env->star;
1380 }
1381}
1382#endif
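/* Editor's note (not part of the original file): the selectors and target RIP
 * used by helper_syscall() come from the SYSCALL MSRs: CS is STAR[47:32] (SS is
 * that value + 8), the 64-bit entry point is LSTAR, the compatibility-mode
 * entry point is CSTAR, and the RFLAGS bits listed in SFMASK (env->fmask) are
 * cleared on entry; legacy (non-LMA) SYSCALL jumps to the EIP held in
 * STAR[31:0] instead.
 */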
1383
1384void helper_sysret(int dflag)
1385{
1386 int cpl, selector;
1387
1388 if (!(env->efer & MSR_EFER_SCE)) {
1389 raise_exception_err(EXCP06_ILLOP, 0);
1390 }
1391 cpl = env->hflags & HF_CPL_MASK;
1392 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1393 raise_exception_err(EXCP0D_GPF, 0);
1394 }
1395 selector = (env->star >> 48) & 0xffff;
1396#ifdef TARGET_X86_64
1397 if (env->hflags & HF_LMA_MASK) {
1398 if (dflag == 2) {
1399 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1400 0, 0xffffffff,
1401 DESC_G_MASK | DESC_P_MASK |
1402 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1403 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1404 DESC_L_MASK);
1405 env->eip = ECX;
1406 } else {
1407 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1408 0, 0xffffffff,
1409 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1410 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1411 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1412 env->eip = (uint32_t)ECX;
1413 }
1414 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1415 0, 0xffffffff,
1416 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1417 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1418 DESC_W_MASK | DESC_A_MASK);
1419 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1420 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1421 cpu_x86_set_cpl(env, 3);
1422 } else
1423#endif
1424 {
1425 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1426 0, 0xffffffff,
1427 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1428 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1429 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1430 env->eip = (uint32_t)ECX;
1431 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1432 0, 0xffffffff,
1433 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1434 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1435 DESC_W_MASK | DESC_A_MASK);
1436 env->eflags |= IF_MASK;
1437 cpu_x86_set_cpl(env, 3);
1438 }
1439#ifdef USE_KQEMU
1440 if (kqemu_is_ok(env)) {
1441 if (env->hflags & HF_LMA_MASK)
1442 CC_OP = CC_OP_EFLAGS;
1443 env->exception_index = -1;
1444 cpu_loop_exit();
1445 }
1446#endif
1447}
1448
1449#ifdef VBOX
1450/**
1451 * Checks and processes external VMM events.
1452 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1453 */
1454void helper_external_event(void)
1455{
1456#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1457 uintptr_t uSP;
1458# ifdef RT_ARCH_AMD64
1459 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1460# else
1461 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1462# endif
1463 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1464#endif
1465 /* Keep in sync with flags checked by gen_check_external_event() */
1466 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1467 {
1468 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1469 ~CPU_INTERRUPT_EXTERNAL_HARD);
1470 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1471 }
1472 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1473 {
1474 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1475 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1476 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1477 }
1478 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1479 {
1480 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1481 ~CPU_INTERRUPT_EXTERNAL_DMA);
1482 remR3DmaRun(env);
1483 }
1484 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1485 {
1486 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1487 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1488 remR3TimersRun(env);
1489 }
1490}
1491/* helper for recording call instruction addresses for later scanning */
1492void helper_record_call()
1493{
1494 if ( !(env->state & CPU_RAW_RING0)
1495 && (env->cr[0] & CR0_PG_MASK)
1496 && !(env->eflags & X86_EFL_IF))
1497 remR3RecordCall(env);
1498}
1499#endif /* VBOX */
1500
1501/* real mode interrupt */
1502static void do_interrupt_real(int intno, int is_int, int error_code,
1503 unsigned int next_eip)
1504{
1505 SegmentCache *dt;
1506 target_ulong ptr, ssp;
1507 int selector;
1508 uint32_t offset, esp;
1509 uint32_t old_cs, old_eip;
1510
1511 /* real mode (simpler !) */
1512 dt = &env->idt;
1513#ifndef VBOX
1514 if (intno * 4 + 3 > dt->limit)
1515#else
1516 if ((unsigned)intno * 4 + 3 > dt->limit)
1517#endif
1518 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1519 ptr = dt->base + intno * 4;
1520 offset = lduw_kernel(ptr);
1521 selector = lduw_kernel(ptr + 2);
1522 esp = ESP;
1523 ssp = env->segs[R_SS].base;
1524 if (is_int)
1525 old_eip = next_eip;
1526 else
1527 old_eip = env->eip;
1528 old_cs = env->segs[R_CS].selector;
1529 /* XXX: use SS segment size ? */
1530 PUSHW(ssp, esp, 0xffff, compute_eflags());
1531 PUSHW(ssp, esp, 0xffff, old_cs);
1532 PUSHW(ssp, esp, 0xffff, old_eip);
1533
1534 /* update processor state */
1535 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1536 env->eip = offset;
1537 env->segs[R_CS].selector = selector;
1538 env->segs[R_CS].base = (selector << 4);
1539 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1540}
1541
1542/* fake user mode interrupt */
1543void do_interrupt_user(int intno, int is_int, int error_code,
1544 target_ulong next_eip)
1545{
1546 SegmentCache *dt;
1547 target_ulong ptr;
1548 int dpl, cpl, shift;
1549 uint32_t e2;
1550
1551 dt = &env->idt;
1552 if (env->hflags & HF_LMA_MASK) {
1553 shift = 4;
1554 } else {
1555 shift = 3;
1556 }
1557 ptr = dt->base + (intno << shift);
1558 e2 = ldl_kernel(ptr + 4);
1559
1560 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1561 cpl = env->hflags & HF_CPL_MASK;
1562 /* check privilege if software int */
1563 if (is_int && dpl < cpl)
1564 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1565
 1566 /* Since we emulate only user space, we cannot do more than
 1567 exit the emulation with the suitable exception and error
 1568 code */
1569 if (is_int)
1570 EIP = next_eip;
1571}
1572
1573/*
 1574 * Begin execution of an interrupt. is_int is TRUE if coming from
1575 * the int instruction. next_eip is the EIP value AFTER the interrupt
1576 * instruction. It is only relevant if is_int is TRUE.
1577 */
1578void do_interrupt(int intno, int is_int, int error_code,
1579 target_ulong next_eip, int is_hw)
1580{
1581 if (loglevel & CPU_LOG_INT) {
1582 if ((env->cr[0] & CR0_PE_MASK)) {
1583 static int count;
1584 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1585 count, intno, error_code, is_int,
1586 env->hflags & HF_CPL_MASK,
1587 env->segs[R_CS].selector, EIP,
1588 (int)env->segs[R_CS].base + EIP,
1589 env->segs[R_SS].selector, ESP);
1590 if (intno == 0x0e) {
1591 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1592 } else {
1593 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1594 }
1595 fprintf(logfile, "\n");
1596 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1597#if 0
1598 {
1599 int i;
1600 uint8_t *ptr;
1601 fprintf(logfile, " code=");
1602 ptr = env->segs[R_CS].base + env->eip;
1603 for(i = 0; i < 16; i++) {
1604 fprintf(logfile, " %02x", ldub(ptr + i));
1605 }
1606 fprintf(logfile, "\n");
1607 }
1608#endif
1609 count++;
1610 }
1611 }
1612 if (env->cr[0] & CR0_PE_MASK) {
1613#ifdef TARGET_X86_64
1614 if (env->hflags & HF_LMA_MASK) {
1615 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1616 } else
1617#endif
1618 {
1619#ifdef VBOX
1620 /* int xx *, v86 code and VME enabled? */
1621 if ( (env->eflags & VM_MASK)
1622 && (env->cr[4] & CR4_VME_MASK)
1623 && is_int
1624 && !is_hw
1625 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1626 )
1627 do_soft_interrupt_vme(intno, error_code, next_eip);
1628 else
1629#endif /* VBOX */
1630 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1631 }
1632 } else {
1633 do_interrupt_real(intno, is_int, error_code, next_eip);
1634 }
1635}
1636
1637/*
1638 * Check nested exceptions and change to double or triple fault if
 1639 * needed. It should only be called if this is not an interrupt.
1640 * Returns the new exception number.
1641 */
1642static int check_exception(int intno, int *error_code)
1643{
1644 int first_contributory = env->old_exception == 0 ||
1645 (env->old_exception >= 10 &&
1646 env->old_exception <= 13);
1647 int second_contributory = intno == 0 ||
1648 (intno >= 10 && intno <= 13);
1649
1650 if (loglevel & CPU_LOG_INT)
1651 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1652 env->old_exception, intno);
1653
1654 if (env->old_exception == EXCP08_DBLE)
1655 cpu_abort(env, "triple fault");
1656
1657 if ((first_contributory && second_contributory)
1658 || (env->old_exception == EXCP0E_PAGE &&
1659 (second_contributory || (intno == EXCP0E_PAGE)))) {
1660 intno = EXCP08_DBLE;
1661 *error_code = 0;
1662 }
1663
1664 if (second_contributory || (intno == EXCP0E_PAGE) ||
1665 (intno == EXCP08_DBLE))
1666 env->old_exception = intno;
1667
1668 return intno;
1669}
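/* Editor's note (not part of the original file): example of the rule above --
 * a benign exception followed by anything restarts normally, but a
 * contributory fault (#DE, or vectors 10..13) raised while delivering another
 * contributory fault, or any of those (or a second #PF) raised while
 * delivering a #PF, is promoted to #DF; and a fault raised while delivering
 * #DF aborts with a triple fault.
 */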
1670
1671/*
1672 * Signal an interruption. It is executed in the main CPU loop.
1673 * is_int is TRUE if coming from the int instruction. next_eip is the
1674 * EIP value AFTER the interrupt instruction. It is only relevant if
1675 * is_int is TRUE.
1676 */
1677void raise_interrupt(int intno, int is_int, int error_code,
1678 int next_eip_addend)
1679{
1680#if defined(VBOX) && defined(DEBUG)
1681 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1682#endif
1683 if (!is_int) {
1684 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1685 intno = check_exception(intno, &error_code);
1686 } else {
1687 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1688 }
1689
1690 env->exception_index = intno;
1691 env->error_code = error_code;
1692 env->exception_is_int = is_int;
1693 env->exception_next_eip = env->eip + next_eip_addend;
1694 cpu_loop_exit();
1695}
1696
1697/* shortcuts to generate exceptions */
1698
1699void (raise_exception_err)(int exception_index, int error_code)
1700{
1701 raise_interrupt(exception_index, 0, error_code, 0);
1702}
1703
1704void raise_exception(int exception_index)
1705{
1706 raise_interrupt(exception_index, 0, 0, 0);
1707}
1708
1709/* SMM support */
1710
1711#if defined(CONFIG_USER_ONLY)
1712
1713void do_smm_enter(void)
1714{
1715}
1716
1717void helper_rsm(void)
1718{
1719}
1720
1721#else
1722
1723#ifdef TARGET_X86_64
1724#define SMM_REVISION_ID 0x00020064
1725#else
1726#define SMM_REVISION_ID 0x00020000
1727#endif
1728
1729void do_smm_enter(void)
1730{
1731 target_ulong sm_state;
1732 SegmentCache *dt;
1733 int i, offset;
1734
1735 if (loglevel & CPU_LOG_INT) {
1736 fprintf(logfile, "SMM: enter\n");
1737 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1738 }
1739
1740 env->hflags |= HF_SMM_MASK;
1741 cpu_smm_update(env);
1742
1743 sm_state = env->smbase + 0x8000;
1744
1745#ifdef TARGET_X86_64
1746 for(i = 0; i < 6; i++) {
1747 dt = &env->segs[i];
1748 offset = 0x7e00 + i * 16;
1749 stw_phys(sm_state + offset, dt->selector);
1750 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1751 stl_phys(sm_state + offset + 4, dt->limit);
1752 stq_phys(sm_state + offset + 8, dt->base);
1753 }
1754
1755 stq_phys(sm_state + 0x7e68, env->gdt.base);
1756 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1757
1758 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1759 stq_phys(sm_state + 0x7e78, env->ldt.base);
1760 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1761 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1762
1763 stq_phys(sm_state + 0x7e88, env->idt.base);
1764 stl_phys(sm_state + 0x7e84, env->idt.limit);
1765
1766 stw_phys(sm_state + 0x7e90, env->tr.selector);
1767 stq_phys(sm_state + 0x7e98, env->tr.base);
1768 stl_phys(sm_state + 0x7e94, env->tr.limit);
1769 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1770
1771 stq_phys(sm_state + 0x7ed0, env->efer);
1772
1773 stq_phys(sm_state + 0x7ff8, EAX);
1774 stq_phys(sm_state + 0x7ff0, ECX);
1775 stq_phys(sm_state + 0x7fe8, EDX);
1776 stq_phys(sm_state + 0x7fe0, EBX);
1777 stq_phys(sm_state + 0x7fd8, ESP);
1778 stq_phys(sm_state + 0x7fd0, EBP);
1779 stq_phys(sm_state + 0x7fc8, ESI);
1780 stq_phys(sm_state + 0x7fc0, EDI);
1781 for(i = 8; i < 16; i++)
1782 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1783 stq_phys(sm_state + 0x7f78, env->eip);
1784 stl_phys(sm_state + 0x7f70, compute_eflags());
1785 stl_phys(sm_state + 0x7f68, env->dr[6]);
1786 stl_phys(sm_state + 0x7f60, env->dr[7]);
1787
1788 stl_phys(sm_state + 0x7f48, env->cr[4]);
1789 stl_phys(sm_state + 0x7f50, env->cr[3]);
1790 stl_phys(sm_state + 0x7f58, env->cr[0]);
1791
1792 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1793 stl_phys(sm_state + 0x7f00, env->smbase);
1794#else
1795 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1796 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1797 stl_phys(sm_state + 0x7ff4, compute_eflags());
1798 stl_phys(sm_state + 0x7ff0, env->eip);
1799 stl_phys(sm_state + 0x7fec, EDI);
1800 stl_phys(sm_state + 0x7fe8, ESI);
1801 stl_phys(sm_state + 0x7fe4, EBP);
1802 stl_phys(sm_state + 0x7fe0, ESP);
1803 stl_phys(sm_state + 0x7fdc, EBX);
1804 stl_phys(sm_state + 0x7fd8, EDX);
1805 stl_phys(sm_state + 0x7fd4, ECX);
1806 stl_phys(sm_state + 0x7fd0, EAX);
1807 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1808 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1809
1810 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1811 stl_phys(sm_state + 0x7f64, env->tr.base);
1812 stl_phys(sm_state + 0x7f60, env->tr.limit);
1813 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1814
1815 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1816 stl_phys(sm_state + 0x7f80, env->ldt.base);
1817 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1818 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1819
1820 stl_phys(sm_state + 0x7f74, env->gdt.base);
1821 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1822
1823 stl_phys(sm_state + 0x7f58, env->idt.base);
1824 stl_phys(sm_state + 0x7f54, env->idt.limit);
1825
1826 for(i = 0; i < 6; i++) {
1827 dt = &env->segs[i];
1828 if (i < 3)
1829 offset = 0x7f84 + i * 12;
1830 else
1831 offset = 0x7f2c + (i - 3) * 12;
1832 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1833 stl_phys(sm_state + offset + 8, dt->base);
1834 stl_phys(sm_state + offset + 4, dt->limit);
1835 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1836 }
1837 stl_phys(sm_state + 0x7f14, env->cr[4]);
1838
1839 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1840 stl_phys(sm_state + 0x7ef8, env->smbase);
1841#endif
1842 /* init SMM cpu state */
1843
1844#ifdef TARGET_X86_64
1845 cpu_load_efer(env, 0);
1846#endif
1847 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1848 env->eip = 0x00008000;
1849 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1850 0xffffffff, 0);
1851 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1852 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1853 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1854 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1855 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1856
1857 cpu_x86_update_cr0(env,
1858 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1859 cpu_x86_update_cr4(env, 0);
1860 env->dr[7] = 0x00000400;
1861 CC_OP = CC_OP_EFLAGS;
1862}
1863
1864void helper_rsm(void)
1865{
1866#ifdef VBOX
1867 cpu_abort(env, "helper_rsm");
1868#else /* !VBOX */
 1871 target_ulong sm_state;
1872 int i, offset;
1873 uint32_t val;
1874
1875 sm_state = env->smbase + 0x8000;
1876#ifdef TARGET_X86_64
1877 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1878
1879 for(i = 0; i < 6; i++) {
1880 offset = 0x7e00 + i * 16;
1881 cpu_x86_load_seg_cache(env, i,
1882 lduw_phys(sm_state + offset),
1883 ldq_phys(sm_state + offset + 8),
1884 ldl_phys(sm_state + offset + 4),
1885 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1886 }
1887
1888 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1889 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1890
1891 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1892 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1893 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1894 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1895
1896 env->idt.base = ldq_phys(sm_state + 0x7e88);
1897 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1898
1899 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1900 env->tr.base = ldq_phys(sm_state + 0x7e98);
1901 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1902 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1903
1904 EAX = ldq_phys(sm_state + 0x7ff8);
1905 ECX = ldq_phys(sm_state + 0x7ff0);
1906 EDX = ldq_phys(sm_state + 0x7fe8);
1907 EBX = ldq_phys(sm_state + 0x7fe0);
1908 ESP = ldq_phys(sm_state + 0x7fd8);
1909 EBP = ldq_phys(sm_state + 0x7fd0);
1910 ESI = ldq_phys(sm_state + 0x7fc8);
1911 EDI = ldq_phys(sm_state + 0x7fc0);
1912 for(i = 8; i < 16; i++)
1913 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1914 env->eip = ldq_phys(sm_state + 0x7f78);
1915 load_eflags(ldl_phys(sm_state + 0x7f70),
1916 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1917 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1918 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1919
1920 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1921 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1922 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1923
1924 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1925 if (val & 0x20000) {
1926 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1927 }
1928#else
1929 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1930 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1931 load_eflags(ldl_phys(sm_state + 0x7ff4),
1932 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1933 env->eip = ldl_phys(sm_state + 0x7ff0);
1934 EDI = ldl_phys(sm_state + 0x7fec);
1935 ESI = ldl_phys(sm_state + 0x7fe8);
1936 EBP = ldl_phys(sm_state + 0x7fe4);
1937 ESP = ldl_phys(sm_state + 0x7fe0);
1938 EBX = ldl_phys(sm_state + 0x7fdc);
1939 EDX = ldl_phys(sm_state + 0x7fd8);
1940 ECX = ldl_phys(sm_state + 0x7fd4);
1941 EAX = ldl_phys(sm_state + 0x7fd0);
1942 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1943 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1944
1945 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1946 env->tr.base = ldl_phys(sm_state + 0x7f64);
1947 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1948 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1949
1950 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1951 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1952 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1953 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1954
1955 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1956 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1957
1958 env->idt.base = ldl_phys(sm_state + 0x7f58);
1959 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1960
1961 for(i = 0; i < 6; i++) {
1962 if (i < 3)
1963 offset = 0x7f84 + i * 12;
1964 else
1965 offset = 0x7f2c + (i - 3) * 12;
1966 cpu_x86_load_seg_cache(env, i,
1967 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1968 ldl_phys(sm_state + offset + 8),
1969 ldl_phys(sm_state + offset + 4),
1970 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1971 }
1972 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1973
1974 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1975 if (val & 0x20000) {
1976 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1977 }
1978#endif
1979 CC_OP = CC_OP_EFLAGS;
1980 env->hflags &= ~HF_SMM_MASK;
1981 cpu_smm_update(env);
1982
1983 if (loglevel & CPU_LOG_INT) {
1984 fprintf(logfile, "SMM: after RSM\n");
1985 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1986 }
1987#endif /* !VBOX */
1988}
1989
1990#endif /* !CONFIG_USER_ONLY */
1991
1992
1993/* division, flags are undefined */
1994
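/* DIV/IDIV helpers: the dividend is taken from AX, DX:AX or EDX:EAX, the
   quotient and remainder are written back to the same registers, and both a
   zero divisor and a quotient that does not fit the destination raise #DE
   (EXCP00_DIVZ, vector 0). Flags are left undefined, as noted above. */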
1995void helper_divb_AL(target_ulong t0)
1996{
1997 unsigned int num, den, q, r;
1998
1999 num = (EAX & 0xffff);
2000 den = (t0 & 0xff);
2001 if (den == 0) {
2002 raise_exception(EXCP00_DIVZ);
2003 }
2004 q = (num / den);
2005 if (q > 0xff)
2006 raise_exception(EXCP00_DIVZ);
2007 q &= 0xff;
2008 r = (num % den) & 0xff;
2009 EAX = (EAX & ~0xffff) | (r << 8) | q;
2010}
2011
2012void helper_idivb_AL(target_ulong t0)
2013{
2014 int num, den, q, r;
2015
2016 num = (int16_t)EAX;
2017 den = (int8_t)t0;
2018 if (den == 0) {
2019 raise_exception(EXCP00_DIVZ);
2020 }
2021 q = (num / den);
2022 if (q != (int8_t)q)
2023 raise_exception(EXCP00_DIVZ);
2024 q &= 0xff;
2025 r = (num % den) & 0xff;
2026 EAX = (EAX & ~0xffff) | (r << 8) | q;
2027}
2028
2029void helper_divw_AX(target_ulong t0)
2030{
2031 unsigned int num, den, q, r;
2032
2033 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2034 den = (t0 & 0xffff);
2035 if (den == 0) {
2036 raise_exception(EXCP00_DIVZ);
2037 }
2038 q = (num / den);
2039 if (q > 0xffff)
2040 raise_exception(EXCP00_DIVZ);
2041 q &= 0xffff;
2042 r = (num % den) & 0xffff;
2043 EAX = (EAX & ~0xffff) | q;
2044 EDX = (EDX & ~0xffff) | r;
2045}
2046
2047void helper_idivw_AX(target_ulong t0)
2048{
2049 int num, den, q, r;
2050
2051 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2052 den = (int16_t)t0;
2053 if (den == 0) {
2054 raise_exception(EXCP00_DIVZ);
2055 }
2056 q = (num / den);
2057 if (q != (int16_t)q)
2058 raise_exception(EXCP00_DIVZ);
2059 q &= 0xffff;
2060 r = (num % den) & 0xffff;
2061 EAX = (EAX & ~0xffff) | q;
2062 EDX = (EDX & ~0xffff) | r;
2063}
2064
2065void helper_divl_EAX(target_ulong t0)
2066{
2067 unsigned int den, r;
2068 uint64_t num, q;
2069
2070 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2071 den = t0;
2072 if (den == 0) {
2073 raise_exception(EXCP00_DIVZ);
2074 }
2075 q = (num / den);
2076 r = (num % den);
2077 if (q > 0xffffffff)
2078 raise_exception(EXCP00_DIVZ);
2079 EAX = (uint32_t)q;
2080 EDX = (uint32_t)r;
2081}
2082
2083void helper_idivl_EAX(target_ulong t0)
2084{
2085 int den, r;
2086 int64_t num, q;
2087
2088 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2089 den = t0;
2090 if (den == 0) {
2091 raise_exception(EXCP00_DIVZ);
2092 }
2093 q = (num / den);
2094 r = (num % den);
2095 if (q != (int32_t)q)
2096 raise_exception(EXCP00_DIVZ);
2097 EAX = (uint32_t)q;
2098 EDX = (uint32_t)r;
2099}
2100
2101/* bcd */
2102
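/* ASCII adjust helpers. AAM splits AL by the immediate base: AH = AL / base,
   AL = AL % base (e.g. AL = 53, base = 10 gives AH = 5, AL = 3); AAD folds AH
   back in (AL = AH * base + AL, AH = 0). The XXX below presumably refers to
   the missing #DE check for AAM with base == 0. */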
2103/* XXX: exception */
2104void helper_aam(int base)
2105{
2106 int al, ah;
2107 al = EAX & 0xff;
2108 ah = al / base;
2109 al = al % base;
2110 EAX = (EAX & ~0xffff) | al | (ah << 8);
2111 CC_DST = al;
2112}
2113
2114void helper_aad(int base)
2115{
2116 int al, ah;
2117 al = EAX & 0xff;
2118 ah = (EAX >> 8) & 0xff;
2119 al = ((ah * base) + al) & 0xff;
2120 EAX = (EAX & ~0xffff) | al;
2121 CC_DST = al;
2122}
2123
2124void helper_aaa(void)
2125{
2126 int icarry;
2127 int al, ah, af;
2128 int eflags;
2129
2130 eflags = cc_table[CC_OP].compute_all();
2131 af = eflags & CC_A;
2132 al = EAX & 0xff;
2133 ah = (EAX >> 8) & 0xff;
2134
2135 icarry = (al > 0xf9);
2136 if (((al & 0x0f) > 9 ) || af) {
2137 al = (al + 6) & 0x0f;
2138 ah = (ah + 1 + icarry) & 0xff;
2139 eflags |= CC_C | CC_A;
2140 } else {
2141 eflags &= ~(CC_C | CC_A);
2142 al &= 0x0f;
2143 }
2144 EAX = (EAX & ~0xffff) | al | (ah << 8);
2145 CC_SRC = eflags;
2146 FORCE_RET();
2147}
2148
2149void helper_aas(void)
2150{
2151 int icarry;
2152 int al, ah, af;
2153 int eflags;
2154
2155 eflags = cc_table[CC_OP].compute_all();
2156 af = eflags & CC_A;
2157 al = EAX & 0xff;
2158 ah = (EAX >> 8) & 0xff;
2159
2160 icarry = (al < 6);
2161 if (((al & 0x0f) > 9 ) || af) {
2162 al = (al - 6) & 0x0f;
2163 ah = (ah - 1 - icarry) & 0xff;
2164 eflags |= CC_C | CC_A;
2165 } else {
2166 eflags &= ~(CC_C | CC_A);
2167 al &= 0x0f;
2168 }
2169 EAX = (EAX & ~0xffff) | al | (ah << 8);
2170 CC_SRC = eflags;
2171 FORCE_RET();
2172}
2173
2174void helper_daa(void)
2175{
2176 int al, af, cf;
2177 int eflags;
2178
2179 eflags = cc_table[CC_OP].compute_all();
2180 cf = eflags & CC_C;
2181 af = eflags & CC_A;
2182 al = EAX & 0xff;
2183
2184 eflags = 0;
2185 if (((al & 0x0f) > 9 ) || af) {
2186 al = (al + 6) & 0xff;
2187 eflags |= CC_A;
2188 }
2189 if ((al > 0x9f) || cf) {
2190 al = (al + 0x60) & 0xff;
2191 eflags |= CC_C;
2192 }
2193 EAX = (EAX & ~0xff) | al;
2194 /* well, speed is not an issue here, so we compute the flags by hand */
2195 eflags |= (al == 0) << 6; /* zf */
2196 eflags |= parity_table[al]; /* pf */
2197 eflags |= (al & 0x80); /* sf */
2198 CC_SRC = eflags;
2199 FORCE_RET();
2200}
2201
2202void helper_das(void)
2203{
2204 int al, al1, af, cf;
2205 int eflags;
2206
2207 eflags = cc_table[CC_OP].compute_all();
2208 cf = eflags & CC_C;
2209 af = eflags & CC_A;
2210 al = EAX & 0xff;
2211
2212 eflags = 0;
2213 al1 = al;
2214 if (((al & 0x0f) > 9 ) || af) {
2215 eflags |= CC_A;
2216 if (al < 6 || cf)
2217 eflags |= CC_C;
2218 al = (al - 6) & 0xff;
2219 }
2220 if ((al1 > 0x99) || cf) {
2221 al = (al - 0x60) & 0xff;
2222 eflags |= CC_C;
2223 }
2224 EAX = (EAX & ~0xff) | al;
2225 /* well, speed is not an issue here, so we compute the flags by hand */
2226 eflags |= (al == 0) << 6; /* zf */
2227 eflags |= parity_table[al]; /* pf */
2228 eflags |= (al & 0x80); /* sf */
2229 CC_SRC = eflags;
2230 FORCE_RET();
2231}
2232
2233void helper_into(int next_eip_addend)
2234{
2235 int eflags;
2236 eflags = cc_table[CC_OP].compute_all();
2237 if (eflags & CC_O) {
2238 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2239 }
2240}
2241
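/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; if equal, ZF is
   set and ECX:EBX is stored, otherwise ZF is cleared and the operand is loaded
   into EDX:EAX. The store is performed in both cases, presumably so the memory
   access (and any resulting fault) matches real hardware behaviour. */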
2242void helper_cmpxchg8b(target_ulong a0)
2243{
2244 uint64_t d;
2245 int eflags;
2246
2247 eflags = cc_table[CC_OP].compute_all();
2248 d = ldq(a0);
2249 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2250 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2251 eflags |= CC_Z;
2252 } else {
2253 /* always do the store */
2254 stq(a0, d);
2255 EDX = (uint32_t)(d >> 32);
2256 EAX = (uint32_t)d;
2257 eflags &= ~CC_Z;
2258 }
2259 CC_SRC = eflags;
2260}
2261
2262#ifdef TARGET_X86_64
2263void helper_cmpxchg16b(target_ulong a0)
2264{
2265 uint64_t d0, d1;
2266 int eflags;
2267
2268 if ((a0 & 0xf) != 0)
2269 raise_exception(EXCP0D_GPF);
2270 eflags = cc_table[CC_OP].compute_all();
2271 d0 = ldq(a0);
2272 d1 = ldq(a0 + 8);
2273 if (d0 == EAX && d1 == EDX) {
2274 stq(a0, EBX);
2275 stq(a0 + 8, ECX);
2276 eflags |= CC_Z;
2277 } else {
2278 /* always do the store */
2279 stq(a0, d0);
2280 stq(a0 + 8, d1);
2281 EDX = d1;
2282 EAX = d0;
2283 eflags &= ~CC_Z;
2284 }
2285 CC_SRC = eflags;
2286}
2287#endif
2288
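/* Pending single-step trap: set the BS bit (bit 14, 0x4000) in DR6 and
   deliver #DB. */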
2289void helper_single_step(void)
2290{
2291 env->dr[6] |= 0x4000;
2292 raise_exception(EXCP01_SSTP);
2293}
2294
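/* CPUID: the VBOX build hands the query to remR3CpuId() so the recompiler
   reports the same leaves as the rest of the VMM; the disabled generic code
   below synthesizes the leaves from the cpuid_* fields instead. */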
2295void helper_cpuid(void)
2296{
2297#ifndef VBOX
2298 uint32_t index;
2299
2300 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2301
2302 index = (uint32_t)EAX;
2303 /* test if maximum index reached */
2304 if (index & 0x80000000) {
2305 if (index > env->cpuid_xlevel)
2306 index = env->cpuid_level;
2307 } else {
2308 if (index > env->cpuid_level)
2309 index = env->cpuid_level;
2310 }
2311
2312 switch(index) {
2313 case 0:
2314 EAX = env->cpuid_level;
2315 EBX = env->cpuid_vendor1;
2316 EDX = env->cpuid_vendor2;
2317 ECX = env->cpuid_vendor3;
2318 break;
2319 case 1:
2320 EAX = env->cpuid_version;
2321 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2322 ECX = env->cpuid_ext_features;
2323 EDX = env->cpuid_features;
2324 break;
2325 case 2:
2326 /* cache info: needed for Pentium Pro compatibility */
2327 EAX = 1;
2328 EBX = 0;
2329 ECX = 0;
2330 EDX = 0x2c307d;
2331 break;
2332 case 4:
2333 /* cache info: needed for Core compatibility */
2334 switch (ECX) {
2335 case 0: /* L1 dcache info */
2336 EAX = 0x0000121;
2337 EBX = 0x1c0003f;
2338 ECX = 0x000003f;
2339 EDX = 0x0000001;
2340 break;
2341 case 1: /* L1 icache info */
2342 EAX = 0x0000122;
2343 EBX = 0x1c0003f;
2344 ECX = 0x000003f;
2345 EDX = 0x0000001;
2346 break;
2347 case 2: /* L2 cache info */
2348 EAX = 0x0000143;
2349 EBX = 0x3c0003f;
2350 ECX = 0x0000fff;
2351 EDX = 0x0000001;
2352 break;
2353 default: /* end of info */
2354 EAX = 0;
2355 EBX = 0;
2356 ECX = 0;
2357 EDX = 0;
2358 break;
2359 }
2360
2361 break;
2362 case 5:
2363 /* mwait info: needed for Core compatibility */
2364 EAX = 0; /* Smallest monitor-line size in bytes */
2365 EBX = 0; /* Largest monitor-line size in bytes */
2366 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2367 EDX = 0;
2368 break;
2369 case 6:
2370 /* Thermal and Power Leaf */
2371 EAX = 0;
2372 EBX = 0;
2373 ECX = 0;
2374 EDX = 0;
2375 break;
2376 case 9:
2377 /* Direct Cache Access Information Leaf */
2378 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2379 EBX = 0;
2380 ECX = 0;
2381 EDX = 0;
2382 break;
2383 case 0xA:
2384 /* Architectural Performance Monitoring Leaf */
2385 EAX = 0;
2386 EBX = 0;
2387 ECX = 0;
2388 EDX = 0;
2389 break;
2390 case 0x80000000:
2391 EAX = env->cpuid_xlevel;
2392 EBX = env->cpuid_vendor1;
2393 EDX = env->cpuid_vendor2;
2394 ECX = env->cpuid_vendor3;
2395 break;
2396 case 0x80000001:
2397 EAX = env->cpuid_features;
2398 EBX = 0;
2399 ECX = env->cpuid_ext3_features;
2400 EDX = env->cpuid_ext2_features;
2401 break;
2402 case 0x80000002:
2403 case 0x80000003:
2404 case 0x80000004:
2405 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2406 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2407 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2408 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2409 break;
2410 case 0x80000005:
2411 /* cache info (L1 cache) */
2412 EAX = 0x01ff01ff;
2413 EBX = 0x01ff01ff;
2414 ECX = 0x40020140;
2415 EDX = 0x40020140;
2416 break;
2417 case 0x80000006:
2418 /* cache info (L2 cache) */
2419 EAX = 0;
2420 EBX = 0x42004200;
2421 ECX = 0x02008140;
2422 EDX = 0;
2423 break;
2424 case 0x80000008:
2425 /* virtual & phys address size in low 2 bytes. */
2426/* XXX: This value must match the one used in the MMU code. */
2427 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2428 /* 64 bit processor */
2429#if defined(USE_KQEMU)
2430 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2431#else
2432/* XXX: The physical address space is limited to 42 bits in exec.c. */
2433 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2434#endif
2435 } else {
2436#if defined(USE_KQEMU)
2437 EAX = 0x00000020; /* 32 bits physical */
2438#else
2439 if (env->cpuid_features & CPUID_PSE36)
2440 EAX = 0x00000024; /* 36 bits physical */
2441 else
2442 EAX = 0x00000020; /* 32 bits physical */
2443#endif
2444 }
2445 EBX = 0;
2446 ECX = 0;
2447 EDX = 0;
2448 break;
2449 case 0x8000000A:
2450 EAX = 0x00000001;
2451 EBX = 0;
2452 ECX = 0;
2453 EDX = 0;
2454 break;
2455 default:
2456 /* reserved values: zero */
2457 EAX = 0;
2458 EBX = 0;
2459 ECX = 0;
2460 EDX = 0;
2461 break;
2462 }
2463#else /* VBOX */
2464 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2465#endif /* VBOX */
2466}
2467
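/* ENTER with a non-zero nesting level: copy level-1 frame pointers from the
   old frame (addressed through EBP) onto the new stack, then push the new
   frame pointer t1. esp_mask selects 16- or 32-bit stack wrap-around. */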
2468void helper_enter_level(int level, int data32, target_ulong t1)
2469{
2470 target_ulong ssp;
2471 uint32_t esp_mask, esp, ebp;
2472
2473 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2474 ssp = env->segs[R_SS].base;
2475 ebp = EBP;
2476 esp = ESP;
2477 if (data32) {
2478 /* 32 bit */
2479 esp -= 4;
2480 while (--level) {
2481 esp -= 4;
2482 ebp -= 4;
2483 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2484 }
2485 esp -= 4;
2486 stl(ssp + (esp & esp_mask), t1);
2487 } else {
2488 /* 16 bit */
2489 esp -= 2;
2490 while (--level) {
2491 esp -= 2;
2492 ebp -= 2;
2493 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2494 }
2495 esp -= 2;
2496 stw(ssp + (esp & esp_mask), t1);
2497 }
2498}
2499
2500#ifdef TARGET_X86_64
2501void helper_enter64_level(int level, int data64, target_ulong t1)
2502{
2503 target_ulong esp, ebp;
2504 ebp = EBP;
2505 esp = ESP;
2506
2507 if (data64) {
2508 /* 64 bit */
2509 esp -= 8;
2510 while (--level) {
2511 esp -= 8;
2512 ebp -= 8;
2513 stq(esp, ldq(ebp));
2514 }
2515 esp -= 8;
2516 stq(esp, t1);
2517 } else {
2518 /* 16 bit */
2519 esp -= 2;
2520 while (--level) {
2521 esp -= 2;
2522 ebp -= 2;
2523 stw(esp, lduw(ebp));
2524 }
2525 esp -= 2;
2526 stw(esp, t1);
2527 }
2528}
2529#endif
2530
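/* LLDT: load the LDT register from a GDT descriptor. A null selector leaves an
   unusable LDT (base and limit 0); otherwise the descriptor must live in the
   GDT and be a present LDT descriptor (type 2). In long mode the descriptor
   occupies 16 bytes, hence entry_limit = 15. */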
2531void helper_lldt(int selector)
2532{
2533 SegmentCache *dt;
2534 uint32_t e1, e2;
2535#ifndef VBOX
2536 int index, entry_limit;
2537#else
2538 unsigned int index, entry_limit;
2539#endif
2540 target_ulong ptr;
2541
2542#ifdef VBOX
2543 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2544 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2545#endif
2546
2547 selector &= 0xffff;
2548 if ((selector & 0xfffc) == 0) {
2549 /* XXX: NULL selector case: invalid LDT */
2550 env->ldt.base = 0;
2551 env->ldt.limit = 0;
2552 } else {
2553 if (selector & 0x4)
2554 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2555 dt = &env->gdt;
2556 index = selector & ~7;
2557#ifdef TARGET_X86_64
2558 if (env->hflags & HF_LMA_MASK)
2559 entry_limit = 15;
2560 else
2561#endif
2562 entry_limit = 7;
2563 if ((index + entry_limit) > dt->limit)
2564 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2565 ptr = dt->base + index;
2566 e1 = ldl_kernel(ptr);
2567 e2 = ldl_kernel(ptr + 4);
2568 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2569 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2570 if (!(e2 & DESC_P_MASK))
2571 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2572#ifdef TARGET_X86_64
2573 if (env->hflags & HF_LMA_MASK) {
2574 uint32_t e3;
2575 e3 = ldl_kernel(ptr + 8);
2576 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2577 env->ldt.base |= (target_ulong)e3 << 32;
2578 } else
2579#endif
2580 {
2581 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2582 }
2583 }
2584 env->ldt.selector = selector;
2585#ifdef VBOX
2586 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2587 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2588#endif
2589}
2590
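/* LTR: load the task register. Same lookup rules as LLDT, but the descriptor
   must be an available 16/32-bit TSS (type 1 or 9); the descriptor is then
   marked busy by writing e2 back with DESC_TSS_BUSY_MASK set. */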
2591void helper_ltr(int selector)
2592{
2593 SegmentCache *dt;
2594 uint32_t e1, e2;
2595#ifndef VBOX
2596 int index, type, entry_limit;
2597#else
2598 unsigned int index;
2599 int type, entry_limit;
2600#endif
2601 target_ulong ptr;
2602
2603#ifdef VBOX
2604 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2605 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2606 env->tr.flags, (RTSEL)(selector & 0xffff)));
2607#endif
2608 selector &= 0xffff;
2609 if ((selector & 0xfffc) == 0) {
2610 /* NULL selector case: invalid TR */
2611 env->tr.base = 0;
2612 env->tr.limit = 0;
2613 env->tr.flags = 0;
2614 } else {
2615 if (selector & 0x4)
2616 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2617 dt = &env->gdt;
2618 index = selector & ~7;
2619#ifdef TARGET_X86_64
2620 if (env->hflags & HF_LMA_MASK)
2621 entry_limit = 15;
2622 else
2623#endif
2624 entry_limit = 7;
2625 if ((index + entry_limit) > dt->limit)
2626 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2627 ptr = dt->base + index;
2628 e1 = ldl_kernel(ptr);
2629 e2 = ldl_kernel(ptr + 4);
2630 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2631 if ((e2 & DESC_S_MASK) ||
2632 (type != 1 && type != 9))
2633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2634 if (!(e2 & DESC_P_MASK))
2635 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2636#ifdef TARGET_X86_64
2637 if (env->hflags & HF_LMA_MASK) {
2638 uint32_t e3, e4;
2639 e3 = ldl_kernel(ptr + 8);
2640 e4 = ldl_kernel(ptr + 12);
2641 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2642 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2643 load_seg_cache_raw_dt(&env->tr, e1, e2);
2644 env->tr.base |= (target_ulong)e3 << 32;
2645 } else
2646#endif
2647 {
2648 load_seg_cache_raw_dt(&env->tr, e1, e2);
2649 }
2650 e2 |= DESC_TSS_BUSY_MASK;
2651 stl_kernel(ptr + 4, e2);
2652 }
2653 env->tr.selector = selector;
2654#ifdef VBOX
2655 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2656 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2657 env->tr.flags, (RTSEL)(selector & 0xffff)));
2658#endif
2659}
2660
2661/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2662void helper_load_seg(int seg_reg, int selector)
2663{
2664 uint32_t e1, e2;
2665 int cpl, dpl, rpl;
2666 SegmentCache *dt;
2667#ifndef VBOX
2668 int index;
2669#else
2670 unsigned int index;
2671#endif
2672 target_ulong ptr;
2673
2674 selector &= 0xffff;
2675 cpl = env->hflags & HF_CPL_MASK;
2676
2677#ifdef VBOX
2678 /* Trying to load a selector with RPL=1? */
2679 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2680 {
2681 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2682 selector = selector & 0xfffc;
2683 }
2684#endif
2685 if ((selector & 0xfffc) == 0) {
2686 /* null selector case */
2687 if (seg_reg == R_SS
2688#ifdef TARGET_X86_64
2689 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2690#endif
2691 )
2692 raise_exception_err(EXCP0D_GPF, 0);
2693 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2694 } else {
2695
2696 if (selector & 0x4)
2697 dt = &env->ldt;
2698 else
2699 dt = &env->gdt;
2700 index = selector & ~7;
2701 if ((index + 7) > dt->limit)
2702 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2703 ptr = dt->base + index;
2704 e1 = ldl_kernel(ptr);
2705 e2 = ldl_kernel(ptr + 4);
2706
2707 if (!(e2 & DESC_S_MASK))
2708 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2709 rpl = selector & 3;
2710 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2711 if (seg_reg == R_SS) {
2712 /* must be writable segment */
2713 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2714 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2715 if (rpl != cpl || dpl != cpl)
2716 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2717 } else {
2718 /* must be readable segment */
2719 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2720 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2721
2722 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2723 /* if not conforming code, test rights */
2724 if (dpl < cpl || dpl < rpl)
2725 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2726 }
2727 }
2728
2729 if (!(e2 & DESC_P_MASK)) {
2730 if (seg_reg == R_SS)
2731 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2732 else
2733 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2734 }
2735
2736 /* set the access bit if not already set */
2737 if (!(e2 & DESC_A_MASK)) {
2738 e2 |= DESC_A_MASK;
2739 stl_kernel(ptr + 4, e2);
2740 }
2741
2742 cpu_x86_load_seg_cache(env, seg_reg, selector,
2743 get_seg_base(e1, e2),
2744 get_seg_limit(e1, e2),
2745 e2);
2746#if 0
2747 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2748 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2749#endif
2750 }
2751}
2752
2753/* protected mode jump */
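/* Far JMP in protected mode: either a direct transfer to a code segment at the
   current CPL, or an indirect one through a TSS/task gate (task switch) or a
   call gate. Unlike a far CALL, a jump through a call gate only takes the new
   CS:EIP from the gate; no stack switch or parameter copy takes place. */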
2754void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2755 int next_eip_addend)
2756{
2757 int gate_cs, type;
2758 uint32_t e1, e2, cpl, dpl, rpl, limit;
2759 target_ulong next_eip;
2760
2761#ifdef VBOX
2762 e1 = e2 = 0;
2763#endif
2764 if ((new_cs & 0xfffc) == 0)
2765 raise_exception_err(EXCP0D_GPF, 0);
2766 if (load_segment(&e1, &e2, new_cs) != 0)
2767 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2768 cpl = env->hflags & HF_CPL_MASK;
2769 if (e2 & DESC_S_MASK) {
2770 if (!(e2 & DESC_CS_MASK))
2771 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2772 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2773 if (e2 & DESC_C_MASK) {
2774 /* conforming code segment */
2775 if (dpl > cpl)
2776 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2777 } else {
2778 /* non conforming code segment */
2779 rpl = new_cs & 3;
2780 if (rpl > cpl)
2781 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2782 if (dpl != cpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 }
2785 if (!(e2 & DESC_P_MASK))
2786 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2787 limit = get_seg_limit(e1, e2);
2788 if (new_eip > limit &&
2789 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2790 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2791 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2792 get_seg_base(e1, e2), limit, e2);
2793 EIP = new_eip;
2794 } else {
2795 /* jump to call or task gate */
2796 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2797 rpl = new_cs & 3;
2798 cpl = env->hflags & HF_CPL_MASK;
2799 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2800 switch(type) {
2801 case 1: /* 286 TSS */
2802 case 9: /* 386 TSS */
2803 case 5: /* task gate */
2804 if (dpl < cpl || dpl < rpl)
2805 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2806 next_eip = env->eip + next_eip_addend;
2807 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2808 CC_OP = CC_OP_EFLAGS;
2809 break;
2810 case 4: /* 286 call gate */
2811 case 12: /* 386 call gate */
2812 if ((dpl < cpl) || (dpl < rpl))
2813 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2814 if (!(e2 & DESC_P_MASK))
2815 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2816 gate_cs = e1 >> 16;
2817 new_eip = (e1 & 0xffff);
2818 if (type == 12)
2819 new_eip |= (e2 & 0xffff0000);
2820 if (load_segment(&e1, &e2, gate_cs) != 0)
2821 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2822 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2823 /* must be code segment */
2824 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2825 (DESC_S_MASK | DESC_CS_MASK)))
2826 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2827 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2828 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2829 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2830 if (!(e2 & DESC_P_MASK))
2831#ifdef VBOX /* See page 3-514 of 253666.pdf */
2832 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2833#else
2834 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2835#endif
2836 limit = get_seg_limit(e1, e2);
2837 if (new_eip > limit)
2838 raise_exception_err(EXCP0D_GPF, 0);
2839 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2840 get_seg_base(e1, e2), limit, e2);
2841 EIP = new_eip;
2842 break;
2843 default:
2844 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2845 break;
2846 }
2847 }
2848}
2849
2850/* real mode call */
2851void helper_lcall_real(int new_cs, target_ulong new_eip1,
2852 int shift, int next_eip)
2853{
2854 int new_eip;
2855 uint32_t esp, esp_mask;
2856 target_ulong ssp;
2857
2858 new_eip = new_eip1;
2859 esp = ESP;
2860 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2861 ssp = env->segs[R_SS].base;
2862 if (shift) {
2863 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2864 PUSHL(ssp, esp, esp_mask, next_eip);
2865 } else {
2866 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2867 PUSHW(ssp, esp, esp_mask, next_eip);
2868 }
2869
2870 SET_ESP(esp, esp_mask);
2871 env->eip = new_eip;
2872 env->segs[R_CS].selector = new_cs;
2873 env->segs[R_CS].base = (new_cs << 4);
2874}
2875
2876/* protected mode call */
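/* Far CALL in protected mode. A call gate to a more privileged non-conforming
   segment switches stacks: the new SS:ESP comes from the TSS entry for the
   target DPL, param_count dwords/words are copied over from the caller's
   stack, and the old SS:ESP plus CS:EIP are pushed on the new stack before
   control moves to the gate's entry point. */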
2877void helper_lcall_protected(int new_cs, target_ulong new_eip,
2878 int shift, int next_eip_addend)
2879{
2880 int new_stack, i;
2881 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2882 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2883 uint32_t val, limit, old_sp_mask;
2884 target_ulong ssp, old_ssp, next_eip;
2885
2886#ifdef VBOX
2887 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2888#endif
2889 next_eip = env->eip + next_eip_addend;
2890#ifdef DEBUG_PCALL
2891 if (loglevel & CPU_LOG_PCALL) {
2892 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2893 new_cs, (uint32_t)new_eip, shift);
2894 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2895 }
2896#endif
2897 if ((new_cs & 0xfffc) == 0)
2898 raise_exception_err(EXCP0D_GPF, 0);
2899 if (load_segment(&e1, &e2, new_cs) != 0)
2900 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2901 cpl = env->hflags & HF_CPL_MASK;
2902#ifdef DEBUG_PCALL
2903 if (loglevel & CPU_LOG_PCALL) {
2904 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2905 }
2906#endif
2907 if (e2 & DESC_S_MASK) {
2908 if (!(e2 & DESC_CS_MASK))
2909 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2911 if (e2 & DESC_C_MASK) {
2912 /* conforming code segment */
2913 if (dpl > cpl)
2914 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2915 } else {
2916 /* non conforming code segment */
2917 rpl = new_cs & 3;
2918 if (rpl > cpl)
2919 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2920 if (dpl != cpl)
2921 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2922 }
2923 if (!(e2 & DESC_P_MASK))
2924 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2925
2926#ifdef TARGET_X86_64
2927 /* XXX: check 16/32 bit cases in long mode */
2928 if (shift == 2) {
2929 target_ulong rsp;
2930 /* 64 bit case */
2931 rsp = ESP;
2932 PUSHQ(rsp, env->segs[R_CS].selector);
2933 PUSHQ(rsp, next_eip);
2934 /* from this point, not restartable */
2935 ESP = rsp;
2936 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2937 get_seg_base(e1, e2),
2938 get_seg_limit(e1, e2), e2);
2939 EIP = new_eip;
2940 } else
2941#endif
2942 {
2943 sp = ESP;
2944 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2945 ssp = env->segs[R_SS].base;
2946 if (shift) {
2947 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2948 PUSHL(ssp, sp, sp_mask, next_eip);
2949 } else {
2950 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2951 PUSHW(ssp, sp, sp_mask, next_eip);
2952 }
2953
2954 limit = get_seg_limit(e1, e2);
2955 if (new_eip > limit)
2956 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2957 /* from this point, not restartable */
2958 SET_ESP(sp, sp_mask);
2959 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2960 get_seg_base(e1, e2), limit, e2);
2961 EIP = new_eip;
2962 }
2963 } else {
2964 /* check gate type */
2965 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2966 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2967 rpl = new_cs & 3;
2968 switch(type) {
2969 case 1: /* available 286 TSS */
2970 case 9: /* available 386 TSS */
2971 case 5: /* task gate */
2972 if (dpl < cpl || dpl < rpl)
2973 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2974 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2975 CC_OP = CC_OP_EFLAGS;
2976 return;
2977 case 4: /* 286 call gate */
2978 case 12: /* 386 call gate */
2979 break;
2980 default:
2981 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2982 break;
2983 }
2984 shift = type >> 3;
2985
2986 if (dpl < cpl || dpl < rpl)
2987 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2988 /* check valid bit */
2989 if (!(e2 & DESC_P_MASK))
2990 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2991 selector = e1 >> 16;
2992 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2993 param_count = e2 & 0x1f;
2994 if ((selector & 0xfffc) == 0)
2995 raise_exception_err(EXCP0D_GPF, 0);
2996
2997 if (load_segment(&e1, &e2, selector) != 0)
2998 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2999 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3001 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3002 if (dpl > cpl)
3003 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3004 if (!(e2 & DESC_P_MASK))
3005 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3006
3007 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3008 /* to inner privilege */
3009 get_ss_esp_from_tss(&ss, &sp, dpl);
3010#ifdef DEBUG_PCALL
3011 if (loglevel & CPU_LOG_PCALL)
3012 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3013 ss, sp, param_count, ESP);
3014#endif
3015 if ((ss & 0xfffc) == 0)
3016 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3017 if ((ss & 3) != dpl)
3018 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3019 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3020 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3021 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3022 if (ss_dpl != dpl)
3023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3024 if (!(ss_e2 & DESC_S_MASK) ||
3025 (ss_e2 & DESC_CS_MASK) ||
3026 !(ss_e2 & DESC_W_MASK))
3027 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3028 if (!(ss_e2 & DESC_P_MASK))
3029#ifdef VBOX /* See page 3-99 of 253666.pdf */
3030 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3031#else
3032 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3033#endif
3034
3035 // push_size = ((param_count * 2) + 8) << shift;
3036
3037 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3038 old_ssp = env->segs[R_SS].base;
3039
3040 sp_mask = get_sp_mask(ss_e2);
3041 ssp = get_seg_base(ss_e1, ss_e2);
3042 if (shift) {
3043 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3044 PUSHL(ssp, sp, sp_mask, ESP);
3045 for(i = param_count - 1; i >= 0; i--) {
3046 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3047 PUSHL(ssp, sp, sp_mask, val);
3048 }
3049 } else {
3050 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3051 PUSHW(ssp, sp, sp_mask, ESP);
3052 for(i = param_count - 1; i >= 0; i--) {
3053 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3054 PUSHW(ssp, sp, sp_mask, val);
3055 }
3056 }
3057 new_stack = 1;
3058 } else {
3059 /* to same privilege */
3060 sp = ESP;
3061 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3062 ssp = env->segs[R_SS].base;
3063 // push_size = (4 << shift);
3064 new_stack = 0;
3065 }
3066
3067 if (shift) {
3068 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3069 PUSHL(ssp, sp, sp_mask, next_eip);
3070 } else {
3071 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3072 PUSHW(ssp, sp, sp_mask, next_eip);
3073 }
3074
3075 /* from this point, not restartable */
3076
3077 if (new_stack) {
3078 ss = (ss & ~3) | dpl;
3079 cpu_x86_load_seg_cache(env, R_SS, ss,
3080 ssp,
3081 get_seg_limit(ss_e1, ss_e2),
3082 ss_e2);
3083 }
3084
3085 selector = (selector & ~3) | dpl;
3086 cpu_x86_load_seg_cache(env, R_CS, selector,
3087 get_seg_base(e1, e2),
3088 get_seg_limit(e1, e2),
3089 e2);
3090 cpu_x86_set_cpl(env, dpl);
3091 SET_ESP(sp, sp_mask);
3092 EIP = offset;
3093 }
3094#ifdef USE_KQEMU
3095 if (kqemu_is_ok(env)) {
3096 env->exception_index = -1;
3097 cpu_loop_exit();
3098 }
3099#endif
3100}
3101
3102/* real and vm86 mode iret */
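/* Real and VM86 IRET: pop IP, CS and FLAGS (16- or 32-bit wide depending on
   shift). The VBOX additions implement the VME (virtual-8086 mode extension)
   rules: with CR4.VME set and IOPL < 3, a set TF or a pending virtual
   interrupt causes #GP, and the popped IF is reflected into VIF instead of
   IF. */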
3103void helper_iret_real(int shift)
3104{
3105 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3106 target_ulong ssp;
3107 int eflags_mask;
3108#ifdef VBOX
3109 bool fVME = false;
3110
3111 remR3TrapClear(env->pVM);
3112#endif /* VBOX */
3113
3114 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3115 sp = ESP;
3116 ssp = env->segs[R_SS].base;
3117 if (shift == 1) {
3118 /* 32 bits */
3119 POPL(ssp, sp, sp_mask, new_eip);
3120 POPL(ssp, sp, sp_mask, new_cs);
3121 new_cs &= 0xffff;
3122 POPL(ssp, sp, sp_mask, new_eflags);
3123 } else {
3124 /* 16 bits */
3125 POPW(ssp, sp, sp_mask, new_eip);
3126 POPW(ssp, sp, sp_mask, new_cs);
3127 POPW(ssp, sp, sp_mask, new_eflags);
3128 }
3129#ifdef VBOX
3130 if ( (env->eflags & VM_MASK)
3131 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3132 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3133 {
3134 fVME = true;
3135 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3136 /* if TF will be set -> #GP */
3137 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3138 || (new_eflags & TF_MASK))
3139 raise_exception(EXCP0D_GPF);
3140 }
3141#endif /* VBOX */
3142 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3143 env->segs[R_CS].selector = new_cs;
3144 env->segs[R_CS].base = (new_cs << 4);
3145 env->eip = new_eip;
3146#ifdef VBOX
3147 if (fVME)
3148 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3149 else
3150#endif
3151 if (env->eflags & VM_MASK)
3152 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3153 else
3154 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3155 if (shift == 0)
3156 eflags_mask &= 0xffff;
3157 load_eflags(new_eflags, eflags_mask);
3158 env->hflags2 &= ~HF2_NMI_MASK;
3159#ifdef VBOX
3160 if (fVME)
3161 {
3162 if (new_eflags & IF_MASK)
3163 env->eflags |= VIF_MASK;
3164 else
3165 env->eflags &= ~VIF_MASK;
3166 }
3167#endif /* VBOX */
3168}
3169
3170#ifndef VBOX
3171static inline void validate_seg(int seg_reg, int cpl)
3172#else /* VBOX */
3173DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3174#endif /* VBOX */
3175{
3176 int dpl;
3177 uint32_t e2;
3178
3179 /* XXX: on x86_64, we do not want to nullify FS and GS because
3180 they may still contain a valid base. I would be interested to
3181 know how a real x86_64 CPU behaves */
3182 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3183 (env->segs[seg_reg].selector & 0xfffc) == 0)
3184 return;
3185
3186 e2 = env->segs[seg_reg].flags;
3187 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3188 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3189 /* data or non conforming code segment */
3190 if (dpl < cpl) {
3191 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3192 }
3193 }
3194}
3195
3196/* protected mode iret */
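/* Common tail for far RET and IRET in protected mode: pop EIP and CS (plus
   EFLAGS for IRET). A same-privilege return only reloads CS; a return to an
   outer ring additionally pops ESP and SS, re-validates ES/DS/FS/GS against
   the new CPL, and IRET restricts which EFLAGS bits may change based on the
   old CPL and IOPL. An IRET whose popped EFLAGS has VM set resumes VM86. */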
3197#ifndef VBOX
3198static inline void helper_ret_protected(int shift, int is_iret, int addend)
3199#else /* VBOX */
3200DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3201#endif /* VBOX */
3202{
3203 uint32_t new_cs, new_eflags, new_ss;
3204 uint32_t new_es, new_ds, new_fs, new_gs;
3205 uint32_t e1, e2, ss_e1, ss_e2;
3206 int cpl, dpl, rpl, eflags_mask, iopl;
3207 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3208
3209#ifdef VBOX
3210 ss_e1 = ss_e2 = e1 = e2 = 0;
3211#endif
3212
3213#ifdef TARGET_X86_64
3214 if (shift == 2)
3215 sp_mask = -1;
3216 else
3217#endif
3218 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3219 sp = ESP;
3220 ssp = env->segs[R_SS].base;
3221 new_eflags = 0; /* avoid warning */
3222#ifdef TARGET_X86_64
3223 if (shift == 2) {
3224 POPQ(sp, new_eip);
3225 POPQ(sp, new_cs);
3226 new_cs &= 0xffff;
3227 if (is_iret) {
3228 POPQ(sp, new_eflags);
3229 }
3230 } else
3231#endif
3232 if (shift == 1) {
3233 /* 32 bits */
3234 POPL(ssp, sp, sp_mask, new_eip);
3235 POPL(ssp, sp, sp_mask, new_cs);
3236 new_cs &= 0xffff;
3237 if (is_iret) {
3238 POPL(ssp, sp, sp_mask, new_eflags);
3239#if defined(VBOX) && defined(DEBUG)
3240 printf("iret: new CS %04X\n", new_cs);
3241 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3242 printf("iret: new EFLAGS %08X\n", new_eflags);
3243 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3244#endif
3245 if (new_eflags & VM_MASK)
3246 goto return_to_vm86;
3247 }
3248#ifdef VBOX
3249 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3250 {
3251#ifdef DEBUG
3252 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3253#endif
3254 new_cs = new_cs & 0xfffc;
3255 }
3256#endif
3257 } else {
3258 /* 16 bits */
3259 POPW(ssp, sp, sp_mask, new_eip);
3260 POPW(ssp, sp, sp_mask, new_cs);
3261 if (is_iret)
3262 POPW(ssp, sp, sp_mask, new_eflags);
3263 }
3264#ifdef DEBUG_PCALL
3265 if (loglevel & CPU_LOG_PCALL) {
3266 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3267 new_cs, new_eip, shift, addend);
3268 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3269 }
3270#endif
3271 if ((new_cs & 0xfffc) == 0)
3272 {
3273#if defined(VBOX) && defined(DEBUG)
3274 printf("(new_cs & 0xfffc) == 0\n");
3275#endif
3276 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3277 }
3278 if (load_segment(&e1, &e2, new_cs) != 0)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 printf("load_segment failed\n");
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 if (!(e2 & DESC_S_MASK) ||
3286 !(e2 & DESC_CS_MASK))
3287 {
3288#if defined(VBOX) && defined(DEBUG)
3289 printf("e2 mask %08x\n", e2);
3290#endif
3291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3292 }
3293 cpl = env->hflags & HF_CPL_MASK;
3294 rpl = new_cs & 3;
3295 if (rpl < cpl)
3296 {
3297#if defined(VBOX) && defined(DEBUG)
3298 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3299#endif
3300 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3301 }
3302 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3303 if (e2 & DESC_C_MASK) {
3304 if (dpl > rpl)
3305 {
3306#if defined(VBOX) && defined(DEBUG)
3307 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3308#endif
3309 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3310 }
3311 } else {
3312 if (dpl != rpl)
3313 {
3314#if defined(VBOX) && defined(DEBUG)
3315 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3316#endif
3317 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3318 }
3319 }
3320 if (!(e2 & DESC_P_MASK))
3321 {
3322#if defined(VBOX) && defined(DEBUG)
3323 printf("DESC_P_MASK e2=%08x\n", e2);
3324#endif
3325 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3326 }
3327
3328 sp += addend;
3329 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3330 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3331 /* return to same privilege level */
3332 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3333 get_seg_base(e1, e2),
3334 get_seg_limit(e1, e2),
3335 e2);
3336 } else {
3337 /* return to different privilege level */
3338#ifdef TARGET_X86_64
3339 if (shift == 2) {
3340 POPQ(sp, new_esp);
3341 POPQ(sp, new_ss);
3342 new_ss &= 0xffff;
3343 } else
3344#endif
3345 if (shift == 1) {
3346 /* 32 bits */
3347 POPL(ssp, sp, sp_mask, new_esp);
3348 POPL(ssp, sp, sp_mask, new_ss);
3349 new_ss &= 0xffff;
3350 } else {
3351 /* 16 bits */
3352 POPW(ssp, sp, sp_mask, new_esp);
3353 POPW(ssp, sp, sp_mask, new_ss);
3354 }
3355#ifdef DEBUG_PCALL
3356 if (loglevel & CPU_LOG_PCALL) {
3357 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3358 new_ss, new_esp);
3359 }
3360#endif
3361 if ((new_ss & 0xfffc) == 0) {
3362#ifdef TARGET_X86_64
3363 /* NULL ss is allowed in long mode if cpl != 3 */
3364 /* XXX: test CS64 ? */
3365 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3366 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3367 0, 0xffffffff,
3368 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3369 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3370 DESC_W_MASK | DESC_A_MASK);
3371 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3372 } else
3373#endif
3374 {
3375 raise_exception_err(EXCP0D_GPF, 0);
3376 }
3377 } else {
3378 if ((new_ss & 3) != rpl)
3379 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3380 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3381 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3382 if (!(ss_e2 & DESC_S_MASK) ||
3383 (ss_e2 & DESC_CS_MASK) ||
3384 !(ss_e2 & DESC_W_MASK))
3385 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3386 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3387 if (dpl != rpl)
3388 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3389 if (!(ss_e2 & DESC_P_MASK))
3390 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3391 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3392 get_seg_base(ss_e1, ss_e2),
3393 get_seg_limit(ss_e1, ss_e2),
3394 ss_e2);
3395 }
3396
3397 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3398 get_seg_base(e1, e2),
3399 get_seg_limit(e1, e2),
3400 e2);
3401 cpu_x86_set_cpl(env, rpl);
3402 sp = new_esp;
3403#ifdef TARGET_X86_64
3404 if (env->hflags & HF_CS64_MASK)
3405 sp_mask = -1;
3406 else
3407#endif
3408 sp_mask = get_sp_mask(ss_e2);
3409
3410 /* validate data segments */
3411 validate_seg(R_ES, rpl);
3412 validate_seg(R_DS, rpl);
3413 validate_seg(R_FS, rpl);
3414 validate_seg(R_GS, rpl);
3415
3416 sp += addend;
3417 }
3418 SET_ESP(sp, sp_mask);
3419 env->eip = new_eip;
3420 if (is_iret) {
3421 /* NOTE: 'cpl' is the _old_ CPL */
3422 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3423 if (cpl == 0)
3424#ifdef VBOX
3425 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3426#else
3427 eflags_mask |= IOPL_MASK;
3428#endif
3429 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3430 if (cpl <= iopl)
3431 eflags_mask |= IF_MASK;
3432 if (shift == 0)
3433 eflags_mask &= 0xffff;
3434 load_eflags(new_eflags, eflags_mask);
3435 }
3436 return;
3437
3438 return_to_vm86:
3439 POPL(ssp, sp, sp_mask, new_esp);
3440 POPL(ssp, sp, sp_mask, new_ss);
3441 POPL(ssp, sp, sp_mask, new_es);
3442 POPL(ssp, sp, sp_mask, new_ds);
3443 POPL(ssp, sp, sp_mask, new_fs);
3444 POPL(ssp, sp, sp_mask, new_gs);
3445
3446 /* modify processor state */
3447 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3448 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3449 load_seg_vm(R_CS, new_cs & 0xffff);
3450 cpu_x86_set_cpl(env, 3);
3451 load_seg_vm(R_SS, new_ss & 0xffff);
3452 load_seg_vm(R_ES, new_es & 0xffff);
3453 load_seg_vm(R_DS, new_ds & 0xffff);
3454 load_seg_vm(R_FS, new_fs & 0xffff);
3455 load_seg_vm(R_GS, new_gs & 0xffff);
3456
3457 env->eip = new_eip & 0xffff;
3458 ESP = new_esp;
3459}
3460
3461void helper_iret_protected(int shift, int next_eip)
3462{
3463 int tss_selector, type;
3464 uint32_t e1, e2;
3465
3466#ifdef VBOX
3467 e1 = e2 = 0;
3468 remR3TrapClear(env->pVM);
3469#endif
3470
3471 /* specific case for TSS */
3472 if (env->eflags & NT_MASK) {
3473#ifdef TARGET_X86_64
3474 if (env->hflags & HF_LMA_MASK)
3475 raise_exception_err(EXCP0D_GPF, 0);
3476#endif
3477 tss_selector = lduw_kernel(env->tr.base + 0);
3478 if (tss_selector & 4)
3479 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3480 if (load_segment(&e1, &e2, tss_selector) != 0)
3481 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3482 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3483 /* NOTE: we check both segment and busy TSS */
3484 if (type != 3)
3485 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3486 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3487 } else {
3488 helper_ret_protected(shift, 1, 0);
3489 }
3490 env->hflags2 &= ~HF2_NMI_MASK;
3491#ifdef USE_KQEMU
3492 if (kqemu_is_ok(env)) {
3493 CC_OP = CC_OP_EFLAGS;
3494 env->exception_index = -1;
3495 cpu_loop_exit();
3496 }
3497#endif
3498}
3499
3500void helper_lret_protected(int shift, int addend)
3501{
3502 helper_ret_protected(shift, 0, addend);
3503#ifdef USE_KQEMU
3504 if (kqemu_is_ok(env)) {
3505 env->exception_index = -1;
3506 cpu_loop_exit();
3507 }
3508#endif
3509}
3510
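/* SYSENTER: fast system call entry. #GP if SYSENTER_CS is zero. Loads flat
   CPL-0 segments derived from the SYSENTER_CS MSR (SS = CS + 8), clears VM,
   IF and RF, and continues at SYSENTER_EIP with SYSENTER_ESP as the stack. */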
3511void helper_sysenter(void)
3512{
3513 if (env->sysenter_cs == 0) {
3514 raise_exception_err(EXCP0D_GPF, 0);
3515 }
3516 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3517 cpu_x86_set_cpl(env, 0);
3518
3519#ifdef TARGET_X86_64
3520 if (env->hflags & HF_LMA_MASK) {
3521 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3522 0, 0xffffffff,
3523 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3524 DESC_S_MASK |
3525 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3526 } else
3527#endif
3528 {
3529 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3530 0, 0xffffffff,
3531 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3532 DESC_S_MASK |
3533 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3534 }
3535 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3536 0, 0xffffffff,
3537 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3538 DESC_S_MASK |
3539 DESC_W_MASK | DESC_A_MASK);
3540 ESP = env->sysenter_esp;
3541 EIP = env->sysenter_eip;
3542}
3543
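/* SYSEXIT: fast return to CPL 3. #GP if SYSENTER_CS is zero or CPL != 0.
   The 32-bit form uses SYSENTER_CS + 16/+24 for CS/SS, the 64-bit form
   (dflag == 2) uses +32/+40; the new EIP comes from EDX and ESP from ECX. */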
3544void helper_sysexit(int dflag)
3545{
3546 int cpl;
3547
3548 cpl = env->hflags & HF_CPL_MASK;
3549 if (env->sysenter_cs == 0 || cpl != 0) {
3550 raise_exception_err(EXCP0D_GPF, 0);
3551 }
3552 cpu_x86_set_cpl(env, 3);
3553#ifdef TARGET_X86_64
3554 if (dflag == 2) {
3555 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3556 0, 0xffffffff,
3557 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3558 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3559 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3560 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3561 0, 0xffffffff,
3562 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3563 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3564 DESC_W_MASK | DESC_A_MASK);
3565 } else
3566#endif
3567 {
3568 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3569 0, 0xffffffff,
3570 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3571 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3572 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3573 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3574 0, 0xffffffff,
3575 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3576 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3577 DESC_W_MASK | DESC_A_MASK);
3578 }
3579 ESP = ECX;
3580 EIP = EDX;
3581#ifdef USE_KQEMU
3582 if (kqemu_is_ok(env)) {
3583 env->exception_index = -1;
3584 cpu_loop_exit();
3585 }
3586#endif
3587}
3588
3589#if defined(CONFIG_USER_ONLY)
3590target_ulong helper_read_crN(int reg)
3591{
3592 return 0;
3593}
3594
3595void helper_write_crN(int reg, target_ulong t0)
3596{
3597}
3598#else
3599target_ulong helper_read_crN(int reg)
3600{
3601 target_ulong val;
3602
3603 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3604 switch(reg) {
3605 default:
3606 val = env->cr[reg];
3607 break;
3608 case 8:
3609 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3610 val = cpu_get_apic_tpr(env);
3611 } else {
3612 val = env->v_tpr;
3613 }
3614 break;
3615 }
3616 return val;
3617}
3618
3619void helper_write_crN(int reg, target_ulong t0)
3620{
3621 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3622 switch(reg) {
3623 case 0:
3624 cpu_x86_update_cr0(env, t0);
3625 break;
3626 case 3:
3627 cpu_x86_update_cr3(env, t0);
3628 break;
3629 case 4:
3630 cpu_x86_update_cr4(env, t0);
3631 break;
3632 case 8:
3633 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3634 cpu_set_apic_tpr(env, t0);
3635 }
3636 env->v_tpr = t0 & 0x0f;
3637 break;
3638 default:
3639 env->cr[reg] = t0;
3640 break;
3641 }
3642}
3643#endif
3644
3645void helper_lmsw(target_ulong t0)
3646{
3647 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3648 if already set to one. */
3649 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3650 helper_write_crN(0, t0);
3651}
3652
3653void helper_clts(void)
3654{
3655 env->cr[0] &= ~CR0_TS_MASK;
3656 env->hflags &= ~HF_TS_MASK;
3657}
3658
3659/* XXX: do more */
3660void helper_movl_drN_T0(int reg, target_ulong t0)
3661{
3662 env->dr[reg] = t0;
3663}
3664
3665void helper_invlpg(target_ulong addr)
3666{
3667 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3668 tlb_flush_page(env, addr);
3669}
3670
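/* RDTSC: #GP when CR4.TSD is set and CPL != 0, otherwise return the time
   stamp counter (plus env->tsc_offset) in EDX:EAX. */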
3671void helper_rdtsc(void)
3672{
3673 uint64_t val;
3674
3675 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3676 raise_exception(EXCP0D_GPF);
3677 }
3678 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3679
3680 val = cpu_get_tsc(env) + env->tsc_offset;
3681 EAX = (uint32_t)(val);
3682 EDX = (uint32_t)(val >> 32);
3683}
3684
3685#ifdef VBOX
3686void helper_rdtscp(void)
3687{
3688 uint64_t val;
3689 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3690 raise_exception(EXCP0D_GPF);
3691 }
3692
3693 val = cpu_get_tsc(env);
3694 EAX = (uint32_t)(val);
3695 EDX = (uint32_t)(val >> 32);
3696 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3697}
3698#endif
3699
3700void helper_rdpmc(void)
3701{
3702 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3703 raise_exception(EXCP0D_GPF);
3704 }
3705 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3706
3707 /* currently unimplemented */
3708 raise_exception_err(EXCP06_ILLOP, 0);
3709}
3710
3711#if defined(CONFIG_USER_ONLY)
3712void helper_wrmsr(void)
3713{
3714}
3715
3716void helper_rdmsr(void)
3717{
3718}
3719#else
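/* WRMSR/RDMSR: the MSR index is taken from ECX, the 64-bit value from (or
   returned in) EDX:EAX. The EFER write is masked so only bits backed by the
   reported CPUID features can change; in the VBOX build, accesses to the
   X2APIC MSR range and to TSC/TSC_AUX are forwarded to the APIC/VMM helpers. */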
3720void helper_wrmsr(void)
3721{
3722 uint64_t val;
3723
3724 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3725
3726 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3727
3728 switch((uint32_t)ECX) {
3729 case MSR_IA32_SYSENTER_CS:
3730 env->sysenter_cs = val & 0xffff;
3731 break;
3732 case MSR_IA32_SYSENTER_ESP:
3733 env->sysenter_esp = val;
3734 break;
3735 case MSR_IA32_SYSENTER_EIP:
3736 env->sysenter_eip = val;
3737 break;
3738 case MSR_IA32_APICBASE:
3739 cpu_set_apic_base(env, val);
3740 break;
3741 case MSR_EFER:
3742 {
3743 uint64_t update_mask;
3744 update_mask = 0;
3745 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3746 update_mask |= MSR_EFER_SCE;
3747 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3748 update_mask |= MSR_EFER_LME;
3749 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3750 update_mask |= MSR_EFER_FFXSR;
3751 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3752 update_mask |= MSR_EFER_NXE;
3753 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3754 update_mask |= MSR_EFER_SVME;
3755 cpu_load_efer(env, (env->efer & ~update_mask) |
3756 (val & update_mask));
3757 }
3758 break;
3759 case MSR_STAR:
3760 env->star = val;
3761 break;
3762 case MSR_PAT:
3763 env->pat = val;
3764 break;
3765 case MSR_VM_HSAVE_PA:
3766 env->vm_hsave = val;
3767 break;
3768#ifdef TARGET_X86_64
3769 case MSR_LSTAR:
3770 env->lstar = val;
3771 break;
3772 case MSR_CSTAR:
3773 env->cstar = val;
3774 break;
3775 case MSR_FMASK:
3776 env->fmask = val;
3777 break;
3778 case MSR_FSBASE:
3779 env->segs[R_FS].base = val;
3780 break;
3781 case MSR_GSBASE:
3782 env->segs[R_GS].base = val;
3783 break;
3784 case MSR_KERNELGSBASE:
3785 env->kernelgsbase = val;
3786 break;
3787#endif
3788 default:
3789#ifndef VBOX
3790 /* XXX: exception ? */
3791 break;
3792#else /* VBOX */
3793 {
3794 uint32_t ecx = (uint32_t)ECX;
3795 /* In X2APIC specification this range is reserved for APIC control. */
3796 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3797 cpu_apic_wrmsr(env, ecx, val);
3798 /** @todo else exception? */
3799 break;
3800 }
3801 case MSR_K8_TSC_AUX:
3802 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3803 break;
3804#endif /* VBOX */
3805 }
3806}
3807
3808void helper_rdmsr(void)
3809{
3810 uint64_t val;
3811
3812 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3813
3814 switch((uint32_t)ECX) {
3815 case MSR_IA32_SYSENTER_CS:
3816 val = env->sysenter_cs;
3817 break;
3818 case MSR_IA32_SYSENTER_ESP:
3819 val = env->sysenter_esp;
3820 break;
3821 case MSR_IA32_SYSENTER_EIP:
3822 val = env->sysenter_eip;
3823 break;
3824 case MSR_IA32_APICBASE:
3825 val = cpu_get_apic_base(env);
3826 break;
3827 case MSR_EFER:
3828 val = env->efer;
3829 break;
3830 case MSR_STAR:
3831 val = env->star;
3832 break;
3833 case MSR_PAT:
3834 val = env->pat;
3835 break;
3836 case MSR_VM_HSAVE_PA:
3837 val = env->vm_hsave;
3838 break;
3839 case MSR_IA32_PERF_STATUS:
3840 /* tsc_increment_by_tick */
3841 val = 1000ULL;
3842 /* CPU multiplier */
3843 val |= (((uint64_t)4ULL) << 40);
3844 break;
3845#ifdef TARGET_X86_64
3846 case MSR_LSTAR:
3847 val = env->lstar;
3848 break;
3849 case MSR_CSTAR:
3850 val = env->cstar;
3851 break;
3852 case MSR_FMASK:
3853 val = env->fmask;
3854 break;
3855 case MSR_FSBASE:
3856 val = env->segs[R_FS].base;
3857 break;
3858 case MSR_GSBASE:
3859 val = env->segs[R_GS].base;
3860 break;
3861 case MSR_KERNELGSBASE:
3862 val = env->kernelgsbase;
3863 break;
3864#endif
3865#ifdef USE_KQEMU
3866 case MSR_QPI_COMMBASE:
3867 if (env->kqemu_enabled) {
3868 val = kqemu_comm_base;
3869 } else {
3870 val = 0;
3871 }
3872 break;
3873#endif
3874 default:
3875#ifndef VBOX
3876 /* XXX: exception ? */
3877 val = 0;
3878 break;
3879#else /* VBOX */
3880 {
3881 uint32_t ecx = (uint32_t)ECX;
3882 /* In X2APIC specification this range is reserved for APIC control. */
3883 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3884 val = cpu_apic_rdmsr(env, ecx);
3885 else
3886 val = 0; /** @todo else exception? */
3887 break;
3888 }
3889 case MSR_IA32_TSC:
3890 case MSR_K8_TSC_AUX:
3891 val = cpu_rdmsr(env, (uint32_t)ECX);
3892 break;
3893#endif /* VBOX */
3894 }
3895 EAX = (uint32_t)(val);
3896 EDX = (uint32_t)(val >> 32);
3897}
3898#endif
3899
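/* LSL, LAR, VERR and VERW load descriptor information without faulting: any
   failed check clears ZF and returns, success sets ZF. LSL yields the segment
   limit, LAR the access-rights bytes (masked with 0x00f0ff00), and VERR/VERW
   only report whether the segment is readable/writable at the current
   CPL/RPL. */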
3900target_ulong helper_lsl(target_ulong selector1)
3901{
3902 unsigned int limit;
3903 uint32_t e1, e2, eflags, selector;
3904 int rpl, dpl, cpl, type;
3905
3906 selector = selector1 & 0xffff;
3907 eflags = cc_table[CC_OP].compute_all();
3908 if (load_segment(&e1, &e2, selector) != 0)
3909 goto fail;
3910 rpl = selector & 3;
3911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3912 cpl = env->hflags & HF_CPL_MASK;
3913 if (e2 & DESC_S_MASK) {
3914 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3915 /* conforming */
3916 } else {
3917 if (dpl < cpl || dpl < rpl)
3918 goto fail;
3919 }
3920 } else {
3921 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3922 switch(type) {
3923 case 1:
3924 case 2:
3925 case 3:
3926 case 9:
3927 case 11:
3928 break;
3929 default:
3930 goto fail;
3931 }
3932 if (dpl < cpl || dpl < rpl) {
3933 fail:
3934 CC_SRC = eflags & ~CC_Z;
3935 return 0;
3936 }
3937 }
3938 limit = get_seg_limit(e1, e2);
3939 CC_SRC = eflags | CC_Z;
3940 return limit;
3941}
3942
3943target_ulong helper_lar(target_ulong selector1)
3944{
3945 uint32_t e1, e2, eflags, selector;
3946 int rpl, dpl, cpl, type;
3947
3948 selector = selector1 & 0xffff;
3949 eflags = cc_table[CC_OP].compute_all();
3950 if ((selector & 0xfffc) == 0)
3951 goto fail;
3952 if (load_segment(&e1, &e2, selector) != 0)
3953 goto fail;
3954 rpl = selector & 3;
3955 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3956 cpl = env->hflags & HF_CPL_MASK;
3957 if (e2 & DESC_S_MASK) {
3958 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3959 /* conforming */
3960 } else {
3961 if (dpl < cpl || dpl < rpl)
3962 goto fail;
3963 }
3964 } else {
3965 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3966 switch(type) {
3967 case 1:
3968 case 2:
3969 case 3:
3970 case 4:
3971 case 5:
3972 case 9:
3973 case 11:
3974 case 12:
3975 break;
3976 default:
3977 goto fail;
3978 }
3979 if (dpl < cpl || dpl < rpl) {
3980 fail:
3981 CC_SRC = eflags & ~CC_Z;
3982 return 0;
3983 }
3984 }
3985 CC_SRC = eflags | CC_Z;
3986 return e2 & 0x00f0ff00;
3987}
3988
3989void helper_verr(target_ulong selector1)
3990{
3991 uint32_t e1, e2, eflags, selector;
3992 int rpl, dpl, cpl;
3993
3994 selector = selector1 & 0xffff;
3995 eflags = cc_table[CC_OP].compute_all();
3996 if ((selector & 0xfffc) == 0)
3997 goto fail;
3998 if (load_segment(&e1, &e2, selector) != 0)
3999 goto fail;
4000 if (!(e2 & DESC_S_MASK))
4001 goto fail;
4002 rpl = selector & 3;
4003 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4004 cpl = env->hflags & HF_CPL_MASK;
4005 if (e2 & DESC_CS_MASK) {
4006 if (!(e2 & DESC_R_MASK))
4007 goto fail;
4008 if (!(e2 & DESC_C_MASK)) {
4009 if (dpl < cpl || dpl < rpl)
4010 goto fail;
4011 }
4012 } else {
4013 if (dpl < cpl || dpl < rpl) {
4014 fail:
4015 CC_SRC = eflags & ~CC_Z;
4016 return;
4017 }
4018 }
4019 CC_SRC = eflags | CC_Z;
4020}
4021
4022void helper_verw(target_ulong selector1)
4023{
4024 uint32_t e1, e2, eflags, selector;
4025 int rpl, dpl, cpl;
4026
4027 selector = selector1 & 0xffff;
4028 eflags = cc_table[CC_OP].compute_all();
4029 if ((selector & 0xfffc) == 0)
4030 goto fail;
4031 if (load_segment(&e1, &e2, selector) != 0)
4032 goto fail;
4033 if (!(e2 & DESC_S_MASK))
4034 goto fail;
4035 rpl = selector & 3;
4036 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4037 cpl = env->hflags & HF_CPL_MASK;
4038 if (e2 & DESC_CS_MASK) {
4039 goto fail;
4040 } else {
4041 if (dpl < cpl || dpl < rpl)
4042 goto fail;
4043 if (!(e2 & DESC_W_MASK)) {
4044 fail:
4045 CC_SRC = eflags & ~CC_Z;
4046 return;
4047 }
4048 }
4049 CC_SRC = eflags | CC_Z;
4050}
4051
4052/* x87 FPU helpers */
4053
4054static void fpu_set_exception(int mask)
4055{
4056 env->fpus |= mask;
4057 if (env->fpus & (~env->fpuc & FPUC_EM))
4058 env->fpus |= FPUS_SE | FPUS_B;
4059}
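/* A note on the helper above: FPUC_EM is the set of exception mask bits in the
   control word.  Raising an exception whose mask bit is clear ("unmasked")
   additionally sets the error summary (FPUS_SE) and busy (FPUS_B) bits, which
   is what later causes fpu_raise_exception()/helper_fwait() to deliver #MF
   (or FERR when CR0.NE is clear).  A masked exception only sets its own
   status bit, e.g. FPUS_ZE for a masked divide by zero. */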
4060
4061#ifndef VBOX
4062static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4063#else /* VBOX */
4064DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4065#endif /* VBOX */
4066{
4067 if (b == 0.0)
4068 fpu_set_exception(FPUS_ZE);
4069 return a / b;
4070}
4071
4072void fpu_raise_exception(void)
4073{
4074 if (env->cr[0] & CR0_NE_MASK) {
4075 raise_exception(EXCP10_COPR);
4076 }
4077#if !defined(CONFIG_USER_ONLY)
4078 else {
4079 cpu_set_ferr(env);
4080 }
4081#endif
4082}
4083
4084void helper_flds_FT0(uint32_t val)
4085{
4086 union {
4087 float32 f;
4088 uint32_t i;
4089 } u;
4090 u.i = val;
4091 FT0 = float32_to_floatx(u.f, &env->fp_status);
4092}
4093
4094void helper_fldl_FT0(uint64_t val)
4095{
4096 union {
4097 float64 f;
4098 uint64_t i;
4099 } u;
4100 u.i = val;
4101 FT0 = float64_to_floatx(u.f, &env->fp_status);
4102}
4103
4104void helper_fildl_FT0(int32_t val)
4105{
4106 FT0 = int32_to_floatx(val, &env->fp_status);
4107}
4108
4109void helper_flds_ST0(uint32_t val)
4110{
4111 int new_fpstt;
4112 union {
4113 float32 f;
4114 uint32_t i;
4115 } u;
4116 new_fpstt = (env->fpstt - 1) & 7;
4117 u.i = val;
4118 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4119 env->fpstt = new_fpstt;
4120 env->fptags[new_fpstt] = 0; /* validate stack entry */
4121}
4122
4123void helper_fldl_ST0(uint64_t val)
4124{
4125 int new_fpstt;
4126 union {
4127 float64 f;
4128 uint64_t i;
4129 } u;
4130 new_fpstt = (env->fpstt - 1) & 7;
4131 u.i = val;
4132 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4133 env->fpstt = new_fpstt;
4134 env->fptags[new_fpstt] = 0; /* validate stack entry */
4135}
4136
4137void helper_fildl_ST0(int32_t val)
4138{
4139 int new_fpstt;
4140 new_fpstt = (env->fpstt - 1) & 7;
4141 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4142 env->fpstt = new_fpstt;
4143 env->fptags[new_fpstt] = 0; /* validate stack entry */
4144}
4145
4146void helper_fildll_ST0(int64_t val)
4147{
4148 int new_fpstt;
4149 new_fpstt = (env->fpstt - 1) & 7;
4150 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4151 env->fpstt = new_fpstt;
4152 env->fptags[new_fpstt] = 0; /* validate stack entry */
4153}
4154
4155#ifndef VBOX
4156uint32_t helper_fsts_ST0(void)
4157#else
4158RTCCUINTREG helper_fsts_ST0(void)
4159#endif
4160{
4161 union {
4162 float32 f;
4163 uint32_t i;
4164 } u;
4165 u.f = floatx_to_float32(ST0, &env->fp_status);
4166 return u.i;
4167}
4168
4169uint64_t helper_fstl_ST0(void)
4170{
4171 union {
4172 float64 f;
4173 uint64_t i;
4174 } u;
4175 u.f = floatx_to_float64(ST0, &env->fp_status);
4176 return u.i;
4177}
4178#ifndef VBOX
4179int32_t helper_fist_ST0(void)
4180#else
4181RTCCINTREG helper_fist_ST0(void)
4182#endif
4183{
4184 int32_t val;
4185 val = floatx_to_int32(ST0, &env->fp_status);
4186 if (val != (int16_t)val)
4187 val = -32768;
4188 return val;
4189}
4190
4191#ifndef VBOX
4192int32_t helper_fistl_ST0(void)
4193#else
4194RTCCINTREG helper_fistl_ST0(void)
4195#endif
4196{
4197 int32_t val;
4198 val = floatx_to_int32(ST0, &env->fp_status);
4199 return val;
4200}
4201
4202int64_t helper_fistll_ST0(void)
4203{
4204 int64_t val;
4205 val = floatx_to_int64(ST0, &env->fp_status);
4206 return val;
4207}
4208
4209#ifndef VBOX
4210int32_t helper_fistt_ST0(void)
4211#else
4212RTCCINTREG helper_fistt_ST0(void)
4213#endif
4214{
4215 int32_t val;
4216 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4217 if (val != (int16_t)val)
4218 val = -32768;
4219 return val;
4220}
4221
4222#ifndef VBOX
4223int32_t helper_fisttl_ST0(void)
4224#else
4225RTCCINTREG helper_fisttl_ST0(void)
4226#endif
4227{
4228 int32_t val;
4229 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4230 return val;
4231}
4232
4233int64_t helper_fisttll_ST0(void)
4234{
4235 int64_t val;
4236 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4237 return val;
4238}
4239
4240void helper_fldt_ST0(target_ulong ptr)
4241{
4242 int new_fpstt;
4243 new_fpstt = (env->fpstt - 1) & 7;
4244 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4245 env->fpstt = new_fpstt;
4246 env->fptags[new_fpstt] = 0; /* validate stack entry */
4247}
4248
4249void helper_fstt_ST0(target_ulong ptr)
4250{
4251 helper_fstt(ST0, ptr);
4252}
4253
4254void helper_fpush(void)
4255{
4256 fpush();
4257}
4258
4259void helper_fpop(void)
4260{
4261 fpop();
4262}
4263
4264void helper_fdecstp(void)
4265{
4266 env->fpstt = (env->fpstt - 1) & 7;
4267 env->fpus &= (~0x4700);
4268}
4269
4270void helper_fincstp(void)
4271{
4272 env->fpstt = (env->fpstt + 1) & 7;
4273 env->fpus &= (~0x4700);
4274}
4275
4276/* FPU move */
4277
4278void helper_ffree_STN(int st_index)
4279{
4280 env->fptags[(env->fpstt + st_index) & 7] = 1;
4281}
4282
4283void helper_fmov_ST0_FT0(void)
4284{
4285 ST0 = FT0;
4286}
4287
4288void helper_fmov_FT0_STN(int st_index)
4289{
4290 FT0 = ST(st_index);
4291}
4292
4293void helper_fmov_ST0_STN(int st_index)
4294{
4295 ST0 = ST(st_index);
4296}
4297
4298void helper_fmov_STN_ST0(int st_index)
4299{
4300 ST(st_index) = ST0;
4301}
4302
4303void helper_fxchg_ST0_STN(int st_index)
4304{
4305 CPU86_LDouble tmp;
4306 tmp = ST(st_index);
4307 ST(st_index) = ST0;
4308 ST0 = tmp;
4309}
4310
4311/* FPU operations */
4312
4313static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4314
4315void helper_fcom_ST0_FT0(void)
4316{
4317 int ret;
4318
4319 ret = floatx_compare(ST0, FT0, &env->fp_status);
4320 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4321 FORCE_RET();
4322}
4323
4324void helper_fucom_ST0_FT0(void)
4325{
4326 int ret;
4327
4328 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4329 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4330 FORCE_RET();
4331}
4332
4333static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4334
4335void helper_fcomi_ST0_FT0(void)
4336{
4337 int eflags;
4338 int ret;
4339
4340 ret = floatx_compare(ST0, FT0, &env->fp_status);
4341 eflags = cc_table[CC_OP].compute_all();
4342 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4343 CC_SRC = eflags;
4344 FORCE_RET();
4345}
4346
4347void helper_fucomi_ST0_FT0(void)
4348{
4349 int eflags;
4350 int ret;
4351
4352 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4353 eflags = cc_table[CC_OP].compute_all();
4354 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4355 CC_SRC = eflags;
4356 FORCE_RET();
4357}
4358
4359void helper_fadd_ST0_FT0(void)
4360{
4361 ST0 += FT0;
4362}
4363
4364void helper_fmul_ST0_FT0(void)
4365{
4366 ST0 *= FT0;
4367}
4368
4369void helper_fsub_ST0_FT0(void)
4370{
4371 ST0 -= FT0;
4372}
4373
4374void helper_fsubr_ST0_FT0(void)
4375{
4376 ST0 = FT0 - ST0;
4377}
4378
4379void helper_fdiv_ST0_FT0(void)
4380{
4381 ST0 = helper_fdiv(ST0, FT0);
4382}
4383
4384void helper_fdivr_ST0_FT0(void)
4385{
4386 ST0 = helper_fdiv(FT0, ST0);
4387}
4388
4389/* fp operations between STN and ST0 */
4390
4391void helper_fadd_STN_ST0(int st_index)
4392{
4393 ST(st_index) += ST0;
4394}
4395
4396void helper_fmul_STN_ST0(int st_index)
4397{
4398 ST(st_index) *= ST0;
4399}
4400
4401void helper_fsub_STN_ST0(int st_index)
4402{
4403 ST(st_index) -= ST0;
4404}
4405
4406void helper_fsubr_STN_ST0(int st_index)
4407{
4408 CPU86_LDouble *p;
4409 p = &ST(st_index);
4410 *p = ST0 - *p;
4411}
4412
4413void helper_fdiv_STN_ST0(int st_index)
4414{
4415 CPU86_LDouble *p;
4416 p = &ST(st_index);
4417 *p = helper_fdiv(*p, ST0);
4418}
4419
4420void helper_fdivr_STN_ST0(int st_index)
4421{
4422 CPU86_LDouble *p;
4423 p = &ST(st_index);
4424 *p = helper_fdiv(ST0, *p);
4425}
4426
4427/* misc FPU operations */
4428void helper_fchs_ST0(void)
4429{
4430 ST0 = floatx_chs(ST0);
4431}
4432
4433void helper_fabs_ST0(void)
4434{
4435 ST0 = floatx_abs(ST0);
4436}
4437
4438void helper_fld1_ST0(void)
4439{
4440 ST0 = f15rk[1];
4441}
4442
4443void helper_fldl2t_ST0(void)
4444{
4445 ST0 = f15rk[6];
4446}
4447
4448void helper_fldl2e_ST0(void)
4449{
4450 ST0 = f15rk[5];
4451}
4452
4453void helper_fldpi_ST0(void)
4454{
4455 ST0 = f15rk[2];
4456}
4457
4458void helper_fldlg2_ST0(void)
4459{
4460 ST0 = f15rk[3];
4461}
4462
4463void helper_fldln2_ST0(void)
4464{
4465 ST0 = f15rk[4];
4466}
4467
4468void helper_fldz_ST0(void)
4469{
4470 ST0 = f15rk[0];
4471}
4472
4473void helper_fldz_FT0(void)
4474{
4475 FT0 = f15rk[0];
4476}
4477
4478#ifndef VBOX
4479uint32_t helper_fnstsw(void)
4480#else
4481RTCCUINTREG helper_fnstsw(void)
4482#endif
4483{
4484 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4485}
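/* The status word image built above clears the TOP field (bits 11-13) and
   refills it from env->fpstt, since qemu keeps the stack top pointer in a
   separate variable instead of inside env->fpus. */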
4486
4487#ifndef VBOX
4488uint32_t helper_fnstcw(void)
4489#else
4490RTCCUINTREG helper_fnstcw(void)
4491#endif
4492{
4493 return env->fpuc;
4494}
4495
4496static void update_fp_status(void)
4497{
4498 int rnd_type;
4499
4500 /* set rounding mode */
4501 switch(env->fpuc & RC_MASK) {
4502 default:
4503 case RC_NEAR:
4504 rnd_type = float_round_nearest_even;
4505 break;
4506 case RC_DOWN:
4507 rnd_type = float_round_down;
4508 break;
4509 case RC_UP:
4510 rnd_type = float_round_up;
4511 break;
4512 case RC_CHOP:
4513 rnd_type = float_round_to_zero;
4514 break;
4515 }
4516 set_float_rounding_mode(rnd_type, &env->fp_status);
4517#ifdef FLOATX80
4518 switch((env->fpuc >> 8) & 3) {
4519 case 0:
4520 rnd_type = 32;
4521 break;
4522 case 2:
4523 rnd_type = 64;
4524 break;
4525 case 3:
4526 default:
4527 rnd_type = 80;
4528 break;
4529 }
4530 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4531#endif
4532}
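/* Summary of the control word fields decoded by update_fp_status(): RC_MASK
   (bits 10-11) selects the rounding mode and, when FLOATX80 is available,
   bits 8-9 select the precision control (00 = single, 10 = double,
   11 = extended; the reserved value 01 falls through to extended here). */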
4533
4534void helper_fldcw(uint32_t val)
4535{
4536 env->fpuc = val;
4537 update_fp_status();
4538}
4539
4540void helper_fclex(void)
4541{
4542 env->fpus &= 0x7f00;
4543}
4544
4545void helper_fwait(void)
4546{
4547 if (env->fpus & FPUS_SE)
4548 fpu_raise_exception();
4549 FORCE_RET();
4550}
4551
4552void helper_fninit(void)
4553{
4554 env->fpus = 0;
4555 env->fpstt = 0;
4556 env->fpuc = 0x37f;
4557 env->fptags[0] = 1;
4558 env->fptags[1] = 1;
4559 env->fptags[2] = 1;
4560 env->fptags[3] = 1;
4561 env->fptags[4] = 1;
4562 env->fptags[5] = 1;
4563 env->fptags[6] = 1;
4564 env->fptags[7] = 1;
4565}
4566
4567/* BCD ops */
4568
4569void helper_fbld_ST0(target_ulong ptr)
4570{
4571 CPU86_LDouble tmp;
4572 uint64_t val;
4573 unsigned int v;
4574 int i;
4575
4576 val = 0;
4577 for(i = 8; i >= 0; i--) {
4578 v = ldub(ptr + i);
4579 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4580 }
4581 tmp = val;
4582 if (ldub(ptr + 9) & 0x80)
4583 tmp = -tmp;
4584 fpush();
4585 ST0 = tmp;
4586}
4587
4588void helper_fbst_ST0(target_ulong ptr)
4589{
4590 int v;
4591 target_ulong mem_ref, mem_end;
4592 int64_t val;
4593
4594 val = floatx_to_int64(ST0, &env->fp_status);
4595 mem_ref = ptr;
4596 mem_end = mem_ref + 9;
4597 if (val < 0) {
4598 stb(mem_end, 0x80);
4599 val = -val;
4600 } else {
4601 stb(mem_end, 0x00);
4602 }
4603 while (mem_ref < mem_end) {
4604 if (val == 0)
4605 break;
4606 v = val % 100;
4607 val = val / 100;
4608 v = ((v / 10) << 4) | (v % 10);
4609 stb(mem_ref++, v);
4610 }
4611 while (mem_ref < mem_end) {
4612 stb(mem_ref++, 0);
4613 }
4614}
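/* Packed BCD layout used by the two helpers above: two decimal digits per
   byte, least significant pair first, sign in bit 7 of the tenth byte.
   For example the value -1234 is stored as
       ptr[0]=0x34  ptr[1]=0x12  ptr[2..8]=0x00  ptr[9]=0x80 */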
4615
4616void helper_f2xm1(void)
4617{
4618 ST0 = pow(2.0,ST0) - 1.0;
4619}
4620
4621void helper_fyl2x(void)
4622{
4623 CPU86_LDouble fptemp;
4624
4625 fptemp = ST0;
4626 if (fptemp>0.0){
4627 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4628 ST1 *= fptemp;
4629 fpop();
4630 } else {
4631 env->fpus &= (~0x4700);
4632 env->fpus |= 0x400;
4633 }
4634}
4635
4636void helper_fptan(void)
4637{
4638 CPU86_LDouble fptemp;
4639
4640 fptemp = ST0;
4641 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4642 env->fpus |= 0x400;
4643 } else {
4644 ST0 = tan(fptemp);
4645 fpush();
4646 ST0 = 1.0;
4647 env->fpus &= (~0x400); /* C2 <-- 0 */
4648 /* the above code is for |arg| < 2**52 only */
4649 }
4650}
4651
4652void helper_fpatan(void)
4653{
4654 CPU86_LDouble fptemp, fpsrcop;
4655
4656 fpsrcop = ST1;
4657 fptemp = ST0;
4658 ST1 = atan2(fpsrcop,fptemp);
4659 fpop();
4660}
4661
4662void helper_fxtract(void)
4663{
4664 CPU86_LDoubleU temp;
4665 int expdif;
4666
4667 temp.d = ST0;
4668 expdif = EXPD(temp) - EXPBIAS;
4669 /*DP exponent bias*/
4670 ST0 = expdif;
4671 fpush();
4672 BIASEXPONENT(temp);
4673 ST0 = temp.d;
4674}
4675
4676#ifdef VBOX
4677#ifdef _MSC_VER
4678/* MSC cannot divide by zero */
4679extern double _Nan;
4680#define NaN _Nan
4681#else
4682#define NaN (0.0 / 0.0)
4683#endif
4684#endif /* VBOX */
4685
4686void helper_fprem1(void)
4687{
4688 CPU86_LDouble dblq, fpsrcop, fptemp;
4689 CPU86_LDoubleU fpsrcop1, fptemp1;
4690 int expdif;
4691 signed long long int q;
4692
4693#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4694 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4695#else
4696 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4697#endif
4698 ST0 = 0.0 / 0.0; /* NaN */
4699 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4700 return;
4701 }
4702
4703 fpsrcop = ST0;
4704 fptemp = ST1;
4705 fpsrcop1.d = fpsrcop;
4706 fptemp1.d = fptemp;
4707 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4708
4709 if (expdif < 0) {
4710 /* optimisation? taken from the AMD docs */
4711 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4712 /* ST0 is unchanged */
4713 return;
4714 }
4715
4716 if (expdif < 53) {
4717 dblq = fpsrcop / fptemp;
4718 /* round dblq towards nearest integer */
4719 dblq = rint(dblq);
4720 ST0 = fpsrcop - fptemp * dblq;
4721
4722 /* convert dblq to q by truncating towards zero */
4723 if (dblq < 0.0)
4724 q = (signed long long int)(-dblq);
4725 else
4726 q = (signed long long int)dblq;
4727
4728 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4729 /* (C0,C3,C1) <-- (q2,q1,q0) */
4730 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4731 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4732 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4733 } else {
4734 env->fpus |= 0x400; /* C2 <-- 1 */
4735 fptemp = pow(2.0, expdif - 50);
4736 fpsrcop = (ST0 / ST1) / fptemp;
4737 /* fpsrcop = integer obtained by chopping */
4738 fpsrcop = (fpsrcop < 0.0) ?
4739 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4740 ST0 -= (ST1 * fpsrcop * fptemp);
4741 }
4742}
4743
4744void helper_fprem(void)
4745{
4746 CPU86_LDouble dblq, fpsrcop, fptemp;
4747 CPU86_LDoubleU fpsrcop1, fptemp1;
4748 int expdif;
4749 signed long long int q;
4750
4751#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4752 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4753#else
4754 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4755#endif
4756 ST0 = 0.0 / 0.0; /* NaN */
4757 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4758 return;
4759 }
4760
4761 fpsrcop = (CPU86_LDouble)ST0;
4762 fptemp = (CPU86_LDouble)ST1;
4763 fpsrcop1.d = fpsrcop;
4764 fptemp1.d = fptemp;
4765 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4766
4767 if (expdif < 0) {
4768 /* optimisation? taken from the AMD docs */
4769 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4770 /* ST0 is unchanged */
4771 return;
4772 }
4773
4774 if ( expdif < 53 ) {
4775 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4776 /* round dblq towards zero */
4777 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4778 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4779
4780 /* convert dblq to q by truncating towards zero */
4781 if (dblq < 0.0)
4782 q = (signed long long int)(-dblq);
4783 else
4784 q = (signed long long int)dblq;
4785
4786 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4787 /* (C0,C3,C1) <-- (q2,q1,q0) */
4788 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4789 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4790 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4791 } else {
4792 int N = 32 + (expdif % 32); /* as per AMD docs */
4793 env->fpus |= 0x400; /* C2 <-- 1 */
4794 fptemp = pow(2.0, (double)(expdif - N));
4795 fpsrcop = (ST0 / ST1) / fptemp;
4796 /* fpsrcop = integer obtained by chopping */
4797 fpsrcop = (fpsrcop < 0.0) ?
4798 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4799 ST0 -= (ST1 * fpsrcop * fptemp);
4800 }
4801}
4802
4803void helper_fyl2xp1(void)
4804{
4805 CPU86_LDouble fptemp;
4806
4807 fptemp = ST0;
4808 if ((fptemp+1.0)>0.0) {
4809 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4810 ST1 *= fptemp;
4811 fpop();
4812 } else {
4813 env->fpus &= (~0x4700);
4814 env->fpus |= 0x400;
4815 }
4816}
4817
4818void helper_fsqrt(void)
4819{
4820 CPU86_LDouble fptemp;
4821
4822 fptemp = ST0;
4823 if (fptemp<0.0) {
4824 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4825 env->fpus |= 0x400;
4826 }
4827 ST0 = sqrt(fptemp);
4828}
4829
4830void helper_fsincos(void)
4831{
4832 CPU86_LDouble fptemp;
4833
4834 fptemp = ST0;
4835 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4836 env->fpus |= 0x400;
4837 } else {
4838 ST0 = sin(fptemp);
4839 fpush();
4840 ST0 = cos(fptemp);
4841 env->fpus &= (~0x400); /* C2 <-- 0 */
4842 /* the above code is for |arg| < 2**63 only */
4843 }
4844}
4845
4846void helper_frndint(void)
4847{
4848 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4849}
4850
4851void helper_fscale(void)
4852{
4853 ST0 = ldexp (ST0, (int)(ST1));
4854}
4855
4856void helper_fsin(void)
4857{
4858 CPU86_LDouble fptemp;
4859
4860 fptemp = ST0;
4861 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4862 env->fpus |= 0x400;
4863 } else {
4864 ST0 = sin(fptemp);
4865 env->fpus &= (~0x400); /* C2 <-- 0 */
4866 /* the above code is for |arg| < 2**53 only */
4867 }
4868}
4869
4870void helper_fcos(void)
4871{
4872 CPU86_LDouble fptemp;
4873
4874 fptemp = ST0;
4875 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4876 env->fpus |= 0x400;
4877 } else {
4878 ST0 = cos(fptemp);
4879 env->fpus &= (~0x400); /* C2 <-- 0 */
4880 /* the above code is for |arg| < 2**63 only */
4881 }
4882}
4883
4884void helper_fxam_ST0(void)
4885{
4886 CPU86_LDoubleU temp;
4887 int expdif;
4888
4889 temp.d = ST0;
4890
4891 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4892 if (SIGND(temp))
4893 env->fpus |= 0x200; /* C1 <-- 1 */
4894
4895 /* XXX: test fptags too */
4896 expdif = EXPD(temp);
4897 if (expdif == MAXEXPD) {
4898#ifdef USE_X86LDOUBLE
4899 if (MANTD(temp) == 0x8000000000000000ULL)
4900#else
4901 if (MANTD(temp) == 0)
4902#endif
4903 env->fpus |= 0x500 /*Infinity*/;
4904 else
4905 env->fpus |= 0x100 /*NaN*/;
4906 } else if (expdif == 0) {
4907 if (MANTD(temp) == 0)
4908 env->fpus |= 0x4000 /*Zero*/;
4909 else
4910 env->fpus |= 0x4400 /*Denormal*/;
4911 } else {
4912 env->fpus |= 0x400;
4913 }
4914}
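/* FXAM condition code encoding used above (in env->fpus: C0=0x100, C1=0x200,
   C2=0x400, C3=0x4000):
     NaN      -> C0           (0x0100)
     Infinity -> C2|C0        (0x0500)
     Zero     -> C3           (0x4000)
     Denormal -> C3|C2        (0x4400)
     Normal   -> C2           (0x0400)
   with C1 additionally set when the sign bit of ST0 is set. */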
4915
4916void helper_fstenv(target_ulong ptr, int data32)
4917{
4918 int fpus, fptag, exp, i;
4919 uint64_t mant;
4920 CPU86_LDoubleU tmp;
4921
4922 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4923 fptag = 0;
4924 for (i=7; i>=0; i--) {
4925 fptag <<= 2;
4926 if (env->fptags[i]) {
4927 fptag |= 3;
4928 } else {
4929 tmp.d = env->fpregs[i].d;
4930 exp = EXPD(tmp);
4931 mant = MANTD(tmp);
4932 if (exp == 0 && mant == 0) {
4933 /* zero */
4934 fptag |= 1;
4935 } else if (exp == 0 || exp == MAXEXPD
4936#ifdef USE_X86LDOUBLE
4937 || (mant & (1LL << 63)) == 0
4938#endif
4939 ) {
4940 /* NaNs, infinity, denormal */
4941 fptag |= 2;
4942 }
4943 }
4944 }
4945 if (data32) {
4946 /* 32 bit */
4947 stl(ptr, env->fpuc);
4948 stl(ptr + 4, fpus);
4949 stl(ptr + 8, fptag);
4950 stl(ptr + 12, 0); /* fpip */
4951 stl(ptr + 16, 0); /* fpcs */
4952 stl(ptr + 20, 0); /* fpoo */
4953 stl(ptr + 24, 0); /* fpos */
4954 } else {
4955 /* 16 bit */
4956 stw(ptr, env->fpuc);
4957 stw(ptr + 2, fpus);
4958 stw(ptr + 4, fptag);
4959 stw(ptr + 6, 0);
4960 stw(ptr + 8, 0);
4961 stw(ptr + 10, 0);
4962 stw(ptr + 12, 0);
4963 }
4964}
4965
4966void helper_fldenv(target_ulong ptr, int data32)
4967{
4968 int i, fpus, fptag;
4969
4970 if (data32) {
4971 env->fpuc = lduw(ptr);
4972 fpus = lduw(ptr + 4);
4973 fptag = lduw(ptr + 8);
4974 }
4975 else {
4976 env->fpuc = lduw(ptr);
4977 fpus = lduw(ptr + 2);
4978 fptag = lduw(ptr + 4);
4979 }
4980 env->fpstt = (fpus >> 11) & 7;
4981 env->fpus = fpus & ~0x3800;
4982 for(i = 0;i < 8; i++) {
4983 env->fptags[i] = ((fptag & 3) == 3);
4984 fptag >>= 2;
4985 }
4986}
4987
4988void helper_fsave(target_ulong ptr, int data32)
4989{
4990 CPU86_LDouble tmp;
4991 int i;
4992
4993 helper_fstenv(ptr, data32);
4994
4995 ptr += (14 << data32);
4996 for(i = 0;i < 8; i++) {
4997 tmp = ST(i);
4998 helper_fstt(tmp, ptr);
4999 ptr += 10;
5000 }
5001
5002 /* fninit */
5003 env->fpus = 0;
5004 env->fpstt = 0;
5005 env->fpuc = 0x37f;
5006 env->fptags[0] = 1;
5007 env->fptags[1] = 1;
5008 env->fptags[2] = 1;
5009 env->fptags[3] = 1;
5010 env->fptags[4] = 1;
5011 env->fptags[5] = 1;
5012 env->fptags[6] = 1;
5013 env->fptags[7] = 1;
5014}
5015
5016void helper_frstor(target_ulong ptr, int data32)
5017{
5018 CPU86_LDouble tmp;
5019 int i;
5020
5021 helper_fldenv(ptr, data32);
5022 ptr += (14 << data32);
5023
5024 for(i = 0;i < 8; i++) {
5025 tmp = helper_fldt(ptr);
5026 ST(i) = tmp;
5027 ptr += 10;
5028 }
5029}
5030
5031void helper_fxsave(target_ulong ptr, int data64)
5032{
5033 int fpus, fptag, i, nb_xmm_regs;
5034 CPU86_LDouble tmp;
5035 target_ulong addr;
5036
5037 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5038 fptag = 0;
5039 for(i = 0; i < 8; i++) {
5040 fptag |= (env->fptags[i] << i);
5041 }
5042 stw(ptr, env->fpuc);
5043 stw(ptr + 2, fpus);
5044 stw(ptr + 4, fptag ^ 0xff);
5045#ifdef TARGET_X86_64
5046 if (data64) {
5047 stq(ptr + 0x08, 0); /* rip */
5048 stq(ptr + 0x10, 0); /* rdp */
5049 } else
5050#endif
5051 {
5052 stl(ptr + 0x08, 0); /* eip */
5053 stl(ptr + 0x0c, 0); /* sel */
5054 stl(ptr + 0x10, 0); /* dp */
5055 stl(ptr + 0x14, 0); /* sel */
5056 }
5057
5058 addr = ptr + 0x20;
5059 for(i = 0;i < 8; i++) {
5060 tmp = ST(i);
5061 helper_fstt(tmp, addr);
5062 addr += 16;
5063 }
5064
5065 if (env->cr[4] & CR4_OSFXSR_MASK) {
5066 /* XXX: finish it */
5067 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5068 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5069 if (env->hflags & HF_CS64_MASK)
5070 nb_xmm_regs = 16;
5071 else
5072 nb_xmm_regs = 8;
5073 addr = ptr + 0xa0;
5074 for(i = 0; i < nb_xmm_regs; i++) {
5075 stq(addr, env->xmm_regs[i].XMM_Q(0));
5076 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5077 addr += 16;
5078 }
5079 }
5080}
5081
5082void helper_fxrstor(target_ulong ptr, int data64)
5083{
5084 int i, fpus, fptag, nb_xmm_regs;
5085 CPU86_LDouble tmp;
5086 target_ulong addr;
5087
5088 env->fpuc = lduw(ptr);
5089 fpus = lduw(ptr + 2);
5090 fptag = lduw(ptr + 4);
5091 env->fpstt = (fpus >> 11) & 7;
5092 env->fpus = fpus & ~0x3800;
5093 fptag ^= 0xff;
5094 for(i = 0;i < 8; i++) {
5095 env->fptags[i] = ((fptag >> i) & 1);
5096 }
5097
5098 addr = ptr + 0x20;
5099 for(i = 0;i < 8; i++) {
5100 tmp = helper_fldt(addr);
5101 ST(i) = tmp;
5102 addr += 16;
5103 }
5104
5105 if (env->cr[4] & CR4_OSFXSR_MASK) {
5106 /* XXX: finish it */
5107 env->mxcsr = ldl(ptr + 0x18);
5108 //ldl(ptr + 0x1c);
5109 if (env->hflags & HF_CS64_MASK)
5110 nb_xmm_regs = 16;
5111 else
5112 nb_xmm_regs = 8;
5113 addr = ptr + 0xa0;
5114 for(i = 0; i < nb_xmm_regs; i++) {
5115#if !defined(VBOX) || __GNUC__ < 4
5116 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5117 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5118#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5119# if 1
5120 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5121 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5122 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5123 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5124# else
5125 /* this works fine on Mac OS X, gcc 4.0.1 */
5126 uint64_t u64 = ldq(addr);
5127 env->xmm_regs[i].XMM_Q(0) = u64;
5128 u64 = ldq(addr + 8);
5129 env->xmm_regs[i].XMM_Q(1) = u64;
5130# endif
5131#endif
5132 addr += 16;
5133 }
5134 }
5135}
5136
5137#ifndef USE_X86LDOUBLE
5138
5139void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5140{
5141 CPU86_LDoubleU temp;
5142 int e;
5143
5144 temp.d = f;
5145 /* mantissa */
5146 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5147 /* exponent + sign */
5148 e = EXPD(temp) - EXPBIAS + 16383;
5149 e |= SIGND(temp) >> 16;
5150 *pexp = e;
5151}
5152
5153CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5154{
5155 CPU86_LDoubleU temp;
5156 int e;
5157 uint64_t ll;
5158
5159 /* XXX: handle overflow ? */
5160 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5161 e |= (upper >> 4) & 0x800; /* sign */
5162 ll = (mant >> 11) & ((1LL << 52) - 1);
5163#ifdef __arm__
5164 temp.l.upper = (e << 20) | (ll >> 32);
5165 temp.l.lower = ll;
5166#else
5167 temp.ll = ll | ((uint64_t)e << 52);
5168#endif
5169 return temp.d;
5170}
5171
5172#else
5173
5174void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5175{
5176 CPU86_LDoubleU temp;
5177
5178 temp.d = f;
5179 *pmant = temp.l.lower;
5180 *pexp = temp.l.upper;
5181}
5182
5183CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5184{
5185 CPU86_LDoubleU temp;
5186
5187 temp.l.upper = upper;
5188 temp.l.lower = mant;
5189 return temp.d;
5190}
5191#endif
5192
5193#ifdef TARGET_X86_64
5194
5195//#define DEBUG_MULDIV
5196
5197static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5198{
5199 *plow += a;
5200 /* carry test */
5201 if (*plow < a)
5202 (*phigh)++;
5203 *phigh += b;
5204}
5205
5206static void neg128(uint64_t *plow, uint64_t *phigh)
5207{
5208 *plow = ~ *plow;
5209 *phigh = ~ *phigh;
5210 add128(plow, phigh, 1, 0);
5211}
5212
5213/* return TRUE if overflow */
5214static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5215{
5216 uint64_t q, r, a1, a0;
5217 int i, qb, ab;
5218
5219 a0 = *plow;
5220 a1 = *phigh;
5221 if (a1 == 0) {
5222 q = a0 / b;
5223 r = a0 % b;
5224 *plow = q;
5225 *phigh = r;
5226 } else {
5227 if (a1 >= b)
5228 return 1;
5229 /* XXX: use a better algorithm */
5230 for(i = 0; i < 64; i++) {
5231 ab = a1 >> 63;
5232 a1 = (a1 << 1) | (a0 >> 63);
5233 if (ab || a1 >= b) {
5234 a1 -= b;
5235 qb = 1;
5236 } else {
5237 qb = 0;
5238 }
5239 a0 = (a0 << 1) | qb;
5240 }
5241#if defined(DEBUG_MULDIV)
5242 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5243 *phigh, *plow, b, a0, a1);
5244#endif
5245 *plow = a0;
5246 *phigh = a1;
5247 }
5248 return 0;
5249}
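/* The loop above is a plain restoring (shift-and-subtract) division of the
   128-bit value a1:a0 by b: each of the 64 iterations shifts the dividend
   left by one bit, subtracts b from the high half when possible and shifts
   the resulting quotient bit into a0.  After 64 steps a0 holds the quotient
   and a1 the remainder. */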
5250
5251/* return TRUE if overflow */
5252static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5253{
5254 int sa, sb;
5255 sa = ((int64_t)*phigh < 0);
5256 if (sa)
5257 neg128(plow, phigh);
5258 sb = (b < 0);
5259 if (sb)
5260 b = -b;
5261 if (div64(plow, phigh, b) != 0)
5262 return 1;
5263 if (sa ^ sb) {
5264 if (*plow > (1ULL << 63))
5265 return 1;
5266 *plow = - *plow;
5267 } else {
5268 if (*plow >= (1ULL << 63))
5269 return 1;
5270 }
5271 if (sa)
5272 *phigh = - *phigh;
5273 return 0;
5274}
5275
5276void helper_mulq_EAX_T0(target_ulong t0)
5277{
5278 uint64_t r0, r1;
5279
5280 mulu64(&r0, &r1, EAX, t0);
5281 EAX = r0;
5282 EDX = r1;
5283 CC_DST = r0;
5284 CC_SRC = r1;
5285}
5286
5287void helper_imulq_EAX_T0(target_ulong t0)
5288{
5289 uint64_t r0, r1;
5290
5291 muls64(&r0, &r1, EAX, t0);
5292 EAX = r0;
5293 EDX = r1;
5294 CC_DST = r0;
5295 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5296}
5297
5298target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5299{
5300 uint64_t r0, r1;
5301
5302 muls64(&r0, &r1, t0, t1);
5303 CC_DST = r0;
5304 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5305 return r0;
5306}
5307
5308void helper_divq_EAX(target_ulong t0)
5309{
5310 uint64_t r0, r1;
5311 if (t0 == 0) {
5312 raise_exception(EXCP00_DIVZ);
5313 }
5314 r0 = EAX;
5315 r1 = EDX;
5316 if (div64(&r0, &r1, t0))
5317 raise_exception(EXCP00_DIVZ);
5318 EAX = r0;
5319 EDX = r1;
5320}
5321
5322void helper_idivq_EAX(target_ulong t0)
5323{
5324 uint64_t r0, r1;
5325 if (t0 == 0) {
5326 raise_exception(EXCP00_DIVZ);
5327 }
5328 r0 = EAX;
5329 r1 = EDX;
5330 if (idiv64(&r0, &r1, t0))
5331 raise_exception(EXCP00_DIVZ);
5332 EAX = r0;
5333 EDX = r1;
5334}
5335#endif
5336
5337static void do_hlt(void)
5338{
5339 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5340 env->halted = 1;
5341 env->exception_index = EXCP_HLT;
5342 cpu_loop_exit();
5343}
5344
5345void helper_hlt(int next_eip_addend)
5346{
5347 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5348 EIP += next_eip_addend;
5349
5350 do_hlt();
5351}
5352
5353void helper_monitor(target_ulong ptr)
5354{
5355 if ((uint32_t)ECX != 0)
5356 raise_exception(EXCP0D_GPF);
5357 /* XXX: store address ? */
5358 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5359}
5360
5361void helper_mwait(int next_eip_addend)
5362{
5363 if ((uint32_t)ECX != 0)
5364 raise_exception(EXCP0D_GPF);
5365#ifdef VBOX
5366 helper_hlt(next_eip_addend);
5367#else
5368 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5369 EIP += next_eip_addend;
5370
5371 /* XXX: not complete but not completely erroneous */
5372 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5373 /* more than one CPU: do not sleep because another CPU may
5374 wake this one */
5375 } else {
5376 do_hlt();
5377 }
5378#endif
5379}
5380
5381void helper_debug(void)
5382{
5383 env->exception_index = EXCP_DEBUG;
5384 cpu_loop_exit();
5385}
5386
5387void helper_raise_interrupt(int intno, int next_eip_addend)
5388{
5389 raise_interrupt(intno, 1, 0, next_eip_addend);
5390}
5391
5392void helper_raise_exception(int exception_index)
5393{
5394 raise_exception(exception_index);
5395}
5396
5397void helper_cli(void)
5398{
5399 env->eflags &= ~IF_MASK;
5400}
5401
5402void helper_sti(void)
5403{
5404 env->eflags |= IF_MASK;
5405}
5406
5407#ifdef VBOX
5408void helper_cli_vme(void)
5409{
5410 env->eflags &= ~VIF_MASK;
5411}
5412
5413void helper_sti_vme(void)
5414{
5415 /* First check, then change eflags according to the AMD manual */
5416 if (env->eflags & VIP_MASK) {
5417 raise_exception(EXCP0D_GPF);
5418 }
5419 env->eflags |= VIF_MASK;
5420}
5421#endif
5422
5423#if 0
5424/* vm86plus instructions */
5425void helper_cli_vm(void)
5426{
5427 env->eflags &= ~VIF_MASK;
5428}
5429
5430void helper_sti_vm(void)
5431{
5432 env->eflags |= VIF_MASK;
5433 if (env->eflags & VIP_MASK) {
5434 raise_exception(EXCP0D_GPF);
5435 }
5436}
5437#endif
5438
5439void helper_set_inhibit_irq(void)
5440{
5441 env->hflags |= HF_INHIBIT_IRQ_MASK;
5442}
5443
5444void helper_reset_inhibit_irq(void)
5445{
5446 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5447}
5448
5449void helper_boundw(target_ulong a0, int v)
5450{
5451 int low, high;
5452 low = ldsw(a0);
5453 high = ldsw(a0 + 2);
5454 v = (int16_t)v;
5455 if (v < low || v > high) {
5456 raise_exception(EXCP05_BOUND);
5457 }
5458 FORCE_RET();
5459}
5460
5461void helper_boundl(target_ulong a0, int v)
5462{
5463 int low, high;
5464 low = ldl(a0);
5465 high = ldl(a0 + 4);
5466 if (v < low || v > high) {
5467 raise_exception(EXCP05_BOUND);
5468 }
5469 FORCE_RET();
5470}
5471
5472static float approx_rsqrt(float a)
5473{
5474 return 1.0 / sqrt(a);
5475}
5476
5477static float approx_rcp(float a)
5478{
5479 return 1.0 / a;
5480}
5481
5482#if !defined(CONFIG_USER_ONLY)
5483
5484#define MMUSUFFIX _mmu
5485
5486#define SHIFT 0
5487#include "softmmu_template.h"
5488
5489#define SHIFT 1
5490#include "softmmu_template.h"
5491
5492#define SHIFT 2
5493#include "softmmu_template.h"
5494
5495#define SHIFT 3
5496#include "softmmu_template.h"
5497
5498#endif
5499
5500#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5501/* This code assumes real physical addresses always fit into a host CPU register,
5502 which is wrong in general, but true for our current use cases. */
5503RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5504{
5505 return remR3PhysReadS8(addr);
5506}
5507RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5508{
5509 return remR3PhysReadU8(addr);
5510}
5511void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5512{
5513 remR3PhysWriteU8(addr, val);
5514}
5515RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5516{
5517 return remR3PhysReadS16(addr);
5518}
5519RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5520{
5521 return remR3PhysReadU16(addr);
5522}
5523void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5524{
5525 remR3PhysWriteU16(addr, val);
5526}
5527RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5528{
5529 return remR3PhysReadS32(addr);
5530}
5531RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5532{
5533 return remR3PhysReadU32(addr);
5534}
5535void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5536{
5537 remR3PhysWriteU32(addr, val);
5538}
5539uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5540{
5541 return remR3PhysReadU64(addr);
5542}
5543void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5544{
5545 remR3PhysWriteU64(addr, val);
5546}
5547#endif
5548
5549/* try to fill the TLB and return an exception if error. If retaddr is
5550 NULL, it means that the function was called in C code (i.e. not
5551 from generated code or from helper.c) */
5552/* XXX: fix it to restore all registers */
5553void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5554{
5555 TranslationBlock *tb;
5556 int ret;
5557 unsigned long pc;
5558 CPUX86State *saved_env;
5559
5560 /* XXX: hack to restore env in all cases, even if not called from
5561 generated code */
5562 saved_env = env;
5563 env = cpu_single_env;
5564
5565 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5566 if (ret) {
5567 if (retaddr) {
5568 /* now we have a real cpu fault */
5569 pc = (unsigned long)retaddr;
5570 tb = tb_find_pc(pc);
5571 if (tb) {
5572 /* the PC is inside the translated code. It means that we have
5573 a virtual CPU fault */
5574 cpu_restore_state(tb, env, pc, NULL);
5575 }
5576 }
5577 raise_exception_err(env->exception_index, env->error_code);
5578 }
5579 env = saved_env;
5580}
5581
5582#ifdef VBOX
5583
5584/**
5585 * Correctly computes the eflags.
5586 * @returns eflags.
5587 * @param env1 CPU environment.
5588 */
5589uint32_t raw_compute_eflags(CPUX86State *env1)
5590{
5591 CPUX86State *savedenv = env;
5592 uint32_t efl;
5593 env = env1;
5594 efl = compute_eflags();
5595 env = savedenv;
5596 return efl;
5597}
5598
5599/**
5600 * Reads a byte from a virtual address in the guest memory area.
5601 * XXX: does it work for all addresses? swapped out pages?
5602 * @returns read data byte.
5603 * @param env1 CPU environment.
5604 * @param pvAddr GC Virtual address.
5605 */
5606uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5607{
5608 CPUX86State *savedenv = env;
5609 uint8_t u8;
5610 env = env1;
5611 u8 = ldub_kernel(addr);
5612 env = savedenv;
5613 return u8;
5614}
5615
5616/**
5617 * Reads a word from a virtual address in the guest memory area.
5618 * XXX: does it work for all addresses? swapped out pages?
5619 * @returns read data word.
5620 * @param env1 CPU environment.
5621 * @param pvAddr GC Virtual address.
5622 */
5623uint16_t read_word(CPUX86State *env1, target_ulong addr)
5624{
5625 CPUX86State *savedenv = env;
5626 uint16_t u16;
5627 env = env1;
5628 u16 = lduw_kernel(addr);
5629 env = savedenv;
5630 return u16;
5631}
5632
5633/**
5634 * Reads a dword from a virtual address in the guest memory area.
5635 * XXX: does it work for all addresses? swapped out pages?
5636 * @returns read data dword.
5637 * @param env1 CPU environment.
5638 * @param pvAddr GC Virtual address.
5639 */
5640uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5641{
5642 CPUX86State *savedenv = env;
5643 uint32_t u32;
5644 env = env1;
5645 u32 = ldl_kernel(addr);
5646 env = savedenv;
5647 return u32;
5648}
5649
5650/**
5652 * Writes a byte to a virtual address in the guest memory area.
5653 * XXX: does it work for all addresses? swapped out pages?
5654 *
5654 * @param env1 CPU environment.
5655 * @param pvAddr GC Virtual address.
5656 * @param val byte value
5657 */
5658void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5659{
5660 CPUX86State *savedenv = env;
5661 env = env1;
5662 stb(addr, val);
5663 env = savedenv;
5664}
5665
5666void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5667{
5668 CPUX86State *savedenv = env;
5669 env = env1;
5670 stw(addr, val);
5671 env = savedenv;
5672}
5673
5674void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5675{
5676 CPUX86State *savedenv = env;
5677 env = env1;
5678 stl(addr, val);
5679 env = savedenv;
5680}
5681
5682/**
5683 * Correctly loads a selector into a segment register, updating the internal
5684 * qemu data/caches.
5685 * @param env1 CPU environment.
5686 * @param seg_reg Segment register.
5687 * @param selector Selector to load.
5688 */
5689void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5690{
5691 CPUX86State *savedenv = env;
5692#ifdef FORCE_SEGMENT_SYNC
5693 jmp_buf old_buf;
5694#endif
5695
5696 env = env1;
5697
5698 if ( env->eflags & X86_EFL_VM
5699 || !(env->cr[0] & X86_CR0_PE))
5700 {
5701 load_seg_vm(seg_reg, selector);
5702
5703 env = savedenv;
5704
5705 /* Successful sync. */
5706 env1->segs[seg_reg].newselector = 0;
5707 }
5708 else
5709 {
5710 /* For some reason this works even without saving/restoring the jump buffer; since this
5711 code is time critical, let's not do that. */
5712#ifdef FORCE_SEGMENT_SYNC
5713 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5714#endif
5715 if (setjmp(env1->jmp_env) == 0)
5716 {
5717 if (seg_reg == R_CS)
5718 {
5719 uint32_t e1, e2;
5720 e1 = e2 = 0;
5721 load_segment(&e1, &e2, selector);
5722 cpu_x86_load_seg_cache(env, R_CS, selector,
5723 get_seg_base(e1, e2),
5724 get_seg_limit(e1, e2),
5725 e2);
5726 }
5727 else
5728 helper_load_seg(seg_reg, selector);
5729 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5730 loading 0 selectors; this in turn led to subtle problems like #3588. */
5731
5732 env = savedenv;
5733
5734 /* Successful sync. */
5735 env1->segs[seg_reg].newselector = 0;
5736 }
5737 else
5738 {
5739 env = savedenv;
5740
5741 /* Postpone sync until the guest uses the selector. */
5742 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5743 env1->segs[seg_reg].newselector = selector;
5744 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5745 env1->exception_index = -1;
5746 env1->error_code = 0;
5747 env1->old_exception = -1;
5748 }
5749#ifdef FORCE_SEGMENT_SYNC
5750 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5751#endif
5752 }
5753
5754}
5755
5756DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5757{
5758 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5759}
5760
5761
5762int emulate_single_instr(CPUX86State *env1)
5763{
5764 TranslationBlock *tb;
5765 TranslationBlock *current;
5766 int flags;
5767 uint8_t *tc_ptr;
5768 target_ulong old_eip;
5769
5770 /* ensures env is loaded! */
5771 CPUX86State *savedenv = env;
5772 env = env1;
5773
5774 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5775
5776 current = env->current_tb;
5777 env->current_tb = NULL;
5778 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5779
5780 /*
5781 * Translate only one instruction.
5782 */
5783 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5784 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5785 env->segs[R_CS].base, flags, 0);
5786
5787 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5788
5789
5790 /* tb_link_phys: */
5791 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5792 tb->jmp_next[0] = NULL;
5793 tb->jmp_next[1] = NULL;
5794 Assert(tb->jmp_next[0] == NULL);
5795 Assert(tb->jmp_next[1] == NULL);
5796 if (tb->tb_next_offset[0] != 0xffff)
5797 tb_reset_jump(tb, 0);
5798 if (tb->tb_next_offset[1] != 0xffff)
5799 tb_reset_jump(tb, 1);
5800
5801 /*
5802 * Execute it using emulation
5803 */
5804 old_eip = env->eip;
5805 env->current_tb = tb;
5806
5807 /*
5808 * eip remains the same for repeated instructions, so we loop until it changes; it is unclear why qemu doesn't do a jump inside the generated code.
5809 * This is perhaps not a very safe hack.
5810 */
5811 while(old_eip == env->eip)
5812 {
5813 tc_ptr = tb->tc_ptr;
5814
5815#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5816 int fake_ret;
5817 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5818#else
5819 tcg_qemu_tb_exec(tc_ptr);
5820#endif
5821 /*
5822 * Exit once we detect an external interrupt and interrupts are enabled
5823 */
5824 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5825 ( (env->eflags & IF_MASK) &&
5826 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5827 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5828 {
5829 break;
5830 }
5831 }
5832 env->current_tb = current;
5833
5834 tb_phys_invalidate(tb, -1);
5835 tb_free(tb);
5836/*
5837 Assert(tb->tb_next_offset[0] == 0xffff);
5838 Assert(tb->tb_next_offset[1] == 0xffff);
5839 Assert(tb->tb_next[0] == 0xffff);
5840 Assert(tb->tb_next[1] == 0xffff);
5841 Assert(tb->jmp_next[0] == NULL);
5842 Assert(tb->jmp_next[1] == NULL);
5843 Assert(tb->jmp_first == NULL); */
5844
5845 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5846
5847 /*
5848 * Execute the next instruction when we encounter instruction fusing.
5849 */
5850 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5851 {
5852 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5853 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5854 emulate_single_instr(env);
5855 }
5856
5857 env = savedenv;
5858 return 0;
5859}
5860
5861/**
5862 * Correctly loads a new ldtr selector.
5863 *
5864 * @param env1 CPU environment.
5865 * @param selector Selector to load.
5866 */
5867void sync_ldtr(CPUX86State *env1, int selector)
5868{
5869 CPUX86State *saved_env = env;
5870 if (setjmp(env1->jmp_env) == 0)
5871 {
5872 env = env1;
5873 helper_lldt(selector);
5874 env = saved_env;
5875 }
5876 else
5877 {
5878 env = saved_env;
5879#ifdef VBOX_STRICT
5880 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5881#endif
5882 }
5883}
5884
5885int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5886 uint32_t *esp_ptr, int dpl)
5887{
5888 int type, index, shift;
5889
5890 CPUX86State *savedenv = env;
5891 env = env1;
5892
5893 if (!(env->tr.flags & DESC_P_MASK))
5894 cpu_abort(env, "invalid tss");
5895 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5896 if ((type & 7) != 1)
5897 cpu_abort(env, "invalid tss type %d", type);
5898 shift = type >> 3;
5899 index = (dpl * 4 + 2) << shift;
5900 if (index + (4 << shift) - 1 > env->tr.limit)
5901 {
5902 env = savedenv;
5903 return 0;
5904 }
5905 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5906
5907 if (shift == 0) {
5908 *esp_ptr = lduw_kernel(env->tr.base + index);
5909 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5910 } else {
5911 *esp_ptr = ldl_kernel(env->tr.base + index);
5912 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5913 }
5914
5915 env = savedenv;
5916 return 1;
5917}
5918
5919//*****************************************************************************
5920// Needs to be at the bottom of the file (overriding macros)
5921
5922#ifndef VBOX
5923static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5924#else /* VBOX */
5925DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5926#endif /* VBOX */
5927{
5928 return *(CPU86_LDouble *)ptr;
5929}
5930
5931#ifndef VBOX
5932static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5933#else /* VBOX */
5934DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5935#endif /* VBOX */
5936{
5937 *(CPU86_LDouble *)ptr = f;
5938}
5939
5940#undef stw
5941#undef stl
5942#undef stq
5943#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5944#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5945#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
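/* From here on stw/stl/stq (and, further down, lduw/ldl/ldq) are plain host
   memory accesses: restore_raw_fp_state() and save_raw_fp_state() below are
   given a host pointer to the FPU state area rather than a guest address, so
   the guest memory accessors must not be used.  This is why these overrides
   have to live at the bottom of the file. */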
5946
5947//*****************************************************************************
5948void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5949{
5950 int fpus, fptag, i, nb_xmm_regs;
5951 CPU86_LDouble tmp;
5952 uint8_t *addr;
5953 int data64 = !!(env->hflags & HF_LMA_MASK);
5954
5955 if (env->cpuid_features & CPUID_FXSR)
5956 {
5957 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5958 fptag = 0;
5959 for(i = 0; i < 8; i++) {
5960 fptag |= (env->fptags[i] << i);
5961 }
5962 stw(ptr, env->fpuc);
5963 stw(ptr + 2, fpus);
5964 stw(ptr + 4, fptag ^ 0xff);
5965
5966 addr = ptr + 0x20;
5967 for(i = 0;i < 8; i++) {
5968 tmp = ST(i);
5969 helper_fstt_raw(tmp, addr);
5970 addr += 16;
5971 }
5972
5973 if (env->cr[4] & CR4_OSFXSR_MASK) {
5974 /* XXX: finish it */
5975 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5976 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5977 nb_xmm_regs = 8 << data64;
5978 addr = ptr + 0xa0;
5979 for(i = 0; i < nb_xmm_regs; i++) {
5980#if __GNUC__ < 4
5981 stq(addr, env->xmm_regs[i].XMM_Q(0));
5982 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5983#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5984 stl(addr, env->xmm_regs[i].XMM_L(0));
5985 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5986 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5987 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5988#endif
5989 addr += 16;
5990 }
5991 }
5992 }
5993 else
5994 {
5995 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5996 int fptag;
5997
5998 fp->FCW = env->fpuc;
5999 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6000 fptag = 0;
6001 for (i=7; i>=0; i--) {
6002 fptag <<= 2;
6003 if (env->fptags[i]) {
6004 fptag |= 3;
6005 } else {
6006 /* the FPU automatically computes it */
6007 }
6008 }
6009 fp->FTW = fptag;
6010
6011 for(i = 0;i < 8; i++) {
6012 tmp = ST(i);
6013 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6014 }
6015 }
6016}
6017
6018//*****************************************************************************
6019#undef lduw
6020#undef ldl
6021#undef ldq
6022#define lduw(a) *(uint16_t *)(a)
6023#define ldl(a) *(uint32_t *)(a)
6024#define ldq(a) *(uint64_t *)(a)
6025//*****************************************************************************
6026void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6027{
6028 int i, fpus, fptag, nb_xmm_regs;
6029 CPU86_LDouble tmp;
6030 uint8_t *addr;
6031 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6032
6033 if (env->cpuid_features & CPUID_FXSR)
6034 {
6035 env->fpuc = lduw(ptr);
6036 fpus = lduw(ptr + 2);
6037 fptag = lduw(ptr + 4);
6038 env->fpstt = (fpus >> 11) & 7;
6039 env->fpus = fpus & ~0x3800;
6040 fptag ^= 0xff;
6041 for(i = 0;i < 8; i++) {
6042 env->fptags[i] = ((fptag >> i) & 1);
6043 }
6044
6045 addr = ptr + 0x20;
6046 for(i = 0;i < 8; i++) {
6047 tmp = helper_fldt_raw(addr);
6048 ST(i) = tmp;
6049 addr += 16;
6050 }
6051
6052 if (env->cr[4] & CR4_OSFXSR_MASK) {
6053 /* XXX: finish it, endianness */
6054 env->mxcsr = ldl(ptr + 0x18);
6055 //ldl(ptr + 0x1c);
6056 nb_xmm_regs = 8 << data64;
6057 addr = ptr + 0xa0;
6058 for(i = 0; i < nb_xmm_regs; i++) {
6059#if HC_ARCH_BITS == 32
6060 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6061 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6062 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6063 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6064 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6065#else
6066 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6067 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6068#endif
6069 addr += 16;
6070 }
6071 }
6072 }
6073 else
6074 {
6075 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6076 int fptag, j;
6077
6078 env->fpuc = fp->FCW;
6079 env->fpstt = (fp->FSW >> 11) & 7;
6080 env->fpus = fp->FSW & ~0x3800;
6081 fptag = fp->FTW;
6082 for(i = 0;i < 8; i++) {
6083 env->fptags[i] = ((fptag & 3) == 3);
6084 fptag >>= 2;
6085 }
6086 j = env->fpstt;
6087 for(i = 0;i < 8; i++) {
6088 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6089 ST(i) = tmp;
6090 }
6091 }
6092}
6093//*****************************************************************************
6094//*****************************************************************************
6095
6096#endif /* VBOX */
6097
6098/* Secure Virtual Machine helpers */
6099
6100#if defined(CONFIG_USER_ONLY)
6101
6102void helper_vmrun(int aflag, int next_eip_addend)
6103{
6104}
6105void helper_vmmcall(void)
6106{
6107}
6108void helper_vmload(int aflag)
6109{
6110}
6111void helper_vmsave(int aflag)
6112{
6113}
6114void helper_stgi(void)
6115{
6116}
6117void helper_clgi(void)
6118{
6119}
6120void helper_skinit(void)
6121{
6122}
6123void helper_invlpga(int aflag)
6124{
6125}
6126void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6127{
6128}
6129void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6130{
6131}
6132
6133void helper_svm_check_io(uint32_t port, uint32_t param,
6134 uint32_t next_eip_addend)
6135{
6136}
6137#else
6138
6139#ifndef VBOX
6140static inline void svm_save_seg(target_phys_addr_t addr,
6141#else /* VBOX */
6142DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6143#endif /* VBOX */
6144 const SegmentCache *sc)
6145{
6146 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6147 sc->selector);
6148 stq_phys(addr + offsetof(struct vmcb_seg, base),
6149 sc->base);
6150 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6151 sc->limit);
6152 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6153 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6154}
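/* The attrib word stored above packs the descriptor attributes kept in
   sc->flags (same bit positions as the second descriptor dword, bits 8..23)
   into the 12-bit VMCB format: type/S/DPL/P go to bits 0-7 and AVL/L/DB/G to
   bits 8-11.  svm_load_seg() below performs the inverse expansion. */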
6155
6156#ifndef VBOX
6157static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6158#else /* VBOX */
6159DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6160#endif /* VBOX */
6161{
6162 unsigned int flags;
6163
6164 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6165 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6166 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6167 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6168 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6169}
6170
6171#ifndef VBOX
6172static inline void svm_load_seg_cache(target_phys_addr_t addr,
6173#else /* VBOX */
6174DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6175#endif /* VBOX */
6176 CPUState *env, int seg_reg)
6177{
6178 SegmentCache sc1, *sc = &sc1;
6179 svm_load_seg(addr, sc);
6180 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6181 sc->base, sc->limit, sc->flags);
6182}
6183
6184void helper_vmrun(int aflag, int next_eip_addend)
6185{
6186 target_ulong addr;
6187 uint32_t event_inj;
6188 uint32_t int_ctl;
6189
6190 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6191
6192 if (aflag == 2)
6193 addr = EAX;
6194 else
6195 addr = (uint32_t)EAX;
6196
6197 if (loglevel & CPU_LOG_TB_IN_ASM)
6198 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6199
6200 env->vm_vmcb = addr;
6201
6202 /* save the current CPU state in the hsave page */
6203 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6204 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6205
6206 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6207 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6208
6209 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6210 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6211 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6212 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6213 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6214 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6215
6216 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6217 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6218
6219 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6220 &env->segs[R_ES]);
6221 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6222 &env->segs[R_CS]);
6223 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6224 &env->segs[R_SS]);
6225 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6226 &env->segs[R_DS]);
6227
6228 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6229 EIP + next_eip_addend);
6230 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6231 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6232
6233 /* load the interception bitmaps so we do not need to access the
6234 vmcb in svm mode */
6235 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6236 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6237 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6238 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6239 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6240 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6241
6242 /* enable intercepts */
6243 env->hflags |= HF_SVMI_MASK;
6244
6245 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6246
6247 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6248 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6249
6250 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6251 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6252
6253 /* clear exit_info_2 so we behave like the real hardware */
6254 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6255
6256 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6257 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6258 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6259 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6260 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6261 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6262 if (int_ctl & V_INTR_MASKING_MASK) {
6263 env->v_tpr = int_ctl & V_TPR_MASK;
6264 env->hflags2 |= HF2_VINTR_MASK;
6265 if (env->eflags & IF_MASK)
6266 env->hflags2 |= HF2_HIF_MASK;
6267 }
6268
6269 cpu_load_efer(env,
6270 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6271 env->eflags = 0;
6272 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6273 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6274 CC_OP = CC_OP_EFLAGS;
6275
6276 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6277 env, R_ES);
6278 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6279 env, R_CS);
6280 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6281 env, R_SS);
6282 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6283 env, R_DS);
6284
6285 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6286 env->eip = EIP;
6287 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6288 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6289 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6290 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6291 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6292
6293 /* FIXME: guest state consistency checks */
6294
6295 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6296 case TLB_CONTROL_DO_NOTHING:
6297 break;
6298 case TLB_CONTROL_FLUSH_ALL_ASID:
6299 /* FIXME: this is not 100% correct but should work for now */
6300 tlb_flush(env, 1);
6301 break;
6302 }
6303
6304 env->hflags2 |= HF2_GIF_MASK;
6305
6306 if (int_ctl & V_IRQ_MASK) {
6307 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6308 }
6309
6310 /* maybe we need to inject an event */
6311 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6312 if (event_inj & SVM_EVTINJ_VALID) {
6313 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6314 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6315 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6316 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6317
6318 if (loglevel & CPU_LOG_TB_IN_ASM)
6319 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6320 /* FIXME: need to implement valid_err */
6321 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6322 case SVM_EVTINJ_TYPE_INTR:
6323 env->exception_index = vector;
6324 env->error_code = event_inj_err;
6325 env->exception_is_int = 0;
6326 env->exception_next_eip = -1;
6327 if (loglevel & CPU_LOG_TB_IN_ASM)
6328 fprintf(logfile, "INTR");
6329            /* XXX: is it always correct? */
6330 do_interrupt(vector, 0, 0, 0, 1);
6331 break;
6332 case SVM_EVTINJ_TYPE_NMI:
6333 env->exception_index = EXCP02_NMI;
6334 env->error_code = event_inj_err;
6335 env->exception_is_int = 0;
6336 env->exception_next_eip = EIP;
6337 if (loglevel & CPU_LOG_TB_IN_ASM)
6338 fprintf(logfile, "NMI");
6339 cpu_loop_exit();
6340 break;
6341 case SVM_EVTINJ_TYPE_EXEPT:
6342 env->exception_index = vector;
6343 env->error_code = event_inj_err;
6344 env->exception_is_int = 0;
6345 env->exception_next_eip = -1;
6346 if (loglevel & CPU_LOG_TB_IN_ASM)
6347 fprintf(logfile, "EXEPT");
6348 cpu_loop_exit();
6349 break;
6350 case SVM_EVTINJ_TYPE_SOFT:
6351 env->exception_index = vector;
6352 env->error_code = event_inj_err;
6353 env->exception_is_int = 1;
6354 env->exception_next_eip = EIP;
6355 if (loglevel & CPU_LOG_TB_IN_ASM)
6356 fprintf(logfile, "SOFT");
6357 cpu_loop_exit();
6358 break;
6359 }
6360 if (loglevel & CPU_LOG_TB_IN_ASM)
6361 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6362 }
6363}
6364
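/* VMMCALL: only the intercept check is emulated here; if the guest's
   hypervisor does not intercept the instruction it raises #UD, which
   matches real hardware when no handler is installed. */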
6365void helper_vmmcall(void)
6366{
6367 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6368 raise_exception(EXCP06_ILLOP);
6369}
6370
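/* VMLOAD: load the hidden state of FS, GS, TR and LDTR plus a handful of
   MSRs (KernelGSBase, STAR, LSTAR, CSTAR, SFMASK, SYSENTER_*) from the
   VMCB whose physical address is taken from rAX.  aflag == 2 means the
   instruction was executed with a 64-bit address size; otherwise only the
   low 32 bits of RAX are used. */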
6371void helper_vmload(int aflag)
6372{
6373 target_ulong addr;
6374 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6375
6376 if (aflag == 2)
6377 addr = EAX;
6378 else
6379 addr = (uint32_t)EAX;
6380
6381 if (loglevel & CPU_LOG_TB_IN_ASM)
6382 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6383 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6384 env->segs[R_FS].base);
6385
6386 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6387 env, R_FS);
6388 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6389 env, R_GS);
6390 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6391 &env->tr);
6392 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6393 &env->ldt);
6394
6395#ifdef TARGET_X86_64
6396 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6397 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6398 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6399 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6400#endif
6401 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6402 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6403 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6404 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6405}
6406
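/* VMSAVE: the mirror image of VMLOAD above - store the same segment
   registers and MSRs back into the VMCB addressed by rAX. */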
6407void helper_vmsave(int aflag)
6408{
6409 target_ulong addr;
6410 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6411
6412 if (aflag == 2)
6413 addr = EAX;
6414 else
6415 addr = (uint32_t)EAX;
6416
6417 if (loglevel & CPU_LOG_TB_IN_ASM)
6418 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6419 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6420 env->segs[R_FS].base);
6421
6422 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6423 &env->segs[R_FS]);
6424 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6425 &env->segs[R_GS]);
6426 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6427 &env->tr);
6428 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6429 &env->ldt);
6430
6431#ifdef TARGET_X86_64
6432 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6433 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6434 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6435 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6436#endif
6437 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6438 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6439 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6440 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6441}
6442
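/* STGI/CLGI set and clear the global interrupt flag (GIF), which is
   tracked here in hflags2 rather than in a dedicated register. */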
6443void helper_stgi(void)
6444{
6445 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6446 env->hflags2 |= HF2_GIF_MASK;
6447}
6448
6449void helper_clgi(void)
6450{
6451 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6452 env->hflags2 &= ~HF2_GIF_MASK;
6453}
6454
6455void helper_skinit(void)
6456{
6457 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6458 /* XXX: not implemented */
6459 raise_exception(EXCP06_ILLOP);
6460}
6461
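/* INVLPGA: flush the TLB entry for the virtual address in rAX.  The ASID
   in ECX is ignored (see the XXX note below), so the flush is done
   unconditionally; that is conservative but correct. */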
6462void helper_invlpga(int aflag)
6463{
6464 target_ulong addr;
6465 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6466
6467 if (aflag == 2)
6468 addr = EAX;
6469 else
6470 addr = (uint32_t)EAX;
6471
6472    /* XXX: could use the ASID to decide whether the flush is
6473       needed at all */
6474 tlb_flush_page(env, addr);
6475}
6476
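/* Central intercept check used by the translated code: while a guest runs
   under the recompiler's own SVM emulation (HF_SVMI_MASK set), look up the
   exit code in the intercept masks cached at VMRUN time and raise a
   #VMEXIT if the corresponding bit is set.  MSR accesses additionally
   consult the MSR permission map in guest memory; param selects the read
   or write bit there (the callers are expected to pass 0 for RDMSR and 1
   for WRMSR).  In the VBox build the whole body is compiled out because
   hardware-assisted SVM is handled by HWACCM instead. */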
6477void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6478{
6479 if (likely(!(env->hflags & HF_SVMI_MASK)))
6480 return;
6481#ifndef VBOX
6482 switch(type) {
6483#ifndef VBOX
6484 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6485#else
6486 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6487 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6488 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6489#endif
6490 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6491 helper_vmexit(type, param);
6492 }
6493 break;
6494#ifndef VBOX
6495 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6496#else
6497 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6498 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6499 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6500#endif
6501 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6502 helper_vmexit(type, param);
6503 }
6504 break;
6505 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6506 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6507 helper_vmexit(type, param);
6508 }
6509 break;
6510 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6511 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6512 helper_vmexit(type, param);
6513 }
6514 break;
6515 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6516 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6517 helper_vmexit(type, param);
6518 }
6519 break;
6520 case SVM_EXIT_MSR:
6521 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6522 /* FIXME: this should be read in at vmrun (faster this way?) */
6523 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6524 uint32_t t0, t1;
6525 switch((uint32_t)ECX) {
6526 case 0 ... 0x1fff:
6527 t0 = (ECX * 2) % 8;
6528                t1 = (ECX * 2) / 8;
6529 break;
6530 case 0xc0000000 ... 0xc0001fff:
6531 t0 = (8192 + ECX - 0xc0000000) * 2;
6532 t1 = (t0 / 8);
6533 t0 %= 8;
6534 break;
6535 case 0xc0010000 ... 0xc0011fff:
6536 t0 = (16384 + ECX - 0xc0010000) * 2;
6537 t1 = (t0 / 8);
6538 t0 %= 8;
6539 break;
6540 default:
6541 helper_vmexit(type, param);
6542 t0 = 0;
6543 t1 = 0;
6544 break;
6545 }
6546 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6547 helper_vmexit(type, param);
6548 }
6549 break;
6550 default:
6551 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6552 helper_vmexit(type, param);
6553 }
6554 break;
6555 }
6556#else
6557 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6558#endif
6559}
6560
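/* IOIO intercept check.  The I/O permission map has one bit per port; the
   size field encoded in param yields a mask with one bit per byte
   accessed, and the 16-bit load copes with an access that straddles a
   byte boundary in the map.  On a hit, the address of the next
   instruction is stored in exit_info_2 before taking the #VMEXIT. */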
6561void helper_svm_check_io(uint32_t port, uint32_t param,
6562 uint32_t next_eip_addend)
6563{
6564 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6565 /* FIXME: this should be read in at vmrun (faster this way?) */
6566 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6567 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6568 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6569 /* next EIP */
6570 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6571 env->eip + next_eip_addend);
6572 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6573 }
6574 }
6575}
6576
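/* #VMEXIT emulation: write the current guest state and the exit code/info
   back into the VMCB, restore the host state that VMRUN stashed in
   vm_hsave, clear GIF and any pending exception, and finally return to the
   main execution loop via cpu_loop_exit() so the host resumes at the
   saved RIP. */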
6577/* Note: currently only 32 bits of exit_code are used */
6578void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6579{
6580 uint32_t int_ctl;
6581
6582 if (loglevel & CPU_LOG_TB_IN_ASM)
6583 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6584 exit_code, exit_info_1,
6585 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6586 EIP);
6587
6588 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6589 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6590 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6591 } else {
6592 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6593 }
6594
6595 /* Save the VM state in the vmcb */
6596 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6597 &env->segs[R_ES]);
6598 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6599 &env->segs[R_CS]);
6600 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6601 &env->segs[R_SS]);
6602 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6603 &env->segs[R_DS]);
6604
6605 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6606 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6607
6608 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6609 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6610
6611 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6612 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6613 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6614 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6615 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6616
6617 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6618 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6619 int_ctl |= env->v_tpr & V_TPR_MASK;
6620 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6621 int_ctl |= V_IRQ_MASK;
6622 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6623
6624 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6625 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6626 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6627 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6628 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6629 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6630 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6631
6632 /* Reload the host state from vm_hsave */
6633 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6634 env->hflags &= ~HF_SVMI_MASK;
6635 env->intercept = 0;
6636 env->intercept_exceptions = 0;
6637 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6638 env->tsc_offset = 0;
6639
6640 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6641 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6642
6643 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6644 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6645
6646 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6647 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6648 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6649 /* we need to set the efer after the crs so the hidden flags get
6650 set properly */
6651 cpu_load_efer(env,
6652 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6653 env->eflags = 0;
6654 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6655 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6656 CC_OP = CC_OP_EFLAGS;
6657
6658 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6659 env, R_ES);
6660 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6661 env, R_CS);
6662 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6663 env, R_SS);
6664 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6665 env, R_DS);
6666
6667 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6668 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6669 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6670
6671 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6672 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6673
6674 /* other setups */
6675 cpu_x86_set_cpl(env, 0);
6676 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6677 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6678
6679 env->hflags2 &= ~HF2_GIF_MASK;
6680 /* FIXME: Resets the current ASID register to zero (host ASID). */
6681
6682 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6683
6684 /* Clears the TSC_OFFSET inside the processor. */
6685
6686 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6687       from the page table indicated by the host's CR3. If the PDPEs contain
6688 illegal state, the processor causes a shutdown. */
6689
6690 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6691 env->cr[0] |= CR0_PE_MASK;
6692 env->eflags &= ~VM_MASK;
6693
6694 /* Disables all breakpoints in the host DR7 register. */
6695
6696 /* Checks the reloaded host state for consistency. */
6697
6698 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6699 host's code segment or non-canonical (in the case of long mode), a
6700       #GP fault is delivered inside the host. */
6701
6702 /* remove any pending exception */
6703 env->exception_index = -1;
6704 env->error_code = 0;
6705 env->old_exception = -1;
6706
6707 cpu_loop_exit();
6708}
6709
6710#endif
6711
6712/* MMX/SSE */
6713/* XXX: optimize by storing fptt and fptags in the static cpu state */
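/* In this FPU model a tag value of 0 means "valid" and 1 means "empty":
   entering MMX mode marks all eight registers valid and resets the stack
   top, while EMMS marks them all empty again (hence the 0x01010101 stores
   that cover four tags at a time). */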
6714void helper_enter_mmx(void)
6715{
6716 env->fpstt = 0;
6717 *(uint32_t *)(env->fptags) = 0;
6718 *(uint32_t *)(env->fptags + 4) = 0;
6719}
6720
6721void helper_emms(void)
6722{
6723 /* set to empty state */
6724 *(uint32_t *)(env->fptags) = 0x01010101;
6725 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6726}
6727
6728/* XXX: suppress */
6729void helper_movq(uint64_t *d, uint64_t *s)
6730{
6731 *d = *s;
6732}
6733
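/* The SSE/MMX and arithmetic helpers below are generated from templates:
   ops_sse.h uses SHIFT to select the 64-bit MMX (0) or 128-bit SSE (1)
   variants, while helper_template.h interprets SHIFT as the log2 of the
   operand size, producing the 8/16/32-bit (and, on 64-bit targets,
   64-bit) helpers referenced by cc_table further down. */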
6734#define SHIFT 0
6735#include "ops_sse.h"
6736
6737#define SHIFT 1
6738#include "ops_sse.h"
6739
6740#define SHIFT 0
6741#include "helper_template.h"
6742#undef SHIFT
6743
6744#define SHIFT 1
6745#include "helper_template.h"
6746#undef SHIFT
6747
6748#define SHIFT 2
6749#include "helper_template.h"
6750#undef SHIFT
6751
6752#ifdef TARGET_X86_64
6753
6754#define SHIFT 3
6755#include "helper_template.h"
6756#undef SHIFT
6757
6758#endif
6759
6760/* bit operations */
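/* Note: both loops below assume a non-zero operand; the generated code is
   expected to branch around the helper call when the source is zero
   (where BSF/BSR leave the destination undefined and set ZF), otherwise
   the loops would never terminate. */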
6761target_ulong helper_bsf(target_ulong t0)
6762{
6763 int count;
6764 target_ulong res;
6765
6766 res = t0;
6767 count = 0;
6768 while ((res & 1) == 0) {
6769 count++;
6770 res >>= 1;
6771 }
6772 return count;
6773}
6774
6775target_ulong helper_bsr(target_ulong t0)
6776{
6777 int count;
6778 target_ulong res, mask;
6779
6780 res = t0;
6781 count = TARGET_LONG_BITS - 1;
6782 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6783 while ((res & mask) == 0) {
6784 count--;
6785 res <<= 1;
6786 }
6787 return count;
6788}
6789
6790
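/* Lazy condition-code evaluation: CC_OP records which operation last set
   the flags, and cc_table maps it to a pair of functions that reconstruct
   either all flags or just CF from CC_SRC/CC_DST.  CC_OP_EFLAGS means the
   flags are already materialised in CC_SRC, so the two helpers below
   simply return it. */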
6791static int compute_all_eflags(void)
6792{
6793 return CC_SRC;
6794}
6795
6796static int compute_c_eflags(void)
6797{
6798 return CC_SRC & CC_C;
6799}
6800
6801#ifndef VBOX
6802CCTable cc_table[CC_OP_NB] = {
6803 [CC_OP_DYNAMIC] = { /* should never happen */ },
6804
6805 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6806
6807 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6808 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6809 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6810
6811 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6812 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6813 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6814
6815 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6816 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6817 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6818
6819 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6820 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6821 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6822
6823 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6824 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6825 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6826
6827 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6828 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6829 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6830
6831 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6832 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6833 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6834
6835 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6836 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6837 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6838
6839 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6840 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6841 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6842
6843 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6844 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6845 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6846
6847#ifdef TARGET_X86_64
6848 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6849
6850 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6851
6852 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6853
6854 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6855
6856 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6857
6858 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6859
6860 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6861
6862 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6863
6864 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6865
6866 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6867#endif
6868};
6869#else /* VBOX */
6870/* Sync carefully with cpu.h */
6871CCTable cc_table[CC_OP_NB] = {
6872 /* CC_OP_DYNAMIC */ { 0, 0 },
6873
6874 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6875
6876 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6877 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6878 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6879#ifdef TARGET_X86_64
6880 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6881#else
6882 /* CC_OP_MULQ */ { 0, 0 },
6883#endif
6884
6885 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6886 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6887 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6888#ifdef TARGET_X86_64
6889 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6890#else
6891 /* CC_OP_ADDQ */ { 0, 0 },
6892#endif
6893
6894 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6895 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6896 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6897#ifdef TARGET_X86_64
6898 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6899#else
6900 /* CC_OP_ADCQ */ { 0, 0 },
6901#endif
6902
6903 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6904 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6905 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6906#ifdef TARGET_X86_64
6907 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6908#else
6909 /* CC_OP_SUBQ */ { 0, 0 },
6910#endif
6911
6912 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6913 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6914 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6915#ifdef TARGET_X86_64
6916 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6917#else
6918 /* CC_OP_SBBQ */ { 0, 0 },
6919#endif
6920
6921 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6922 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6923 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6924#ifdef TARGET_X86_64
6925 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6926#else
6927 /* CC_OP_LOGICQ */ { 0, 0 },
6928#endif
6929
6930 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6931 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6932 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6933#ifdef TARGET_X86_64
6934 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6935#else
6936 /* CC_OP_INCQ */ { 0, 0 },
6937#endif
6938
6939 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6940 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6941 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6942#ifdef TARGET_X86_64
6943 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6944#else
6945 /* CC_OP_DECQ */ { 0, 0 },
6946#endif
6947
6948 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6949 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6950 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6951#ifdef TARGET_X86_64
6952 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6953#else
6954 /* CC_OP_SHLQ */ { 0, 0 },
6955#endif
6956
6957 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6958 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6959 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6960#ifdef TARGET_X86_64
6961 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6962#else
6963 /* CC_OP_SARQ */ { 0, 0 },
6964#endif
6965};
6966#endif /* VBOX */