VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c @ 17101

Last change on this file since 17101 was 17045, checked in by vboxsync, 16 years ago

REM: update with explanation of the previous fix

  • Property svn:eol-style set to native
File size: 194.1 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
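/* parity_table[b] holds CC_P when byte value b contains an even number of set
   bits, i.e. when the x86 parity flag (PF) should be set for a result whose
   low byte is b. */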
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
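/* RCL rotates through CF, so the effective rotate count for a 16-bit operand
   is taken modulo 17 (16 data bits plus CF) and for an 8-bit operand modulo 9.
   The two tables below precompute that reduction for raw counts 0..31. */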
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to AMD manual, should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
 180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
196/* return non zero if error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with CPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
273
274 if (seg == R_CS)
275 flags |= DESC_CS_MASK;
276
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, flags);
279#else
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
282#endif
283}
284
285#ifndef VBOX
286static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
287#else /* VBOX */
288DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
289#endif /* VBOX */
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0;
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with CPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
 371 /* if data or non-conforming code, check the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387#if 0
388 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */
389 cpu_x86_load_seg_cache(env, seg_reg, selector,
390 0, 0, 0);
391#endif
392#endif
393 }
394}
395
396#define SWITCH_TSS_JMP 0
397#define SWITCH_TSS_IRET 1
398#define SWITCH_TSS_CALL 2
399
400/* XXX: restore CPU state in registers (PowerPC case) */
401static void switch_tss(int tss_selector,
402 uint32_t e1, uint32_t e2, int source,
403 uint32_t next_eip)
404{
405 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
406 target_ulong tss_base;
407 uint32_t new_regs[8], new_segs[6];
408 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
409 uint32_t old_eflags, eflags_mask;
410 SegmentCache *dt;
411#ifndef VBOX
412 int index;
413#else
414 unsigned int index;
415#endif
416 target_ulong ptr;
417
418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
419#ifdef DEBUG_PCALL
420 if (loglevel & CPU_LOG_PCALL)
421 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
422#endif
423
424#if defined(VBOX) && defined(DEBUG)
425 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
426#endif
427
428 /* if task gate, we read the TSS segment and we load it */
429 if (type == 5) {
430 if (!(e2 & DESC_P_MASK))
431 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
432 tss_selector = e1 >> 16;
433 if (tss_selector & 4)
434 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
435 if (load_segment(&e1, &e2, tss_selector) != 0)
436 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
437 if (e2 & DESC_S_MASK)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
440 if ((type & 7) != 1)
441 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
442 }
443
444 if (!(e2 & DESC_P_MASK))
445 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
446
447 if (type & 8)
448 tss_limit_max = 103;
449 else
450 tss_limit_max = 43;
451 tss_limit = get_seg_limit(e1, e2);
452 tss_base = get_seg_base(e1, e2);
453 if ((tss_selector & 4) != 0 ||
454 tss_limit < tss_limit_max)
455 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
456 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
457 if (old_type & 8)
458 old_tss_limit_max = 103;
459 else
460 old_tss_limit_max = 43;
461
462 /* read all the registers from the new TSS */
463 if (type & 8) {
464 /* 32 bit */
465 new_cr3 = ldl_kernel(tss_base + 0x1c);
466 new_eip = ldl_kernel(tss_base + 0x20);
467 new_eflags = ldl_kernel(tss_base + 0x24);
468 for(i = 0; i < 8; i++)
469 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
470 for(i = 0; i < 6; i++)
471 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
472 new_ldt = lduw_kernel(tss_base + 0x60);
473 new_trap = ldl_kernel(tss_base + 0x64);
474 } else {
475 /* 16 bit */
476 new_cr3 = 0;
477 new_eip = lduw_kernel(tss_base + 0x0e);
478 new_eflags = lduw_kernel(tss_base + 0x10);
479 for(i = 0; i < 8; i++)
480 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
481 for(i = 0; i < 4; i++)
482 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
483 new_ldt = lduw_kernel(tss_base + 0x2a);
484 new_segs[R_FS] = 0;
485 new_segs[R_GS] = 0;
486 new_trap = 0;
487 }
488
489 /* NOTE: we must avoid memory exceptions during the task switch,
 490 so we make dummy accesses beforehand */
491 /* XXX: it can still fail in some cases, so a bigger hack is
 492 necessary to validate the TLB after having done the accesses */
493
494 v1 = ldub_kernel(env->tr.base);
495 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
496 stb_kernel(env->tr.base, v1);
497 stb_kernel(env->tr.base + old_tss_limit_max, v2);
498
499 /* clear busy bit (it is restartable) */
500 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
501 target_ulong ptr;
502 uint32_t e2;
503 ptr = env->gdt.base + (env->tr.selector & ~7);
504 e2 = ldl_kernel(ptr + 4);
505 e2 &= ~DESC_TSS_BUSY_MASK;
506 stl_kernel(ptr + 4, e2);
507 }
508 old_eflags = compute_eflags();
509 if (source == SWITCH_TSS_IRET)
510 old_eflags &= ~NT_MASK;
511
512 /* save the current state in the old TSS */
513 if (type & 8) {
514 /* 32 bit */
515 stl_kernel(env->tr.base + 0x20, next_eip);
516 stl_kernel(env->tr.base + 0x24, old_eflags);
517 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
518 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
519 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
520 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
521 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
522 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
523 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
524 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
525 for(i = 0; i < 6; i++)
526 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
527#if defined(VBOX) && defined(DEBUG)
528 printf("TSS 32 bits switch\n");
529 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
530#endif
531 } else {
532 /* 16 bit */
533 stw_kernel(env->tr.base + 0x0e, next_eip);
534 stw_kernel(env->tr.base + 0x10, old_eflags);
535 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
536 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
537 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
538 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
539 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
540 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
541 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
542 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
543 for(i = 0; i < 4; i++)
544 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
545 }
546
 547 /* now if an exception occurs, it will occur in the next task
548 context */
549
550 if (source == SWITCH_TSS_CALL) {
551 stw_kernel(tss_base, env->tr.selector);
552 new_eflags |= NT_MASK;
553 }
554
555 /* set busy bit */
556 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
557 target_ulong ptr;
558 uint32_t e2;
559 ptr = env->gdt.base + (tss_selector & ~7);
560 e2 = ldl_kernel(ptr + 4);
561 e2 |= DESC_TSS_BUSY_MASK;
562 stl_kernel(ptr + 4, e2);
563 }
564
565 /* set the new CPU state */
566 /* from this point, any exception which occurs can give problems */
567 env->cr[0] |= CR0_TS_MASK;
568 env->hflags |= HF_TS_MASK;
569 env->tr.selector = tss_selector;
570 env->tr.base = tss_base;
571 env->tr.limit = tss_limit;
572 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
573
574 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
575 cpu_x86_update_cr3(env, new_cr3);
576 }
577
578 /* load all registers without an exception, then reload them with
579 possible exception */
580 env->eip = new_eip;
581 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
582 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
583 if (!(type & 8))
584 eflags_mask &= 0xffff;
585 load_eflags(new_eflags, eflags_mask);
586 /* XXX: what to do in 16 bit case ? */
587 EAX = new_regs[0];
588 ECX = new_regs[1];
589 EDX = new_regs[2];
590 EBX = new_regs[3];
591 ESP = new_regs[4];
592 EBP = new_regs[5];
593 ESI = new_regs[6];
594 EDI = new_regs[7];
595 if (new_eflags & VM_MASK) {
596 for(i = 0; i < 6; i++)
597 load_seg_vm(i, new_segs[i]);
598 /* in vm86, CPL is always 3 */
599 cpu_x86_set_cpl(env, 3);
600 } else {
 601 /* CPL is set to the RPL of CS */
602 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
603 /* first just selectors as the rest may trigger exceptions */
604 for(i = 0; i < 6; i++)
605 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
606 }
607
608 env->ldt.selector = new_ldt & ~4;
609 env->ldt.base = 0;
610 env->ldt.limit = 0;
611 env->ldt.flags = 0;
612
613 /* load the LDT */
614 if (new_ldt & 4)
615 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
616
617 if ((new_ldt & 0xfffc) != 0) {
618 dt = &env->gdt;
619 index = new_ldt & ~7;
620 if ((index + 7) > dt->limit)
621 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
622 ptr = dt->base + index;
623 e1 = ldl_kernel(ptr);
624 e2 = ldl_kernel(ptr + 4);
625 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 if (!(e2 & DESC_P_MASK))
628 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
629 load_seg_cache_raw_dt(&env->ldt, e1, e2);
630 }
631
632 /* load the segments */
633 if (!(new_eflags & VM_MASK)) {
634 tss_load_seg(R_CS, new_segs[R_CS]);
635 tss_load_seg(R_SS, new_segs[R_SS]);
636 tss_load_seg(R_ES, new_segs[R_ES]);
637 tss_load_seg(R_DS, new_segs[R_DS]);
638 tss_load_seg(R_FS, new_segs[R_FS]);
639 tss_load_seg(R_GS, new_segs[R_GS]);
640 }
641
642 /* check that EIP is in the CS segment limits */
643 if (new_eip > env->segs[R_CS].limit) {
644 /* XXX: different exception if CALL ? */
645 raise_exception_err(EXCP0D_GPF, 0);
646 }
647}
648
649/* check if Port I/O is allowed in TSS */
650#ifndef VBOX
651static inline void check_io(int addr, int size)
652{
653 int io_offset, val, mask;
654
655#else /* VBOX */
656DECLINLINE(void) check_io(int addr, int size)
657{
658 int val, mask;
659 unsigned int io_offset;
660#endif /* VBOX */
661 /* TSS must be a valid 32 bit one */
662 if (!(env->tr.flags & DESC_P_MASK) ||
663 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
664 env->tr.limit < 103)
665 goto fail;
666 io_offset = lduw_kernel(env->tr.base + 0x66);
667 io_offset += (addr >> 3);
668 /* Note: the check needs two bytes */
669 if ((io_offset + 1) > env->tr.limit)
670 goto fail;
671 val = lduw_kernel(env->tr.base + io_offset);
672 val >>= (addr & 7);
673 mask = (1 << size) - 1;
674 /* all bits must be zero to allow the I/O */
675 if ((val & mask) != 0) {
676 fail:
677 raise_exception_err(EXCP0D_GPF, 0);
678 }
679}
680
681#ifdef VBOX
682/* Keep in sync with gen_check_external_event() */
683void helper_check_external_event()
684{
685 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
686 | CPU_INTERRUPT_EXTERNAL_TIMER
687 | CPU_INTERRUPT_EXTERNAL_DMA))
688 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
689 && (env->eflags & IF_MASK)
690 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
691 {
692 helper_external_event();
693 }
694
695}
696
697void helper_sync_seg(uint32_t reg)
698{
699 if (env->segs[reg].newselector)
700 sync_seg(env, reg, env->segs[reg].newselector);
701}
702#endif
703
704void helper_check_iob(uint32_t t0)
705{
706 check_io(t0, 1);
707}
708
709void helper_check_iow(uint32_t t0)
710{
711 check_io(t0, 2);
712}
713
714void helper_check_iol(uint32_t t0)
715{
716 check_io(t0, 4);
717}
718
719void helper_outb(uint32_t port, uint32_t data)
720{
721 cpu_outb(env, port, data & 0xff);
722}
723
724target_ulong helper_inb(uint32_t port)
725{
726 return cpu_inb(env, port);
727}
728
729void helper_outw(uint32_t port, uint32_t data)
730{
731 cpu_outw(env, port, data & 0xffff);
732}
733
734target_ulong helper_inw(uint32_t port)
735{
736 return cpu_inw(env, port);
737}
738
739void helper_outl(uint32_t port, uint32_t data)
740{
741 cpu_outl(env, port, data);
742}
743
744target_ulong helper_inl(uint32_t port)
745{
746 return cpu_inl(env, port);
747}
748
749#ifndef VBOX
750static inline unsigned int get_sp_mask(unsigned int e2)
751#else /* VBOX */
752DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
753#endif /* VBOX */
754{
755 if (e2 & DESC_B_MASK)
756 return 0xffffffff;
757 else
758 return 0xffff;
759}
760
761#ifdef TARGET_X86_64
762#define SET_ESP(val, sp_mask)\
763do {\
764 if ((sp_mask) == 0xffff)\
765 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
766 else if ((sp_mask) == 0xffffffffLL)\
767 ESP = (uint32_t)(val);\
768 else\
769 ESP = (val);\
770} while (0)
771#else
772#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
773#endif
774
775/* in 64-bit machines, this can overflow. So this segment addition macro
776 * can be used to trim the value to 32-bit whenever needed */
777#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
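/* Example with hypothetical values: with ssp = 0xfff00000 and
   (sp & sp_mask) = 0x00200000 the untruncated sum is 0x100100000, which no
   longer fits in 32 bits; the uint32_t cast restores the 32-bit wrap-around
   that a 32-bit guest expects. */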
778
779/* XXX: add a is_user flag to have proper security support */
780#define PUSHW(ssp, sp, sp_mask, val)\
781{\
782 sp -= 2;\
783 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
784}
785
786#define PUSHL(ssp, sp, sp_mask, val)\
787{\
788 sp -= 4;\
789 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
790}
791
792#define POPW(ssp, sp, sp_mask, val)\
793{\
794 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
795 sp += 2;\
796}
797
798#define POPL(ssp, sp, sp_mask, val)\
799{\
800 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
801 sp += 4;\
802}
803
804/* protected mode interrupt */
805static void do_interrupt_protected(int intno, int is_int, int error_code,
806 unsigned int next_eip, int is_hw)
807{
808 SegmentCache *dt;
809 target_ulong ptr, ssp;
810 int type, dpl, selector, ss_dpl, cpl;
811 int has_error_code, new_stack, shift;
812 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
813 uint32_t old_eip, sp_mask;
814
815#ifdef VBOX
816 ss = ss_e1 = ss_e2 = 0;
817# ifdef VBOX_WITH_VMI
818 if ( intno == 6
819 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
820 {
821 env->exception_index = EXCP_PARAV_CALL;
822 cpu_loop_exit();
823 }
824# endif
825 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
826 cpu_loop_exit();
827#endif
828
829 has_error_code = 0;
830 if (!is_int && !is_hw) {
831 switch(intno) {
832 case 8:
833 case 10:
834 case 11:
835 case 12:
836 case 13:
837 case 14:
838 case 17:
839 has_error_code = 1;
840 break;
841 }
842 }
843 if (is_int)
844 old_eip = next_eip;
845 else
846 old_eip = env->eip;
847
848 dt = &env->idt;
849#ifndef VBOX
850 if (intno * 8 + 7 > dt->limit)
851#else
852 if ((unsigned)intno * 8 + 7 > dt->limit)
853#endif
854 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
855 ptr = dt->base + intno * 8;
856 e1 = ldl_kernel(ptr);
857 e2 = ldl_kernel(ptr + 4);
858 /* check gate type */
859 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
860 switch(type) {
861 case 5: /* task gate */
862 /* must do that check here to return the correct error code */
863 if (!(e2 & DESC_P_MASK))
864 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
865 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
866 if (has_error_code) {
867 int type;
868 uint32_t mask;
869 /* push the error code */
870 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
871 shift = type >> 3;
872 if (env->segs[R_SS].flags & DESC_B_MASK)
873 mask = 0xffffffff;
874 else
875 mask = 0xffff;
876 esp = (ESP - (2 << shift)) & mask;
877 ssp = env->segs[R_SS].base + esp;
878 if (shift)
879 stl_kernel(ssp, error_code);
880 else
881 stw_kernel(ssp, error_code);
882 SET_ESP(esp, mask);
883 }
884 return;
885 case 6: /* 286 interrupt gate */
886 case 7: /* 286 trap gate */
887 case 14: /* 386 interrupt gate */
888 case 15: /* 386 trap gate */
889 break;
890 default:
891 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
892 break;
893 }
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 cpl = env->hflags & HF_CPL_MASK;
896 /* check privilege if software int */
897 if (is_int && dpl < cpl)
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 /* check valid bit */
900 if (!(e2 & DESC_P_MASK))
901 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
902 selector = e1 >> 16;
903 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
904 if ((selector & 0xfffc) == 0)
905 raise_exception_err(EXCP0D_GPF, 0);
906
907 if (load_segment(&e1, &e2, selector) != 0)
908 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 if (dpl > cpl)
913 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
916 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
917 /* to inner privilege */
918 get_ss_esp_from_tss(&ss, &esp, dpl);
919 if ((ss & 0xfffc) == 0)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 if ((ss & 3) != dpl)
922 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
923 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
926 if (ss_dpl != dpl)
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if (!(ss_e2 & DESC_S_MASK) ||
929 (ss_e2 & DESC_CS_MASK) ||
930 !(ss_e2 & DESC_W_MASK))
931 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
932 if (!(ss_e2 & DESC_P_MASK))
933#ifdef VBOX /* See page 3-477 of 253666.pdf */
934 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
935#else
936 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
937#endif
938 new_stack = 1;
939 sp_mask = get_sp_mask(ss_e2);
940 ssp = get_seg_base(ss_e1, ss_e2);
941#if defined(VBOX) && defined(DEBUG)
942 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
943#endif
944 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
945 /* to same privilege */
946 if (env->eflags & VM_MASK)
947 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948 new_stack = 0;
949 sp_mask = get_sp_mask(env->segs[R_SS].flags);
950 ssp = env->segs[R_SS].base;
951 esp = ESP;
952 dpl = cpl;
953 } else {
954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
955 new_stack = 0; /* avoid warning */
956 sp_mask = 0; /* avoid warning */
957 ssp = 0; /* avoid warning */
958 esp = 0; /* avoid warning */
959 }
960
961 shift = type >> 3;
962
963#if 0
964 /* XXX: check that enough room is available */
965 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
966 if (env->eflags & VM_MASK)
967 push_size += 8;
968 push_size <<= shift;
969#endif
970 if (shift == 1) {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHL(ssp, esp, sp_mask, ESP);
980 }
981 PUSHL(ssp, esp, sp_mask, compute_eflags());
982 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHL(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHL(ssp, esp, sp_mask, error_code);
986 }
987 } else {
988 if (new_stack) {
989 if (env->eflags & VM_MASK) {
990 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
991 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
992 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
993 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
994 }
995 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
996 PUSHW(ssp, esp, sp_mask, ESP);
997 }
998 PUSHW(ssp, esp, sp_mask, compute_eflags());
999 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1000 PUSHW(ssp, esp, sp_mask, old_eip);
1001 if (has_error_code) {
1002 PUSHW(ssp, esp, sp_mask, error_code);
1003 }
1004 }
1005
1006 if (new_stack) {
1007 if (env->eflags & VM_MASK) {
1008 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1009 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1010 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1011 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1012 }
1013 ss = (ss & ~3) | dpl;
1014 cpu_x86_load_seg_cache(env, R_SS, ss,
1015 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1016 }
1017 SET_ESP(esp, sp_mask);
1018
1019 selector = (selector & ~3) | dpl;
1020 cpu_x86_load_seg_cache(env, R_CS, selector,
1021 get_seg_base(e1, e2),
1022 get_seg_limit(e1, e2),
1023 e2);
1024 cpu_x86_set_cpl(env, dpl);
1025 env->eip = offset;
1026
1027 /* interrupt gate clear IF mask */
1028 if ((type & 1) == 0) {
1029 env->eflags &= ~IF_MASK;
1030 }
1031#ifndef VBOX
1032 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1033#else
1034 /*
1035 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
 1036 * gets confused by seemingly changed EFLAGS. See #3491 and
1037 * public bug #2341.
1038 */
1039 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1040#endif
1041}
1042#ifdef VBOX
1043
1044/* check if VME interrupt redirection is enabled in TSS */
1045DECLINLINE(bool) is_vme_irq_redirected(int intno)
1046{
1047 unsigned int io_offset, intredir_offset;
1048 unsigned char val, mask;
1049
1050 /* TSS must be a valid 32 bit one */
1051 if (!(env->tr.flags & DESC_P_MASK) ||
1052 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1053 env->tr.limit < 103)
1054 goto fail;
1055 io_offset = lduw_kernel(env->tr.base + 0x66);
1056 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1057 if (io_offset < 0x68 + 0x20)
1058 io_offset = 0x68 + 0x20;
1059 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1060 intredir_offset = io_offset - 0x20;
1061
1062 intredir_offset += (intno >> 3);
1063 if ((intredir_offset) > env->tr.limit)
1064 goto fail;
1065
1066 val = ldub_kernel(env->tr.base + intredir_offset);
1067 mask = 1 << (unsigned char)(intno & 7);
1068
1069 /* bit set means no redirection. */
1070 if ((val & mask) != 0) {
1071 return false;
1072 }
1073 return true;
1074
1075fail:
1076 raise_exception_err(EXCP0D_GPF, 0);
1077 return true;
1078}
1079
1080/* V86 mode software interrupt with CR4.VME=1 */
1081static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1082{
1083 target_ulong ptr, ssp;
1084 int selector;
1085 uint32_t offset, esp;
1086 uint32_t old_cs, old_eflags;
1087 uint32_t iopl;
1088
1089 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1090
1091 if (!is_vme_irq_redirected(intno))
1092 {
1093 if (iopl == 3)
1094 {
1095 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1096 return;
1097 }
1098 else
1099 raise_exception_err(EXCP0D_GPF, 0);
1100 }
1101
1102 /* virtual mode idt is at linear address 0 */
1103 ptr = 0 + intno * 4;
1104 offset = lduw_kernel(ptr);
1105 selector = lduw_kernel(ptr + 2);
1106 esp = ESP;
1107 ssp = env->segs[R_SS].base;
1108 old_cs = env->segs[R_CS].selector;
1109
1110 old_eflags = compute_eflags();
1111 if (iopl < 3)
1112 {
1113 /* copy VIF into IF and set IOPL to 3 */
1114 if (env->eflags & VIF_MASK)
1115 old_eflags |= IF_MASK;
1116 else
1117 old_eflags &= ~IF_MASK;
1118
1119 old_eflags |= (3 << IOPL_SHIFT);
1120 }
1121
1122 /* XXX: use SS segment size ? */
1123 PUSHW(ssp, esp, 0xffff, old_eflags);
1124 PUSHW(ssp, esp, 0xffff, old_cs);
1125 PUSHW(ssp, esp, 0xffff, next_eip);
1126
1127 /* update processor state */
1128 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1129 env->eip = offset;
1130 env->segs[R_CS].selector = selector;
1131 env->segs[R_CS].base = (selector << 4);
1132 env->eflags &= ~(TF_MASK | RF_MASK);
1133
1134 if (iopl < 3)
1135 env->eflags &= ~VIF_MASK;
1136 else
1137 env->eflags &= ~IF_MASK;
1138}
1139#endif /* VBOX */
1140
1141#ifdef TARGET_X86_64
1142
1143#define PUSHQ(sp, val)\
1144{\
1145 sp -= 8;\
1146 stq_kernel(sp, (val));\
1147}
1148
1149#define POPQ(sp, val)\
1150{\
1151 val = ldq_kernel(sp);\
1152 sp += 8;\
1153}
1154
1155#ifndef VBOX
1156static inline target_ulong get_rsp_from_tss(int level)
1157#else /* VBOX */
1158DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1159#endif /* VBOX */
1160{
1161 int index;
1162
1163#if 0
1164 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1165 env->tr.base, env->tr.limit);
1166#endif
1167
1168 if (!(env->tr.flags & DESC_P_MASK))
1169 cpu_abort(env, "invalid tss");
1170 index = 8 * level + 4;
1171 if ((index + 7) > env->tr.limit)
1172 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1173 return ldq_kernel(env->tr.base + index);
1174}
1175
1176/* 64 bit interrupt */
1177static void do_interrupt64(int intno, int is_int, int error_code,
1178 target_ulong next_eip, int is_hw)
1179{
1180 SegmentCache *dt;
1181 target_ulong ptr;
1182 int type, dpl, selector, cpl, ist;
1183 int has_error_code, new_stack;
1184 uint32_t e1, e2, e3, ss;
1185 target_ulong old_eip, esp, offset;
1186
1187#ifdef VBOX
1188 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1189 cpu_loop_exit();
1190#endif
1191
1192 has_error_code = 0;
1193 if (!is_int && !is_hw) {
1194 switch(intno) {
1195 case 8:
1196 case 10:
1197 case 11:
1198 case 12:
1199 case 13:
1200 case 14:
1201 case 17:
1202 has_error_code = 1;
1203 break;
1204 }
1205 }
1206 if (is_int)
1207 old_eip = next_eip;
1208 else
1209 old_eip = env->eip;
1210
1211 dt = &env->idt;
1212 if (intno * 16 + 15 > dt->limit)
1213 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1214 ptr = dt->base + intno * 16;
1215 e1 = ldl_kernel(ptr);
1216 e2 = ldl_kernel(ptr + 4);
1217 e3 = ldl_kernel(ptr + 8);
1218 /* check gate type */
1219 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1220 switch(type) {
1221 case 14: /* 386 interrupt gate */
1222 case 15: /* 386 trap gate */
1223 break;
1224 default:
1225 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1226 break;
1227 }
1228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1229 cpl = env->hflags & HF_CPL_MASK;
1230 /* check privilege if software int */
1231 if (is_int && dpl < cpl)
1232 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1233 /* check valid bit */
1234 if (!(e2 & DESC_P_MASK))
1235 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1236 selector = e1 >> 16;
1237 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1238 ist = e2 & 7;
1239 if ((selector & 0xfffc) == 0)
1240 raise_exception_err(EXCP0D_GPF, 0);
1241
1242 if (load_segment(&e1, &e2, selector) != 0)
1243 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1244 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1246 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1247 if (dpl > cpl)
1248 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1249 if (!(e2 & DESC_P_MASK))
1250 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1251 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1252 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1253 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1254 /* to inner privilege */
1255 if (ist != 0)
1256 esp = get_rsp_from_tss(ist + 3);
1257 else
1258 esp = get_rsp_from_tss(dpl);
1259 esp &= ~0xfLL; /* align stack */
1260 ss = 0;
1261 new_stack = 1;
1262 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1263 /* to same privilege */
1264 if (env->eflags & VM_MASK)
1265 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1266 new_stack = 0;
1267 if (ist != 0)
1268 esp = get_rsp_from_tss(ist + 3);
1269 else
1270 esp = ESP;
1271 esp &= ~0xfLL; /* align stack */
1272 dpl = cpl;
1273 } else {
1274 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1275 new_stack = 0; /* avoid warning */
1276 esp = 0; /* avoid warning */
1277 }
1278
1279 PUSHQ(esp, env->segs[R_SS].selector);
1280 PUSHQ(esp, ESP);
1281 PUSHQ(esp, compute_eflags());
1282 PUSHQ(esp, env->segs[R_CS].selector);
1283 PUSHQ(esp, old_eip);
1284 if (has_error_code) {
1285 PUSHQ(esp, error_code);
1286 }
1287
1288 if (new_stack) {
1289 ss = 0 | dpl;
1290 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1291 }
1292 ESP = esp;
1293
1294 selector = (selector & ~3) | dpl;
1295 cpu_x86_load_seg_cache(env, R_CS, selector,
1296 get_seg_base(e1, e2),
1297 get_seg_limit(e1, e2),
1298 e2);
1299 cpu_x86_set_cpl(env, dpl);
1300 env->eip = offset;
1301
1302 /* interrupt gate clear IF mask */
1303 if ((type & 1) == 0) {
1304 env->eflags &= ~IF_MASK;
1305 }
1306
1307#ifndef VBOX
1308 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1309#else
1310 /*
1311 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
 1312 * gets confused by seemingly changed EFLAGS. See #3491 and
1313 * public bug #2341.
1314 */
1315 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1316#endif
1317}
1318#endif
1319
1320#if defined(CONFIG_USER_ONLY)
1321void helper_syscall(int next_eip_addend)
1322{
1323 env->exception_index = EXCP_SYSCALL;
1324 env->exception_next_eip = env->eip + next_eip_addend;
1325 cpu_loop_exit();
1326}
1327#else
1328void helper_syscall(int next_eip_addend)
1329{
1330 int selector;
1331
1332 if (!(env->efer & MSR_EFER_SCE)) {
1333 raise_exception_err(EXCP06_ILLOP, 0);
1334 }
1335 selector = (env->star >> 32) & 0xffff;
1336#ifdef TARGET_X86_64
1337 if (env->hflags & HF_LMA_MASK) {
1338 int code64;
1339
1340 ECX = env->eip + next_eip_addend;
1341 env->regs[11] = compute_eflags();
1342
1343 code64 = env->hflags & HF_CS64_MASK;
1344
1345 cpu_x86_set_cpl(env, 0);
1346 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1347 0, 0xffffffff,
1348 DESC_G_MASK | DESC_P_MASK |
1349 DESC_S_MASK |
1350 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1351 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1352 0, 0xffffffff,
1353 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1354 DESC_S_MASK |
1355 DESC_W_MASK | DESC_A_MASK);
1356 env->eflags &= ~env->fmask;
1357 load_eflags(env->eflags, 0);
1358 if (code64)
1359 env->eip = env->lstar;
1360 else
1361 env->eip = env->cstar;
1362 } else
1363#endif
1364 {
1365 ECX = (uint32_t)(env->eip + next_eip_addend);
1366
1367 cpu_x86_set_cpl(env, 0);
1368 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1369 0, 0xffffffff,
1370 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1371 DESC_S_MASK |
1372 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1373 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1374 0, 0xffffffff,
1375 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1376 DESC_S_MASK |
1377 DESC_W_MASK | DESC_A_MASK);
1378 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1379 env->eip = (uint32_t)env->star;
1380 }
1381}
1382#endif
1383
1384void helper_sysret(int dflag)
1385{
1386 int cpl, selector;
1387
1388 if (!(env->efer & MSR_EFER_SCE)) {
1389 raise_exception_err(EXCP06_ILLOP, 0);
1390 }
1391 cpl = env->hflags & HF_CPL_MASK;
1392 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1393 raise_exception_err(EXCP0D_GPF, 0);
1394 }
1395 selector = (env->star >> 48) & 0xffff;
1396#ifdef TARGET_X86_64
1397 if (env->hflags & HF_LMA_MASK) {
1398 if (dflag == 2) {
1399 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1400 0, 0xffffffff,
1401 DESC_G_MASK | DESC_P_MASK |
1402 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1403 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1404 DESC_L_MASK);
1405 env->eip = ECX;
1406 } else {
1407 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1408 0, 0xffffffff,
1409 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1410 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1411 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1412 env->eip = (uint32_t)ECX;
1413 }
1414 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1415 0, 0xffffffff,
1416 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1417 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1418 DESC_W_MASK | DESC_A_MASK);
1419 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1420 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1421 cpu_x86_set_cpl(env, 3);
1422 } else
1423#endif
1424 {
1425 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1426 0, 0xffffffff,
1427 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1428 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1429 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1430 env->eip = (uint32_t)ECX;
1431 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1432 0, 0xffffffff,
1433 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1434 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1435 DESC_W_MASK | DESC_A_MASK);
1436 env->eflags |= IF_MASK;
1437 cpu_x86_set_cpl(env, 3);
1438 }
1439#ifdef USE_KQEMU
1440 if (kqemu_is_ok(env)) {
1441 if (env->hflags & HF_LMA_MASK)
1442 CC_OP = CC_OP_EFLAGS;
1443 env->exception_index = -1;
1444 cpu_loop_exit();
1445 }
1446#endif
1447}
1448
1449#ifdef VBOX
1450/**
1451 * Checks and processes external VMM events.
1452 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1453 */
1454void helper_external_event(void)
1455{
1456#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1457 uintptr_t uSP;
1458# ifdef RT_ARCH_AMD64
1459 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1460# else
1461 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1462# endif
1463 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1464#endif
1465 /* Keep in sync with flags checked by gen_check_external_event() */
1466 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1467 {
1468 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1469 ~CPU_INTERRUPT_EXTERNAL_HARD);
1470 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1471 }
1472 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1473 {
1474 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1475 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1476 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1477 }
1478 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1479 {
1480 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1481 ~CPU_INTERRUPT_EXTERNAL_DMA);
1482 remR3DmaRun(env);
1483 }
1484 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1485 {
1486 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1487 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1488 remR3TimersRun(env);
1489 }
1490}
1491/* helper for recording call instruction addresses for later scanning */
1492void helper_record_call()
1493{
1494 if ( !(env->state & CPU_RAW_RING0)
1495 && (env->cr[0] & CR0_PG_MASK)
1496 && !(env->eflags & X86_EFL_IF))
1497 remR3RecordCall(env);
1498}
1499#endif /* VBOX */
1500
1501/* real mode interrupt */
1502static void do_interrupt_real(int intno, int is_int, int error_code,
1503 unsigned int next_eip)
1504{
1505 SegmentCache *dt;
1506 target_ulong ptr, ssp;
1507 int selector;
1508 uint32_t offset, esp;
1509 uint32_t old_cs, old_eip;
1510
1511 /* real mode (simpler !) */
1512 dt = &env->idt;
1513#ifndef VBOX
1514 if (intno * 4 + 3 > dt->limit)
1515#else
1516 if ((unsigned)intno * 4 + 3 > dt->limit)
1517#endif
1518 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1519 ptr = dt->base + intno * 4;
1520 offset = lduw_kernel(ptr);
1521 selector = lduw_kernel(ptr + 2);
1522 esp = ESP;
1523 ssp = env->segs[R_SS].base;
1524 if (is_int)
1525 old_eip = next_eip;
1526 else
1527 old_eip = env->eip;
1528 old_cs = env->segs[R_CS].selector;
1529 /* XXX: use SS segment size ? */
1530 PUSHW(ssp, esp, 0xffff, compute_eflags());
1531 PUSHW(ssp, esp, 0xffff, old_cs);
1532 PUSHW(ssp, esp, 0xffff, old_eip);
1533
1534 /* update processor state */
1535 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1536 env->eip = offset;
1537 env->segs[R_CS].selector = selector;
1538 env->segs[R_CS].base = (selector << 4);
1539 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1540}
1541
1542/* fake user mode interrupt */
1543void do_interrupt_user(int intno, int is_int, int error_code,
1544 target_ulong next_eip)
1545{
1546 SegmentCache *dt;
1547 target_ulong ptr;
1548 int dpl, cpl, shift;
1549 uint32_t e2;
1550
1551 dt = &env->idt;
1552 if (env->hflags & HF_LMA_MASK) {
1553 shift = 4;
1554 } else {
1555 shift = 3;
1556 }
1557 ptr = dt->base + (intno << shift);
1558 e2 = ldl_kernel(ptr + 4);
1559
1560 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1561 cpl = env->hflags & HF_CPL_MASK;
1562 /* check privilege if software int */
1563 if (is_int && dpl < cpl)
1564 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1565
1566 /* Since we emulate only user space, we cannot do more than
 1567 exit the emulation with a suitable exception and error
1568 code */
1569 if (is_int)
1570 EIP = next_eip;
1571}
1572
1573/*
1574 * Begin execution of an interruption. is_int is TRUE if coming from
1575 * the int instruction. next_eip is the EIP value AFTER the interrupt
1576 * instruction. It is only relevant if is_int is TRUE.
1577 */
1578void do_interrupt(int intno, int is_int, int error_code,
1579 target_ulong next_eip, int is_hw)
1580{
1581 if (loglevel & CPU_LOG_INT) {
1582 if ((env->cr[0] & CR0_PE_MASK)) {
1583 static int count;
1584 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1585 count, intno, error_code, is_int,
1586 env->hflags & HF_CPL_MASK,
1587 env->segs[R_CS].selector, EIP,
1588 (int)env->segs[R_CS].base + EIP,
1589 env->segs[R_SS].selector, ESP);
1590 if (intno == 0x0e) {
1591 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1592 } else {
1593 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1594 }
1595 fprintf(logfile, "\n");
1596 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1597#if 0
1598 {
1599 int i;
1600 uint8_t *ptr;
1601 fprintf(logfile, " code=");
1602 ptr = env->segs[R_CS].base + env->eip;
1603 for(i = 0; i < 16; i++) {
1604 fprintf(logfile, " %02x", ldub(ptr + i));
1605 }
1606 fprintf(logfile, "\n");
1607 }
1608#endif
1609 count++;
1610 }
1611 }
1612 if (env->cr[0] & CR0_PE_MASK) {
1613#ifdef TARGET_X86_64
1614 if (env->hflags & HF_LMA_MASK) {
1615 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1616 } else
1617#endif
1618 {
1619#ifdef VBOX
1620 /* int xx *, v86 code and VME enabled? */
1621 if ( (env->eflags & VM_MASK)
1622 && (env->cr[4] & CR4_VME_MASK)
1623 && is_int
1624 && !is_hw
1625 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1626 )
1627 do_soft_interrupt_vme(intno, error_code, next_eip);
1628 else
1629#endif /* VBOX */
1630 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1631 }
1632 } else {
1633 do_interrupt_real(intno, is_int, error_code, next_eip);
1634 }
1635}
1636
1637/*
1638 * Check nested exceptions and change to double or triple fault if
 1639 * needed. It should only be called if this is not an interrupt.
1640 * Returns the new exception number.
1641 */
1642static int check_exception(int intno, int *error_code)
1643{
1644 int first_contributory = env->old_exception == 0 ||
1645 (env->old_exception >= 10 &&
1646 env->old_exception <= 13);
1647 int second_contributory = intno == 0 ||
1648 (intno >= 10 && intno <= 13);
1649
1650 if (loglevel & CPU_LOG_INT)
1651 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1652 env->old_exception, intno);
1653
1654 if (env->old_exception == EXCP08_DBLE)
1655 cpu_abort(env, "triple fault");
1656
1657 if ((first_contributory && second_contributory)
1658 || (env->old_exception == EXCP0E_PAGE &&
1659 (second_contributory || (intno == EXCP0E_PAGE)))) {
1660 intno = EXCP08_DBLE;
1661 *error_code = 0;
1662 }
1663
1664 if (second_contributory || (intno == EXCP0E_PAGE) ||
1665 (intno == EXCP08_DBLE))
1666 env->old_exception = intno;
1667
1668 return intno;
1669}
1670
1671/*
1672 * Signal an interruption. It is executed in the main CPU loop.
1673 * is_int is TRUE if coming from the int instruction. next_eip is the
1674 * EIP value AFTER the interrupt instruction. It is only relevant if
1675 * is_int is TRUE.
1676 */
1677void raise_interrupt(int intno, int is_int, int error_code,
1678 int next_eip_addend)
1679{
1680#if defined(VBOX) && defined(DEBUG)
1681 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1682#endif
1683 if (!is_int) {
1684 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1685 intno = check_exception(intno, &error_code);
1686 } else {
1687 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1688 }
1689
1690 env->exception_index = intno;
1691 env->error_code = error_code;
1692 env->exception_is_int = is_int;
1693 env->exception_next_eip = env->eip + next_eip_addend;
1694 cpu_loop_exit();
1695}
1696
1697/* shortcuts to generate exceptions */
1698
1699void (raise_exception_err)(int exception_index, int error_code)
1700{
1701 raise_interrupt(exception_index, 0, error_code, 0);
1702}
1703
1704void raise_exception(int exception_index)
1705{
1706 raise_interrupt(exception_index, 0, 0, 0);
1707}
1708
1709/* SMM support */
1710
1711#if defined(CONFIG_USER_ONLY)
1712
1713void do_smm_enter(void)
1714{
1715}
1716
1717void helper_rsm(void)
1718{
1719}
1720
1721#else
1722
1723#ifdef TARGET_X86_64
1724#define SMM_REVISION_ID 0x00020064
1725#else
1726#define SMM_REVISION_ID 0x00020000
1727#endif
1728
1729void do_smm_enter(void)
1730{
1731 target_ulong sm_state;
1732 SegmentCache *dt;
1733 int i, offset;
1734
1735 if (loglevel & CPU_LOG_INT) {
1736 fprintf(logfile, "SMM: enter\n");
1737 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1738 }
1739
1740 env->hflags |= HF_SMM_MASK;
1741 cpu_smm_update(env);
1742
1743 sm_state = env->smbase + 0x8000;
1744
1745#ifdef TARGET_X86_64
1746 for(i = 0; i < 6; i++) {
1747 dt = &env->segs[i];
1748 offset = 0x7e00 + i * 16;
1749 stw_phys(sm_state + offset, dt->selector);
1750 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1751 stl_phys(sm_state + offset + 4, dt->limit);
1752 stq_phys(sm_state + offset + 8, dt->base);
1753 }
1754
1755 stq_phys(sm_state + 0x7e68, env->gdt.base);
1756 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1757
1758 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1759 stq_phys(sm_state + 0x7e78, env->ldt.base);
1760 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1761 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1762
1763 stq_phys(sm_state + 0x7e88, env->idt.base);
1764 stl_phys(sm_state + 0x7e84, env->idt.limit);
1765
1766 stw_phys(sm_state + 0x7e90, env->tr.selector);
1767 stq_phys(sm_state + 0x7e98, env->tr.base);
1768 stl_phys(sm_state + 0x7e94, env->tr.limit);
1769 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1770
1771 stq_phys(sm_state + 0x7ed0, env->efer);
1772
1773 stq_phys(sm_state + 0x7ff8, EAX);
1774 stq_phys(sm_state + 0x7ff0, ECX);
1775 stq_phys(sm_state + 0x7fe8, EDX);
1776 stq_phys(sm_state + 0x7fe0, EBX);
1777 stq_phys(sm_state + 0x7fd8, ESP);
1778 stq_phys(sm_state + 0x7fd0, EBP);
1779 stq_phys(sm_state + 0x7fc8, ESI);
1780 stq_phys(sm_state + 0x7fc0, EDI);
1781 for(i = 8; i < 16; i++)
1782 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1783 stq_phys(sm_state + 0x7f78, env->eip);
1784 stl_phys(sm_state + 0x7f70, compute_eflags());
1785 stl_phys(sm_state + 0x7f68, env->dr[6]);
1786 stl_phys(sm_state + 0x7f60, env->dr[7]);
1787
1788 stl_phys(sm_state + 0x7f48, env->cr[4]);
1789 stl_phys(sm_state + 0x7f50, env->cr[3]);
1790 stl_phys(sm_state + 0x7f58, env->cr[0]);
1791
1792 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1793 stl_phys(sm_state + 0x7f00, env->smbase);
1794#else
1795 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1796 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1797 stl_phys(sm_state + 0x7ff4, compute_eflags());
1798 stl_phys(sm_state + 0x7ff0, env->eip);
1799 stl_phys(sm_state + 0x7fec, EDI);
1800 stl_phys(sm_state + 0x7fe8, ESI);
1801 stl_phys(sm_state + 0x7fe4, EBP);
1802 stl_phys(sm_state + 0x7fe0, ESP);
1803 stl_phys(sm_state + 0x7fdc, EBX);
1804 stl_phys(sm_state + 0x7fd8, EDX);
1805 stl_phys(sm_state + 0x7fd4, ECX);
1806 stl_phys(sm_state + 0x7fd0, EAX);
1807 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1808 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1809
1810 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1811 stl_phys(sm_state + 0x7f64, env->tr.base);
1812 stl_phys(sm_state + 0x7f60, env->tr.limit);
1813 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1814
1815 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1816 stl_phys(sm_state + 0x7f80, env->ldt.base);
1817 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1818 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1819
1820 stl_phys(sm_state + 0x7f74, env->gdt.base);
1821 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1822
1823 stl_phys(sm_state + 0x7f58, env->idt.base);
1824 stl_phys(sm_state + 0x7f54, env->idt.limit);
1825
1826 for(i = 0; i < 6; i++) {
1827 dt = &env->segs[i];
1828 if (i < 3)
1829 offset = 0x7f84 + i * 12;
1830 else
1831 offset = 0x7f2c + (i - 3) * 12;
1832 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1833 stl_phys(sm_state + offset + 8, dt->base);
1834 stl_phys(sm_state + offset + 4, dt->limit);
1835 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1836 }
1837 stl_phys(sm_state + 0x7f14, env->cr[4]);
1838
1839 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1840 stl_phys(sm_state + 0x7ef8, env->smbase);
1841#endif
1842 /* init SMM cpu state */
1843
1844#ifdef TARGET_X86_64
1845 cpu_load_efer(env, 0);
1846#endif
1847 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1848 env->eip = 0x00008000;
1849 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1850 0xffffffff, 0);
1851 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1852 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1853 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1854 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1855 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1856
1857 cpu_x86_update_cr0(env,
1858 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1859 cpu_x86_update_cr4(env, 0);
1860 env->dr[7] = 0x00000400;
1861 CC_OP = CC_OP_EFLAGS;
1862}
1863
1864void helper_rsm(void)
1865{
1866#ifdef VBOX
1867 cpu_abort(env, "helper_rsm");
1868#else /* !VBOX */
 1869 target_ulong sm_state;
1872 int i, offset;
1873 uint32_t val;
1874
1875 sm_state = env->smbase + 0x8000;
1876#ifdef TARGET_X86_64
1877 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1878
1879 for(i = 0; i < 6; i++) {
1880 offset = 0x7e00 + i * 16;
1881 cpu_x86_load_seg_cache(env, i,
1882 lduw_phys(sm_state + offset),
1883 ldq_phys(sm_state + offset + 8),
1884 ldl_phys(sm_state + offset + 4),
1885 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1886 }
1887
1888 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1889 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1890
1891 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1892 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1893 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1894 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1895
1896 env->idt.base = ldq_phys(sm_state + 0x7e88);
1897 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1898
1899 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1900 env->tr.base = ldq_phys(sm_state + 0x7e98);
1901 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1902 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1903
1904 EAX = ldq_phys(sm_state + 0x7ff8);
1905 ECX = ldq_phys(sm_state + 0x7ff0);
1906 EDX = ldq_phys(sm_state + 0x7fe8);
1907 EBX = ldq_phys(sm_state + 0x7fe0);
1908 ESP = ldq_phys(sm_state + 0x7fd8);
1909 EBP = ldq_phys(sm_state + 0x7fd0);
1910 ESI = ldq_phys(sm_state + 0x7fc8);
1911 EDI = ldq_phys(sm_state + 0x7fc0);
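 /* R8..R15 follow at descending 8 byte offsets (0x7ff8 - i * 8), i.e. R8 at 0x7fb8 down to R15 at 0x7f80 */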
1912 for(i = 8; i < 16; i++)
1913 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1914 env->eip = ldq_phys(sm_state + 0x7f78);
1915 load_eflags(ldl_phys(sm_state + 0x7f70),
1916 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1917 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1918 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1919
1920 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1921 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1922 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1923
1924 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1925 if (val & 0x20000) {
1926 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1927 }
1928#else
1929 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1930 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1931 load_eflags(ldl_phys(sm_state + 0x7ff4),
1932 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1933 env->eip = ldl_phys(sm_state + 0x7ff0);
1934 EDI = ldl_phys(sm_state + 0x7fec);
1935 ESI = ldl_phys(sm_state + 0x7fe8);
1936 EBP = ldl_phys(sm_state + 0x7fe4);
1937 ESP = ldl_phys(sm_state + 0x7fe0);
1938 EBX = ldl_phys(sm_state + 0x7fdc);
1939 EDX = ldl_phys(sm_state + 0x7fd8);
1940 ECX = ldl_phys(sm_state + 0x7fd4);
1941 EAX = ldl_phys(sm_state + 0x7fd0);
1942 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1943 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1944
1945 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1946 env->tr.base = ldl_phys(sm_state + 0x7f64);
1947 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1948 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1949
1950 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1951 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1952 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1953 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1954
1955 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1956 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1957
1958 env->idt.base = ldl_phys(sm_state + 0x7f58);
1959 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1960
1961 for(i = 0; i < 6; i++) {
1962 if (i < 3)
1963 offset = 0x7f84 + i * 12;
1964 else
1965 offset = 0x7f2c + (i - 3) * 12;
1966 cpu_x86_load_seg_cache(env, i,
1967 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1968 ldl_phys(sm_state + offset + 8),
1969 ldl_phys(sm_state + offset + 4),
1970 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1971 }
1972 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1973
1974 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1975 if (val & 0x20000) {
1976 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1977 }
1978#endif
1979 CC_OP = CC_OP_EFLAGS;
1980 env->hflags &= ~HF_SMM_MASK;
1981 cpu_smm_update(env);
1982
1983 if (loglevel & CPU_LOG_INT) {
1984 fprintf(logfile, "SMM: after RSM\n");
1985 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1986 }
1987#endif /* !VBOX */
1988}
1989
1990#endif /* !CONFIG_USER_ONLY */
1991
1992
1993/* division, flags are undefined */
1994
1995void helper_divb_AL(target_ulong t0)
1996{
1997 unsigned int num, den, q, r;
1998
1999 num = (EAX & 0xffff);
2000 den = (t0 & 0xff);
2001 if (den == 0) {
2002 raise_exception(EXCP00_DIVZ);
2003 }
2004 q = (num / den);
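 /* a quotient that does not fit in 8 bits also raises #DE (same vector as divide by zero) */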
2005 if (q > 0xff)
2006 raise_exception(EXCP00_DIVZ);
2007 q &= 0xff;
2008 r = (num % den) & 0xff;
2009 EAX = (EAX & ~0xffff) | (r << 8) | q;
2010}
2011
2012void helper_idivb_AL(target_ulong t0)
2013{
2014 int num, den, q, r;
2015
2016 num = (int16_t)EAX;
2017 den = (int8_t)t0;
2018 if (den == 0) {
2019 raise_exception(EXCP00_DIVZ);
2020 }
2021 q = (num / den);
2022 if (q != (int8_t)q)
2023 raise_exception(EXCP00_DIVZ);
2024 q &= 0xff;
2025 r = (num % den) & 0xff;
2026 EAX = (EAX & ~0xffff) | (r << 8) | q;
2027}
2028
2029void helper_divw_AX(target_ulong t0)
2030{
2031 unsigned int num, den, q, r;
2032
2033 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2034 den = (t0 & 0xffff);
2035 if (den == 0) {
2036 raise_exception(EXCP00_DIVZ);
2037 }
2038 q = (num / den);
2039 if (q > 0xffff)
2040 raise_exception(EXCP00_DIVZ);
2041 q &= 0xffff;
2042 r = (num % den) & 0xffff;
2043 EAX = (EAX & ~0xffff) | q;
2044 EDX = (EDX & ~0xffff) | r;
2045}
2046
2047void helper_idivw_AX(target_ulong t0)
2048{
2049 int num, den, q, r;
2050
2051 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2052 den = (int16_t)t0;
2053 if (den == 0) {
2054 raise_exception(EXCP00_DIVZ);
2055 }
2056 q = (num / den);
2057 if (q != (int16_t)q)
2058 raise_exception(EXCP00_DIVZ);
2059 q &= 0xffff;
2060 r = (num % den) & 0xffff;
2061 EAX = (EAX & ~0xffff) | q;
2062 EDX = (EDX & ~0xffff) | r;
2063}
2064
2065void helper_divl_EAX(target_ulong t0)
2066{
2067 unsigned int den, r;
2068 uint64_t num, q;
2069
2070 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2071 den = t0;
2072 if (den == 0) {
2073 raise_exception(EXCP00_DIVZ);
2074 }
2075 q = (num / den);
2076 r = (num % den);
2077 if (q > 0xffffffff)
2078 raise_exception(EXCP00_DIVZ);
2079 EAX = (uint32_t)q;
2080 EDX = (uint32_t)r;
2081}
2082
2083void helper_idivl_EAX(target_ulong t0)
2084{
2085 int den, r;
2086 int64_t num, q;
2087
2088 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2089 den = t0;
2090 if (den == 0) {
2091 raise_exception(EXCP00_DIVZ);
2092 }
2093 q = (num / den);
2094 r = (num % den);
2095 if (q != (int32_t)q)
2096 raise_exception(EXCP00_DIVZ);
2097 EAX = (uint32_t)q;
2098 EDX = (uint32_t)r;
2099}
2100
2101/* bcd */
2102
2103/* XXX: exception */
2104void helper_aam(int base)
2105{
2106 int al, ah;
2107 al = EAX & 0xff;
2108 ah = al / base;
2109 al = al % base;
2110 EAX = (EAX & ~0xffff) | al | (ah << 8);
2111 CC_DST = al;
2112}
2113
2114void helper_aad(int base)
2115{
2116 int al, ah;
2117 al = EAX & 0xff;
2118 ah = (EAX >> 8) & 0xff;
2119 al = ((ah * base) + al) & 0xff;
2120 EAX = (EAX & ~0xffff) | al;
2121 CC_DST = al;
2122}
2123
2124void helper_aaa(void)
2125{
2126 int icarry;
2127 int al, ah, af;
2128 int eflags;
2129
2130 eflags = cc_table[CC_OP].compute_all();
2131 af = eflags & CC_A;
2132 al = EAX & 0xff;
2133 ah = (EAX >> 8) & 0xff;
2134
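 /* a carry into AH occurs when AL + 6 wraps past 0xff, i.e. when AL > 0xf9 */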
2135 icarry = (al > 0xf9);
2136 if (((al & 0x0f) > 9 ) || af) {
2137 al = (al + 6) & 0x0f;
2138 ah = (ah + 1 + icarry) & 0xff;
2139 eflags |= CC_C | CC_A;
2140 } else {
2141 eflags &= ~(CC_C | CC_A);
2142 al &= 0x0f;
2143 }
2144 EAX = (EAX & ~0xffff) | al | (ah << 8);
2145 CC_SRC = eflags;
2146 FORCE_RET();
2147}
2148
2149void helper_aas(void)
2150{
2151 int icarry;
2152 int al, ah, af;
2153 int eflags;
2154
2155 eflags = cc_table[CC_OP].compute_all();
2156 af = eflags & CC_A;
2157 al = EAX & 0xff;
2158 ah = (EAX >> 8) & 0xff;
2159
2160 icarry = (al < 6);
2161 if (((al & 0x0f) > 9 ) || af) {
2162 al = (al - 6) & 0x0f;
2163 ah = (ah - 1 - icarry) & 0xff;
2164 eflags |= CC_C | CC_A;
2165 } else {
2166 eflags &= ~(CC_C | CC_A);
2167 al &= 0x0f;
2168 }
2169 EAX = (EAX & ~0xffff) | al | (ah << 8);
2170 CC_SRC = eflags;
2171 FORCE_RET();
2172}
2173
2174void helper_daa(void)
2175{
2176 int al, af, cf;
2177 int eflags;
2178
2179 eflags = cc_table[CC_OP].compute_all();
2180 cf = eflags & CC_C;
2181 af = eflags & CC_A;
2182 al = EAX & 0xff;
2183
2184 eflags = 0;
2185 if (((al & 0x0f) > 9 ) || af) {
2186 al = (al + 6) & 0xff;
2187 eflags |= CC_A;
2188 }
2189 if ((al > 0x9f) || cf) {
2190 al = (al + 0x60) & 0xff;
2191 eflags |= CC_C;
2192 }
2193 EAX = (EAX & ~0xff) | al;
2194 /* well, speed is not an issue here, so we compute the flags by hand */
2195 eflags |= (al == 0) << 6; /* zf */
2196 eflags |= parity_table[al]; /* pf */
2197 eflags |= (al & 0x80); /* sf */
2198 CC_SRC = eflags;
2199 FORCE_RET();
2200}
2201
2202void helper_das(void)
2203{
2204 int al, al1, af, cf;
2205 int eflags;
2206
2207 eflags = cc_table[CC_OP].compute_all();
2208 cf = eflags & CC_C;
2209 af = eflags & CC_A;
2210 al = EAX & 0xff;
2211
2212 eflags = 0;
2213 al1 = al;
2214 if (((al & 0x0f) > 9 ) || af) {
2215 eflags |= CC_A;
2216 if (al < 6 || cf)
2217 eflags |= CC_C;
2218 al = (al - 6) & 0xff;
2219 }
2220 if ((al1 > 0x99) || cf) {
2221 al = (al - 0x60) & 0xff;
2222 eflags |= CC_C;
2223 }
2224 EAX = (EAX & ~0xff) | al;
2225 /* well, speed is not an issue here, so we compute the flags by hand */
2226 eflags |= (al == 0) << 6; /* zf */
2227 eflags |= parity_table[al]; /* pf */
2228 eflags |= (al & 0x80); /* sf */
2229 CC_SRC = eflags;
2230 FORCE_RET();
2231}
2232
2233void helper_into(int next_eip_addend)
2234{
2235 int eflags;
2236 eflags = cc_table[CC_OP].compute_all();
2237 if (eflags & CC_O) {
2238 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2239 }
2240}
2241
2242void helper_cmpxchg8b(target_ulong a0)
2243{
2244 uint64_t d;
2245 int eflags;
2246
2247 eflags = cc_table[CC_OP].compute_all();
2248 d = ldq(a0);
2249 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2250 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2251 eflags |= CC_Z;
2252 } else {
2253 /* always do the store */
2254 stq(a0, d);
2255 EDX = (uint32_t)(d >> 32);
2256 EAX = (uint32_t)d;
2257 eflags &= ~CC_Z;
2258 }
2259 CC_SRC = eflags;
2260}
2261
2262#ifdef TARGET_X86_64
2263void helper_cmpxchg16b(target_ulong a0)
2264{
2265 uint64_t d0, d1;
2266 int eflags;
2267
2268 if ((a0 & 0xf) != 0)
2269 raise_exception(EXCP0D_GPF);
2270 eflags = cc_table[CC_OP].compute_all();
2271 d0 = ldq(a0);
2272 d1 = ldq(a0 + 8);
2273 if (d0 == EAX && d1 == EDX) {
2274 stq(a0, EBX);
2275 stq(a0 + 8, ECX);
2276 eflags |= CC_Z;
2277 } else {
2278 /* always do the store */
2279 stq(a0, d0);
2280 stq(a0 + 8, d1);
2281 EDX = d1;
2282 EAX = d0;
2283 eflags &= ~CC_Z;
2284 }
2285 CC_SRC = eflags;
2286}
2287#endif
2288
2289void helper_single_step(void)
2290{
2291 env->dr[6] |= 0x4000;
2292 raise_exception(EXCP01_SSTP);
2293}
2294
2295void helper_cpuid(void)
2296{
2297#ifndef VBOX
2298 uint32_t index;
2299
2300 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2301
2302 index = (uint32_t)EAX;
2303 /* test if maximum index reached */
2304 if (index & 0x80000000) {
2305 if (index > env->cpuid_xlevel)
2306 index = env->cpuid_level;
2307 } else {
2308 if (index > env->cpuid_level)
2309 index = env->cpuid_level;
2310 }
2311
2312 switch(index) {
2313 case 0:
2314 EAX = env->cpuid_level;
2315 EBX = env->cpuid_vendor1;
2316 EDX = env->cpuid_vendor2;
2317 ECX = env->cpuid_vendor3;
2318 break;
2319 case 1:
2320 EAX = env->cpuid_version;
2321 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2322 ECX = env->cpuid_ext_features;
2323 EDX = env->cpuid_features;
2324 break;
2325 case 2:
2326 /* cache info: needed for Pentium Pro compatibility */
2327 EAX = 1;
2328 EBX = 0;
2329 ECX = 0;
2330 EDX = 0x2c307d;
2331 break;
2332 case 4:
2333 /* cache info: needed for Core compatibility */
2334 switch (ECX) {
2335 case 0: /* L1 dcache info */
2336 EAX = 0x0000121;
2337 EBX = 0x1c0003f;
2338 ECX = 0x000003f;
2339 EDX = 0x0000001;
2340 break;
2341 case 1: /* L1 icache info */
2342 EAX = 0x0000122;
2343 EBX = 0x1c0003f;
2344 ECX = 0x000003f;
2345 EDX = 0x0000001;
2346 break;
2347 case 2: /* L2 cache info */
2348 EAX = 0x0000143;
2349 EBX = 0x3c0003f;
2350 ECX = 0x0000fff;
2351 EDX = 0x0000001;
2352 break;
2353 default: /* end of info */
2354 EAX = 0;
2355 EBX = 0;
2356 ECX = 0;
2357 EDX = 0;
2358 break;
2359 }
2360
2361 break;
2362 case 5:
2363 /* mwait info: needed for Core compatibility */
2364 EAX = 0; /* Smallest monitor-line size in bytes */
2365 EBX = 0; /* Largest monitor-line size in bytes */
2366 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2367 EDX = 0;
2368 break;
2369 case 6:
2370 /* Thermal and Power Leaf */
2371 EAX = 0;
2372 EBX = 0;
2373 ECX = 0;
2374 EDX = 0;
2375 break;
2376 case 9:
2377 /* Direct Cache Access Information Leaf */
2378 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2379 EBX = 0;
2380 ECX = 0;
2381 EDX = 0;
2382 break;
2383 case 0xA:
2384 /* Architectural Performance Monitoring Leaf */
2385 EAX = 0;
2386 EBX = 0;
2387 ECX = 0;
2388 EDX = 0;
2389 break;
2390 case 0x80000000:
2391 EAX = env->cpuid_xlevel;
2392 EBX = env->cpuid_vendor1;
2393 EDX = env->cpuid_vendor2;
2394 ECX = env->cpuid_vendor3;
2395 break;
2396 case 0x80000001:
2397 EAX = env->cpuid_features;
2398 EBX = 0;
2399 ECX = env->cpuid_ext3_features;
2400 EDX = env->cpuid_ext2_features;
2401 break;
2402 case 0x80000002:
2403 case 0x80000003:
2404 case 0x80000004:
2405 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2406 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2407 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2408 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2409 break;
2410 case 0x80000005:
2411 /* cache info (L1 cache) */
2412 EAX = 0x01ff01ff;
2413 EBX = 0x01ff01ff;
2414 ECX = 0x40020140;
2415 EDX = 0x40020140;
2416 break;
2417 case 0x80000006:
2418 /* cache info (L2 cache) */
2419 EAX = 0;
2420 EBX = 0x42004200;
2421 ECX = 0x02008140;
2422 EDX = 0;
2423 break;
2424 case 0x80000008:
2425 /* virtual & phys address size in low 2 bytes. */
2426/* XXX: This value must match the one used in the MMU code. */
2427 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2428 /* 64 bit processor */
2429#if defined(USE_KQEMU)
2430 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2431#else
2432/* XXX: The physical address space is limited to 42 bits in exec.c. */
2433 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2434#endif
2435 } else {
2436#if defined(USE_KQEMU)
2437 EAX = 0x00000020; /* 32 bits physical */
2438#else
2439 if (env->cpuid_features & CPUID_PSE36)
2440 EAX = 0x00000024; /* 36 bits physical */
2441 else
2442 EAX = 0x00000020; /* 32 bits physical */
2443#endif
2444 }
2445 EBX = 0;
2446 ECX = 0;
2447 EDX = 0;
2448 break;
2449 case 0x8000000A:
2450 EAX = 0x00000001;
2451 EBX = 0;
2452 ECX = 0;
2453 EDX = 0;
2454 break;
2455 default:
2456 /* reserved values: zero */
2457 EAX = 0;
2458 EBX = 0;
2459 ECX = 0;
2460 EDX = 0;
2461 break;
2462 }
2463#else /* VBOX */
2464 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2465#endif /* VBOX */
2466}
2467
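/* ENTER with a non-zero nesting level: copy the enclosing frame pointers from the old frame
   and store t1 as the new frame pointer. Note that ESP/EBP themselves are not written back
   here; only the stores to the stack are performed by this helper. */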
2468void helper_enter_level(int level, int data32, target_ulong t1)
2469{
2470 target_ulong ssp;
2471 uint32_t esp_mask, esp, ebp;
2472
2473 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2474 ssp = env->segs[R_SS].base;
2475 ebp = EBP;
2476 esp = ESP;
2477 if (data32) {
2478 /* 32 bit */
2479 esp -= 4;
2480 while (--level) {
2481 esp -= 4;
2482 ebp -= 4;
2483 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2484 }
2485 esp -= 4;
2486 stl(ssp + (esp & esp_mask), t1);
2487 } else {
2488 /* 16 bit */
2489 esp -= 2;
2490 while (--level) {
2491 esp -= 2;
2492 ebp -= 2;
2493 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2494 }
2495 esp -= 2;
2496 stw(ssp + (esp & esp_mask), t1);
2497 }
2498}
2499
2500#ifdef TARGET_X86_64
2501void helper_enter64_level(int level, int data64, target_ulong t1)
2502{
2503 target_ulong esp, ebp;
2504 ebp = EBP;
2505 esp = ESP;
2506
2507 if (data64) {
2508 /* 64 bit */
2509 esp -= 8;
2510 while (--level) {
2511 esp -= 8;
2512 ebp -= 8;
2513 stq(esp, ldq(ebp));
2514 }
2515 esp -= 8;
2516 stq(esp, t1);
2517 } else {
2518 /* 16 bit */
2519 esp -= 2;
2520 while (--level) {
2521 esp -= 2;
2522 ebp -= 2;
2523 stw(esp, lduw(ebp));
2524 }
2525 esp -= 2;
2526 stw(esp, t1);
2527 }
2528}
2529#endif
2530
2531void helper_lldt(int selector)
2532{
2533 SegmentCache *dt;
2534 uint32_t e1, e2;
2535#ifndef VBOX
2536 int index, entry_limit;
2537#else
2538 unsigned int index, entry_limit;
2539#endif
2540 target_ulong ptr;
2541
2542#ifdef VBOX
2543 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2544 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2545#endif
2546
2547 selector &= 0xffff;
2548 if ((selector & 0xfffc) == 0) {
2549 /* XXX: NULL selector case: invalid LDT */
2550 env->ldt.base = 0;
2551 env->ldt.limit = 0;
2552 } else {
2553 if (selector & 0x4)
2554 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2555 dt = &env->gdt;
2556 index = selector & ~7;
2557#ifdef TARGET_X86_64
2558 if (env->hflags & HF_LMA_MASK)
2559 entry_limit = 15;
2560 else
2561#endif
2562 entry_limit = 7;
2563 if ((index + entry_limit) > dt->limit)
2564 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2565 ptr = dt->base + index;
2566 e1 = ldl_kernel(ptr);
2567 e2 = ldl_kernel(ptr + 4);
2568 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2569 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2570 if (!(e2 & DESC_P_MASK))
2571 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2572#ifdef TARGET_X86_64
2573 if (env->hflags & HF_LMA_MASK) {
2574 uint32_t e3;
2575 e3 = ldl_kernel(ptr + 8);
2576 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2577 env->ldt.base |= (target_ulong)e3 << 32;
2578 } else
2579#endif
2580 {
2581 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2582 }
2583 }
2584 env->ldt.selector = selector;
2585#ifdef VBOX
2586 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2587 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2588#endif
2589}
2590
2591void helper_ltr(int selector)
2592{
2593 SegmentCache *dt;
2594 uint32_t e1, e2;
2595#ifndef VBOX
2596 int index, type, entry_limit;
2597#else
2598 unsigned int index;
2599 int type, entry_limit;
2600#endif
2601 target_ulong ptr;
2602
2603#ifdef VBOX
2604 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2605 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2606 env->tr.flags, (RTSEL)(selector & 0xffff)));
2607#endif
2608 selector &= 0xffff;
2609 if ((selector & 0xfffc) == 0) {
2610 /* NULL selector case: invalid TR */
2611 env->tr.base = 0;
2612 env->tr.limit = 0;
2613 env->tr.flags = 0;
2614 } else {
2615 if (selector & 0x4)
2616 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2617 dt = &env->gdt;
2618 index = selector & ~7;
2619#ifdef TARGET_X86_64
2620 if (env->hflags & HF_LMA_MASK)
2621 entry_limit = 15;
2622 else
2623#endif
2624 entry_limit = 7;
2625 if ((index + entry_limit) > dt->limit)
2626 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2627 ptr = dt->base + index;
2628 e1 = ldl_kernel(ptr);
2629 e2 = ldl_kernel(ptr + 4);
2630 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2631 if ((e2 & DESC_S_MASK) ||
2632 (type != 1 && type != 9))
2633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2634 if (!(e2 & DESC_P_MASK))
2635 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2636#ifdef TARGET_X86_64
2637 if (env->hflags & HF_LMA_MASK) {
2638 uint32_t e3, e4;
2639 e3 = ldl_kernel(ptr + 8);
2640 e4 = ldl_kernel(ptr + 12);
2641 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2642 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2643 load_seg_cache_raw_dt(&env->tr, e1, e2);
2644 env->tr.base |= (target_ulong)e3 << 32;
2645 } else
2646#endif
2647 {
2648 load_seg_cache_raw_dt(&env->tr, e1, e2);
2649 }
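 /* mark the TSS descriptor as busy in the GDT */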
2650 e2 |= DESC_TSS_BUSY_MASK;
2651 stl_kernel(ptr + 4, e2);
2652 }
2653 env->tr.selector = selector;
2654#ifdef VBOX
2655 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2656 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2657 env->tr.flags, (RTSEL)(selector & 0xffff)));
2658#endif
2659}
2660
2661/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2662void helper_load_seg(int seg_reg, int selector)
2663{
2664 uint32_t e1, e2;
2665 int cpl, dpl, rpl;
2666 SegmentCache *dt;
2667#ifndef VBOX
2668 int index;
2669#else
2670 unsigned int index;
2671#endif
2672 target_ulong ptr;
2673
2674 selector &= 0xffff;
2675 cpl = env->hflags & HF_CPL_MASK;
2676
2677#ifdef VBOX
2678 /* Trying to load a selector with CPL=1? */
2679 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2680 {
2681 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2682 selector = selector & 0xfffc;
2683 }
2684#endif
2685 if ((selector & 0xfffc) == 0) {
2686 /* null selector case */
2687 if (seg_reg == R_SS
2688#ifdef TARGET_X86_64
2689 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2690#endif
2691 )
2692 raise_exception_err(EXCP0D_GPF, 0);
2693 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2694 } else {
2695
2696 if (selector & 0x4)
2697 dt = &env->ldt;
2698 else
2699 dt = &env->gdt;
2700 index = selector & ~7;
2701 if ((index + 7) > dt->limit)
2702 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2703 ptr = dt->base + index;
2704 e1 = ldl_kernel(ptr);
2705 e2 = ldl_kernel(ptr + 4);
2706
2707 if (!(e2 & DESC_S_MASK))
2708 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2709 rpl = selector & 3;
2710 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2711 if (seg_reg == R_SS) {
2712 /* must be writable segment */
2713 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2714 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2715 if (rpl != cpl || dpl != cpl)
2716 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2717 } else {
2718 /* must be readable segment */
2719 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2720 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2721
2722 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2723 /* if not conforming code, test rights */
2724 if (dpl < cpl || dpl < rpl)
2725 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2726 }
2727 }
2728
2729 if (!(e2 & DESC_P_MASK)) {
2730 if (seg_reg == R_SS)
2731 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2732 else
2733 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2734 }
2735
2736 /* set the access bit if not already set */
2737 if (!(e2 & DESC_A_MASK)) {
2738 e2 |= DESC_A_MASK;
2739 stl_kernel(ptr + 4, e2);
2740 }
2741
2742 cpu_x86_load_seg_cache(env, seg_reg, selector,
2743 get_seg_base(e1, e2),
2744 get_seg_limit(e1, e2),
2745 e2);
2746#if 0
2747 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2748 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2749#endif
2750 }
2751}
2752
2753/* protected mode jump */
2754void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2755 int next_eip_addend)
2756{
2757 int gate_cs, type;
2758 uint32_t e1, e2, cpl, dpl, rpl, limit;
2759 target_ulong next_eip;
2760
2761#ifdef VBOX
2762 e1 = e2 = 0;
2763#endif
2764 if ((new_cs & 0xfffc) == 0)
2765 raise_exception_err(EXCP0D_GPF, 0);
2766 if (load_segment(&e1, &e2, new_cs) != 0)
2767 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2768 cpl = env->hflags & HF_CPL_MASK;
2769 if (e2 & DESC_S_MASK) {
2770 if (!(e2 & DESC_CS_MASK))
2771 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2772 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2773 if (e2 & DESC_C_MASK) {
2774 /* conforming code segment */
2775 if (dpl > cpl)
2776 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2777 } else {
2778 /* non conforming code segment */
2779 rpl = new_cs & 3;
2780 if (rpl > cpl)
2781 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2782 if (dpl != cpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 }
2785 if (!(e2 & DESC_P_MASK))
2786 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2787 limit = get_seg_limit(e1, e2);
2788 if (new_eip > limit &&
2789 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2790 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2791 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2792 get_seg_base(e1, e2), limit, e2);
2793 EIP = new_eip;
2794 } else {
2795 /* jump to call or task gate */
2796 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2797 rpl = new_cs & 3;
2798 cpl = env->hflags & HF_CPL_MASK;
2799 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2800 switch(type) {
2801 case 1: /* 286 TSS */
2802 case 9: /* 386 TSS */
2803 case 5: /* task gate */
2804 if (dpl < cpl || dpl < rpl)
2805 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2806 next_eip = env->eip + next_eip_addend;
2807 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2808 CC_OP = CC_OP_EFLAGS;
2809 break;
2810 case 4: /* 286 call gate */
2811 case 12: /* 386 call gate */
2812 if ((dpl < cpl) || (dpl < rpl))
2813 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2814 if (!(e2 & DESC_P_MASK))
2815 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2816 gate_cs = e1 >> 16;
2817 new_eip = (e1 & 0xffff);
2818 if (type == 12)
2819 new_eip |= (e2 & 0xffff0000);
2820 if (load_segment(&e1, &e2, gate_cs) != 0)
2821 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2822 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2823 /* must be code segment */
2824 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2825 (DESC_S_MASK | DESC_CS_MASK)))
2826 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2827 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2828 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2829 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2830 if (!(e2 & DESC_P_MASK))
2831#ifdef VBOX /* See page 3-514 of 253666.pdf */
2832 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2833#else
2834 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2835#endif
2836 limit = get_seg_limit(e1, e2);
2837 if (new_eip > limit)
2838 raise_exception_err(EXCP0D_GPF, 0);
2839 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2840 get_seg_base(e1, e2), limit, e2);
2841 EIP = new_eip;
2842 break;
2843 default:
2844 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2845 break;
2846 }
2847 }
2848}
2849
2850/* real mode call */
2851void helper_lcall_real(int new_cs, target_ulong new_eip1,
2852 int shift, int next_eip)
2853{
2854 int new_eip;
2855 uint32_t esp, esp_mask;
2856 target_ulong ssp;
2857
2858 new_eip = new_eip1;
2859 esp = ESP;
2860 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2861 ssp = env->segs[R_SS].base;
2862 if (shift) {
2863 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2864 PUSHL(ssp, esp, esp_mask, next_eip);
2865 } else {
2866 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2867 PUSHW(ssp, esp, esp_mask, next_eip);
2868 }
2869
2870 SET_ESP(esp, esp_mask);
2871 env->eip = new_eip;
2872 env->segs[R_CS].selector = new_cs;
2873 env->segs[R_CS].base = (new_cs << 4);
2874}
2875
2876/* protected mode call */
2877void helper_lcall_protected(int new_cs, target_ulong new_eip,
2878 int shift, int next_eip_addend)
2879{
2880 int new_stack, i;
2881 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2882 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2883 uint32_t val, limit, old_sp_mask;
2884 target_ulong ssp, old_ssp, next_eip;
2885
2886#ifdef VBOX
2887 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2888#endif
2889 next_eip = env->eip + next_eip_addend;
2890#ifdef DEBUG_PCALL
2891 if (loglevel & CPU_LOG_PCALL) {
2892 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2893 new_cs, (uint32_t)new_eip, shift);
2894 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2895 }
2896#endif
2897 if ((new_cs & 0xfffc) == 0)
2898 raise_exception_err(EXCP0D_GPF, 0);
2899 if (load_segment(&e1, &e2, new_cs) != 0)
2900 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2901 cpl = env->hflags & HF_CPL_MASK;
2902#ifdef DEBUG_PCALL
2903 if (loglevel & CPU_LOG_PCALL) {
2904 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2905 }
2906#endif
2907 if (e2 & DESC_S_MASK) {
2908 if (!(e2 & DESC_CS_MASK))
2909 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2911 if (e2 & DESC_C_MASK) {
2912 /* conforming code segment */
2913 if (dpl > cpl)
2914 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2915 } else {
2916 /* non conforming code segment */
2917 rpl = new_cs & 3;
2918 if (rpl > cpl)
2919 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2920 if (dpl != cpl)
2921 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2922 }
2923 if (!(e2 & DESC_P_MASK))
2924 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2925
2926#ifdef TARGET_X86_64
2927 /* XXX: check 16/32 bit cases in long mode */
2928 if (shift == 2) {
2929 target_ulong rsp;
2930 /* 64 bit case */
2931 rsp = ESP;
2932 PUSHQ(rsp, env->segs[R_CS].selector);
2933 PUSHQ(rsp, next_eip);
2934 /* from this point, not restartable */
2935 ESP = rsp;
2936 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2937 get_seg_base(e1, e2),
2938 get_seg_limit(e1, e2), e2);
2939 EIP = new_eip;
2940 } else
2941#endif
2942 {
2943 sp = ESP;
2944 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2945 ssp = env->segs[R_SS].base;
2946 if (shift) {
2947 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2948 PUSHL(ssp, sp, sp_mask, next_eip);
2949 } else {
2950 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2951 PUSHW(ssp, sp, sp_mask, next_eip);
2952 }
2953
2954 limit = get_seg_limit(e1, e2);
2955 if (new_eip > limit)
2956 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2957 /* from this point, not restartable */
2958 SET_ESP(sp, sp_mask);
2959 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2960 get_seg_base(e1, e2), limit, e2);
2961 EIP = new_eip;
2962 }
2963 } else {
2964 /* check gate type */
2965 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2966 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2967 rpl = new_cs & 3;
2968 switch(type) {
2969 case 1: /* available 286 TSS */
2970 case 9: /* available 386 TSS */
2971 case 5: /* task gate */
2972 if (dpl < cpl || dpl < rpl)
2973 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2974 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2975 CC_OP = CC_OP_EFLAGS;
2976 return;
2977 case 4: /* 286 call gate */
2978 case 12: /* 386 call gate */
2979 break;
2980 default:
2981 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2982 break;
2983 }
2984 shift = type >> 3;
2985
2986 if (dpl < cpl || dpl < rpl)
2987 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2988 /* check valid bit */
2989 if (!(e2 & DESC_P_MASK))
2990 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2991 selector = e1 >> 16;
2992 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2993 param_count = e2 & 0x1f;
2994 if ((selector & 0xfffc) == 0)
2995 raise_exception_err(EXCP0D_GPF, 0);
2996
2997 if (load_segment(&e1, &e2, selector) != 0)
2998 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2999 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3001 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3002 if (dpl > cpl)
3003 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3004 if (!(e2 & DESC_P_MASK))
3005 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3006
3007 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3008 /* to inner privilege */
3009 get_ss_esp_from_tss(&ss, &sp, dpl);
3010#ifdef DEBUG_PCALL
3011 if (loglevel & CPU_LOG_PCALL)
3012 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3013 ss, sp, param_count, ESP);
3014#endif
3015 if ((ss & 0xfffc) == 0)
3016 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3017 if ((ss & 3) != dpl)
3018 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3019 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3020 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3021 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3022 if (ss_dpl != dpl)
3023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3024 if (!(ss_e2 & DESC_S_MASK) ||
3025 (ss_e2 & DESC_CS_MASK) ||
3026 !(ss_e2 & DESC_W_MASK))
3027 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3028 if (!(ss_e2 & DESC_P_MASK))
3029#ifdef VBOX /* See page 3-99 of 253666.pdf */
3030 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3031#else
3032 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3033#endif
3034
3035 // push_size = ((param_count * 2) + 8) << shift;
3036
3037 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3038 old_ssp = env->segs[R_SS].base;
3039
3040 sp_mask = get_sp_mask(ss_e2);
3041 ssp = get_seg_base(ss_e1, ss_e2);
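 /* push the caller's SS:ESP on the inner stack, then copy param_count parameters from the old stack */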
3042 if (shift) {
3043 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3044 PUSHL(ssp, sp, sp_mask, ESP);
3045 for(i = param_count - 1; i >= 0; i--) {
3046 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3047 PUSHL(ssp, sp, sp_mask, val);
3048 }
3049 } else {
3050 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3051 PUSHW(ssp, sp, sp_mask, ESP);
3052 for(i = param_count - 1; i >= 0; i--) {
3053 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3054 PUSHW(ssp, sp, sp_mask, val);
3055 }
3056 }
3057 new_stack = 1;
3058 } else {
3059 /* to same privilege */
3060 sp = ESP;
3061 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3062 ssp = env->segs[R_SS].base;
3063 // push_size = (4 << shift);
3064 new_stack = 0;
3065 }
3066
3067 if (shift) {
3068 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3069 PUSHL(ssp, sp, sp_mask, next_eip);
3070 } else {
3071 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3072 PUSHW(ssp, sp, sp_mask, next_eip);
3073 }
3074
3075 /* from this point, not restartable */
3076
3077 if (new_stack) {
3078 ss = (ss & ~3) | dpl;
3079 cpu_x86_load_seg_cache(env, R_SS, ss,
3080 ssp,
3081 get_seg_limit(ss_e1, ss_e2),
3082 ss_e2);
3083 }
3084
3085 selector = (selector & ~3) | dpl;
3086 cpu_x86_load_seg_cache(env, R_CS, selector,
3087 get_seg_base(e1, e2),
3088 get_seg_limit(e1, e2),
3089 e2);
3090 cpu_x86_set_cpl(env, dpl);
3091 SET_ESP(sp, sp_mask);
3092 EIP = offset;
3093 }
3094#ifdef USE_KQEMU
3095 if (kqemu_is_ok(env)) {
3096 env->exception_index = -1;
3097 cpu_loop_exit();
3098 }
3099#endif
3100}
3101
3102/* real and vm86 mode iret */
3103void helper_iret_real(int shift)
3104{
3105 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3106 target_ulong ssp;
3107 int eflags_mask;
3108#ifdef VBOX
3109 bool fVME = false;
3110
3111 remR3TrapClear(env->pVM);
3112#endif /* VBOX */
3113
3114 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3115 sp = ESP;
3116 ssp = env->segs[R_SS].base;
3117 if (shift == 1) {
3118 /* 32 bits */
3119 POPL(ssp, sp, sp_mask, new_eip);
3120 POPL(ssp, sp, sp_mask, new_cs);
3121 new_cs &= 0xffff;
3122 POPL(ssp, sp, sp_mask, new_eflags);
3123 } else {
3124 /* 16 bits */
3125 POPW(ssp, sp, sp_mask, new_eip);
3126 POPW(ssp, sp, sp_mask, new_cs);
3127 POPW(ssp, sp, sp_mask, new_eflags);
3128 }
3129#ifdef VBOX
3130 if ( (env->eflags & VM_MASK)
3131 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3132 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3133 {
3134 fVME = true;
3135 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3136 /* if TF will be set -> #GP */
3137 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3138 || (new_eflags & TF_MASK))
3139 raise_exception(EXCP0D_GPF);
3140 }
3141#endif /* VBOX */
3142 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3143 env->segs[R_CS].selector = new_cs;
3144 env->segs[R_CS].base = (new_cs << 4);
3145 env->eip = new_eip;
3146#ifdef VBOX
3147 if (fVME)
3148 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3149 else
3150#endif
3151 if (env->eflags & VM_MASK)
3152 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3153 else
3154 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3155 if (shift == 0)
3156 eflags_mask &= 0xffff;
3157 load_eflags(new_eflags, eflags_mask);
3158 env->hflags2 &= ~HF2_NMI_MASK;
3159#ifdef VBOX
3160 if (fVME)
3161 {
3162 if (new_eflags & IF_MASK)
3163 env->eflags |= VIF_MASK;
3164 else
3165 env->eflags &= ~VIF_MASK;
3166 }
3167#endif /* VBOX */
3168}
3169
3170#ifndef VBOX
3171static inline void validate_seg(int seg_reg, int cpl)
3172#else /* VBOX */
3173DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3174#endif /* VBOX */
3175{
3176 int dpl;
3177 uint32_t e2;
3178
3179 /* XXX: on x86_64, we do not want to nullify FS and GS because
3180 they may still contain a valid base. I would be interested to
3181 know how a real x86_64 CPU behaves */
3182 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3183 (env->segs[seg_reg].selector & 0xfffc) == 0)
3184 return;
3185
3186 e2 = env->segs[seg_reg].flags;
3187 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3188 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3189 /* data or non conforming code segment */
3190 if (dpl < cpl) {
3191 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3192 }
3193 }
3194}
3195
3196/* protected mode iret */
3197#ifndef VBOX
3198static inline void helper_ret_protected(int shift, int is_iret, int addend)
3199#else /* VBOX */
3200DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3201#endif /* VBOX */
3202{
3203 uint32_t new_cs, new_eflags, new_ss;
3204 uint32_t new_es, new_ds, new_fs, new_gs;
3205 uint32_t e1, e2, ss_e1, ss_e2;
3206 int cpl, dpl, rpl, eflags_mask, iopl;
3207 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3208
3209#ifdef VBOX
3210 ss_e1 = ss_e2 = e1 = e2 = 0;
3211#endif
3212
3213#ifdef TARGET_X86_64
3214 if (shift == 2)
3215 sp_mask = -1;
3216 else
3217#endif
3218 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3219 sp = ESP;
3220 ssp = env->segs[R_SS].base;
3221 new_eflags = 0; /* avoid warning */
3222#ifdef TARGET_X86_64
3223 if (shift == 2) {
3224 POPQ(sp, new_eip);
3225 POPQ(sp, new_cs);
3226 new_cs &= 0xffff;
3227 if (is_iret) {
3228 POPQ(sp, new_eflags);
3229 }
3230 } else
3231#endif
3232 if (shift == 1) {
3233 /* 32 bits */
3234 POPL(ssp, sp, sp_mask, new_eip);
3235 POPL(ssp, sp, sp_mask, new_cs);
3236 new_cs &= 0xffff;
3237 if (is_iret) {
3238 POPL(ssp, sp, sp_mask, new_eflags);
3239#if defined(VBOX) && defined(DEBUG)
3240 printf("iret: new CS %04X\n", new_cs);
3241 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3242 printf("iret: new EFLAGS %08X\n", new_eflags);
3243 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3244#endif
3245 if (new_eflags & VM_MASK)
3246 goto return_to_vm86;
3247 }
3248#ifdef VBOX
3249 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3250 {
3251#ifdef DEBUG
3252 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3253#endif
3254 new_cs = new_cs & 0xfffc;
3255 }
3256#endif
3257 } else {
3258 /* 16 bits */
3259 POPW(ssp, sp, sp_mask, new_eip);
3260 POPW(ssp, sp, sp_mask, new_cs);
3261 if (is_iret)
3262 POPW(ssp, sp, sp_mask, new_eflags);
3263 }
3264#ifdef DEBUG_PCALL
3265 if (loglevel & CPU_LOG_PCALL) {
3266 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3267 new_cs, new_eip, shift, addend);
3268 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3269 }
3270#endif
3271 if ((new_cs & 0xfffc) == 0)
3272 {
3273#if defined(VBOX) && defined(DEBUG)
3274 printf("new_cs & 0xfffc) == 0\n");
3275#endif
3276 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3277 }
3278 if (load_segment(&e1, &e2, new_cs) != 0)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 printf("load_segment failed\n");
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 if (!(e2 & DESC_S_MASK) ||
3286 !(e2 & DESC_CS_MASK))
3287 {
3288#if defined(VBOX) && defined(DEBUG)
3289 printf("e2 mask %08x\n", e2);
3290#endif
3291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3292 }
3293 cpl = env->hflags & HF_CPL_MASK;
3294 rpl = new_cs & 3;
3295 if (rpl < cpl)
3296 {
3297#if defined(VBOX) && defined(DEBUG)
3298 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3299#endif
3300 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3301 }
3302 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3303 if (e2 & DESC_C_MASK) {
3304 if (dpl > rpl)
3305 {
3306#if defined(VBOX) && defined(DEBUG)
3307 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3308#endif
3309 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3310 }
3311 } else {
3312 if (dpl != rpl)
3313 {
3314#if defined(VBOX) && defined(DEBUG)
3315 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3316#endif
3317 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3318 }
3319 }
3320 if (!(e2 & DESC_P_MASK))
3321 {
3322#if defined(VBOX) && defined(DEBUG)
3323 printf("DESC_P_MASK e2=%08x\n", e2);
3324#endif
3325 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3326 }
3327
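 /* addend is the immediate of lret/ret imm16 (0 for iret); it releases the callee's stack parameters */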
3328 sp += addend;
3329 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3330 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3331 /* return to same privilege level */
3332 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3333 get_seg_base(e1, e2),
3334 get_seg_limit(e1, e2),
3335 e2);
3336 } else {
3337 /* return to different privilege level */
3338#ifdef TARGET_X86_64
3339 if (shift == 2) {
3340 POPQ(sp, new_esp);
3341 POPQ(sp, new_ss);
3342 new_ss &= 0xffff;
3343 } else
3344#endif
3345 if (shift == 1) {
3346 /* 32 bits */
3347 POPL(ssp, sp, sp_mask, new_esp);
3348 POPL(ssp, sp, sp_mask, new_ss);
3349 new_ss &= 0xffff;
3350 } else {
3351 /* 16 bits */
3352 POPW(ssp, sp, sp_mask, new_esp);
3353 POPW(ssp, sp, sp_mask, new_ss);
3354 }
3355#ifdef DEBUG_PCALL
3356 if (loglevel & CPU_LOG_PCALL) {
3357 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3358 new_ss, new_esp);
3359 }
3360#endif
3361 if ((new_ss & 0xfffc) == 0) {
3362#ifdef TARGET_X86_64
3363 /* NULL ss is allowed in long mode if cpl != 3*/
3364 /* XXX: test CS64 ? */
3365 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3366 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3367 0, 0xffffffff,
3368 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3369 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3370 DESC_W_MASK | DESC_A_MASK);
3371 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3372 } else
3373#endif
3374 {
3375 raise_exception_err(EXCP0D_GPF, 0);
3376 }
3377 } else {
3378 if ((new_ss & 3) != rpl)
3379 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3380 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3381 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3382 if (!(ss_e2 & DESC_S_MASK) ||
3383 (ss_e2 & DESC_CS_MASK) ||
3384 !(ss_e2 & DESC_W_MASK))
3385 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3386 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3387 if (dpl != rpl)
3388 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3389 if (!(ss_e2 & DESC_P_MASK))
3390 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3391 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3392 get_seg_base(ss_e1, ss_e2),
3393 get_seg_limit(ss_e1, ss_e2),
3394 ss_e2);
3395 }
3396
3397 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3398 get_seg_base(e1, e2),
3399 get_seg_limit(e1, e2),
3400 e2);
3401 cpu_x86_set_cpl(env, rpl);
3402 sp = new_esp;
3403#ifdef TARGET_X86_64
3404 if (env->hflags & HF_CS64_MASK)
3405 sp_mask = -1;
3406 else
3407#endif
3408 sp_mask = get_sp_mask(ss_e2);
3409
3410 /* validate data segments */
3411 validate_seg(R_ES, rpl);
3412 validate_seg(R_DS, rpl);
3413 validate_seg(R_FS, rpl);
3414 validate_seg(R_GS, rpl);
3415
3416 sp += addend;
3417 }
3418 SET_ESP(sp, sp_mask);
3419 env->eip = new_eip;
3420 if (is_iret) {
3421 /* NOTE: 'cpl' is the _old_ CPL */
3422 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3423 if (cpl == 0)
3424#ifdef VBOX
3425 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3426#else
3427 eflags_mask |= IOPL_MASK;
3428#endif
3429 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3430 if (cpl <= iopl)
3431 eflags_mask |= IF_MASK;
3432 if (shift == 0)
3433 eflags_mask &= 0xffff;
3434 load_eflags(new_eflags, eflags_mask);
3435 }
3436 return;
3437
3438 return_to_vm86:
3439 POPL(ssp, sp, sp_mask, new_esp);
3440 POPL(ssp, sp, sp_mask, new_ss);
3441 POPL(ssp, sp, sp_mask, new_es);
3442 POPL(ssp, sp, sp_mask, new_ds);
3443 POPL(ssp, sp, sp_mask, new_fs);
3444 POPL(ssp, sp, sp_mask, new_gs);
3445
3446 /* modify processor state */
3447 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3448 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3449 load_seg_vm(R_CS, new_cs & 0xffff);
3450 cpu_x86_set_cpl(env, 3);
3451 load_seg_vm(R_SS, new_ss & 0xffff);
3452 load_seg_vm(R_ES, new_es & 0xffff);
3453 load_seg_vm(R_DS, new_ds & 0xffff);
3454 load_seg_vm(R_FS, new_fs & 0xffff);
3455 load_seg_vm(R_GS, new_gs & 0xffff);
3456
3457 env->eip = new_eip & 0xffff;
3458 ESP = new_esp;
3459}
3460
3461void helper_iret_protected(int shift, int next_eip)
3462{
3463 int tss_selector, type;
3464 uint32_t e1, e2;
3465
3466#ifdef VBOX
3467 e1 = e2 = 0;
3468 remR3TrapClear(env->pVM);
3469#endif
3470
3471 /* specific case for TSS */
3472 if (env->eflags & NT_MASK) {
3473#ifdef TARGET_X86_64
3474 if (env->hflags & HF_LMA_MASK)
3475 raise_exception_err(EXCP0D_GPF, 0);
3476#endif
3477 tss_selector = lduw_kernel(env->tr.base + 0);
3478 if (tss_selector & 4)
3479 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3480 if (load_segment(&e1, &e2, tss_selector) != 0)
3481 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3482 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3483 /* NOTE: we check both segment and busy TSS */
3484 if (type != 3)
3485 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3486 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3487 } else {
3488 helper_ret_protected(shift, 1, 0);
3489 }
3490 env->hflags2 &= ~HF2_NMI_MASK;
3491#ifdef USE_KQEMU
3492 if (kqemu_is_ok(env)) {
3493 CC_OP = CC_OP_EFLAGS;
3494 env->exception_index = -1;
3495 cpu_loop_exit();
3496 }
3497#endif
3498}
3499
3500void helper_lret_protected(int shift, int addend)
3501{
3502 helper_ret_protected(shift, 0, addend);
3503#ifdef USE_KQEMU
3504 if (kqemu_is_ok(env)) {
3505 env->exception_index = -1;
3506 cpu_loop_exit();
3507 }
3508#endif
3509}
3510
3511void helper_sysenter(void)
3512{
3513 if (env->sysenter_cs == 0) {
3514 raise_exception_err(EXCP0D_GPF, 0);
3515 }
3516 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3517 cpu_x86_set_cpl(env, 0);
3518
3519#ifdef TARGET_X86_64
3520 if (env->hflags & HF_LMA_MASK) {
3521 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3522 0, 0xffffffff,
3523 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3524 DESC_S_MASK |
3525 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3526 } else
3527#endif
3528 {
3529 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3530 0, 0xffffffff,
3531 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3532 DESC_S_MASK |
3533 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3534 }
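 /* SS is implicitly SYSENTER_CS + 8, loaded with a fixed flat base and limit */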
3535 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3536 0, 0xffffffff,
3537 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3538 DESC_S_MASK |
3539 DESC_W_MASK | DESC_A_MASK);
3540 ESP = env->sysenter_esp;
3541 EIP = env->sysenter_eip;
3542}
3543
3544void helper_sysexit(int dflag)
3545{
3546 int cpl;
3547
3548 cpl = env->hflags & HF_CPL_MASK;
3549 if (env->sysenter_cs == 0 || cpl != 0) {
3550 raise_exception_err(EXCP0D_GPF, 0);
3551 }
3552 cpu_x86_set_cpl(env, 3);
3553#ifdef TARGET_X86_64
3554 if (dflag == 2) {
3555 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3556 0, 0xffffffff,
3557 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3558 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3559 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3560 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3561 0, 0xffffffff,
3562 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3563 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3564 DESC_W_MASK | DESC_A_MASK);
3565 } else
3566#endif
3567 {
3568 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3569 0, 0xffffffff,
3570 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3571 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3572 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3573 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3574 0, 0xffffffff,
3575 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3576 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3577 DESC_W_MASK | DESC_A_MASK);
3578 }
3579 ESP = ECX;
3580 EIP = EDX;
3581#ifdef USE_KQEMU
3582 if (kqemu_is_ok(env)) {
3583 env->exception_index = -1;
3584 cpu_loop_exit();
3585 }
3586#endif
3587}
3588
3589#if defined(CONFIG_USER_ONLY)
3590target_ulong helper_read_crN(int reg)
3591{
3592 return 0;
3593}
3594
3595void helper_write_crN(int reg, target_ulong t0)
3596{
3597}
3598#else
3599target_ulong helper_read_crN(int reg)
3600{
3601 target_ulong val;
3602
3603 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3604 switch(reg) {
3605 default:
3606 val = env->cr[reg];
3607 break;
3608 case 8:
3609 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3610 val = cpu_get_apic_tpr(env);
3611 } else {
3612 val = env->v_tpr;
3613 }
3614 break;
3615 }
3616 return val;
3617}
3618
3619void helper_write_crN(int reg, target_ulong t0)
3620{
3621 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3622 switch(reg) {
3623 case 0:
3624 cpu_x86_update_cr0(env, t0);
3625 break;
3626 case 3:
3627 cpu_x86_update_cr3(env, t0);
3628 break;
3629 case 4:
3630 cpu_x86_update_cr4(env, t0);
3631 break;
3632 case 8:
3633 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3634 cpu_set_apic_tpr(env, t0);
3635 }
3636 env->v_tpr = t0 & 0x0f;
3637 break;
3638 default:
3639 env->cr[reg] = t0;
3640 break;
3641 }
3642}
3643#endif
3644
3645void helper_lmsw(target_ulong t0)
3646{
3647 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3648 if already set to one. */
3649 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3650 helper_write_crN(0, t0);
3651}
3652
3653void helper_clts(void)
3654{
3655 env->cr[0] &= ~CR0_TS_MASK;
3656 env->hflags &= ~HF_TS_MASK;
3657}
3658
3659/* XXX: do more */
3660void helper_movl_drN_T0(int reg, target_ulong t0)
3661{
3662 env->dr[reg] = t0;
3663}
3664
3665void helper_invlpg(target_ulong addr)
3666{
3667 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3668 tlb_flush_page(env, addr);
3669}
3670
3671void helper_rdtsc(void)
3672{
3673 uint64_t val;
3674
3675 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3676 raise_exception(EXCP0D_GPF);
3677 }
3678 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3679
3680 val = cpu_get_tsc(env) + env->tsc_offset;
3681 EAX = (uint32_t)(val);
3682 EDX = (uint32_t)(val >> 32);
3683}
3684
3685#ifdef VBOX
3686void helper_rdtscp(void)
3687{
3688 uint64_t val;
3689 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3690 raise_exception(EXCP0D_GPF);
3691 }
3692
3693 val = cpu_get_tsc(env);
3694 EAX = (uint32_t)(val);
3695 EDX = (uint32_t)(val >> 32);
3696 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3697}
3698#endif
3699
3700void helper_rdpmc(void)
3701{
3702 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3703 raise_exception(EXCP0D_GPF);
3704 }
3705 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3706
3707 /* currently unimplemented */
3708 raise_exception_err(EXCP06_ILLOP, 0);
3709}
3710
3711#if defined(CONFIG_USER_ONLY)
3712void helper_wrmsr(void)
3713{
3714}
3715
3716void helper_rdmsr(void)
3717{
3718}
3719#else
3720void helper_wrmsr(void)
3721{
3722 uint64_t val;
3723
3724 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3725
3726 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3727
3728 switch((uint32_t)ECX) {
3729 case MSR_IA32_SYSENTER_CS:
3730 env->sysenter_cs = val & 0xffff;
3731 break;
3732 case MSR_IA32_SYSENTER_ESP:
3733 env->sysenter_esp = val;
3734 break;
3735 case MSR_IA32_SYSENTER_EIP:
3736 env->sysenter_eip = val;
3737 break;
3738 case MSR_IA32_APICBASE:
3739 cpu_set_apic_base(env, val);
3740 break;
3741 case MSR_EFER:
3742 {
3743 uint64_t update_mask;
3744 update_mask = 0;
3745 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3746 update_mask |= MSR_EFER_SCE;
3747 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3748 update_mask |= MSR_EFER_LME;
3749 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3750 update_mask |= MSR_EFER_FFXSR;
3751 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3752 update_mask |= MSR_EFER_NXE;
3753 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3754 update_mask |= MSR_EFER_SVME;
3755 cpu_load_efer(env, (env->efer & ~update_mask) |
3756 (val & update_mask));
3757 }
3758 break;
3759 case MSR_STAR:
3760 env->star = val;
3761 break;
3762 case MSR_PAT:
3763 env->pat = val;
3764 break;
3765 case MSR_VM_HSAVE_PA:
3766 env->vm_hsave = val;
3767 break;
3768#ifdef TARGET_X86_64
3769 case MSR_LSTAR:
3770 env->lstar = val;
3771 break;
3772 case MSR_CSTAR:
3773 env->cstar = val;
3774 break;
3775 case MSR_FMASK:
3776 env->fmask = val;
3777 break;
3778 case MSR_FSBASE:
3779 env->segs[R_FS].base = val;
3780 break;
3781 case MSR_GSBASE:
3782 env->segs[R_GS].base = val;
3783 break;
3784 case MSR_KERNELGSBASE:
3785 env->kernelgsbase = val;
3786 break;
3787#endif
3788 default:
3789#ifndef VBOX
3790 /* XXX: exception ? */
3791 break;
3792#else /* VBOX */
3793 {
3794 uint32_t ecx = (uint32_t)ECX;
3795 /* In X2APIC specification this range is reserved for APIC control. */
3796 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3797 cpu_apic_wrmsr(env, ecx, val);
3798 /** @todo else exception? */
3799 break;
3800 }
3801 case MSR_K8_TSC_AUX:
3802 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3803 break;
3804#endif /* VBOX */
3805 }
3806}
3807
3808void helper_rdmsr(void)
3809{
3810 uint64_t val;
3811
3812 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3813
3814 switch((uint32_t)ECX) {
3815 case MSR_IA32_SYSENTER_CS:
3816 val = env->sysenter_cs;
3817 break;
3818 case MSR_IA32_SYSENTER_ESP:
3819 val = env->sysenter_esp;
3820 break;
3821 case MSR_IA32_SYSENTER_EIP:
3822 val = env->sysenter_eip;
3823 break;
3824 case MSR_IA32_APICBASE:
3825 val = cpu_get_apic_base(env);
3826 break;
3827 case MSR_EFER:
3828 val = env->efer;
3829 break;
3830 case MSR_STAR:
3831 val = env->star;
3832 break;
3833 case MSR_PAT:
3834 val = env->pat;
3835 break;
3836 case MSR_VM_HSAVE_PA:
3837 val = env->vm_hsave;
3838 break;
3839 case MSR_IA32_PERF_STATUS:
3840 /* tsc_increment_by_tick */
3841 val = 1000ULL;
3842 /* CPU multiplier */
3843 val |= (((uint64_t)4ULL) << 40);
3844 break;
3845#ifdef TARGET_X86_64
3846 case MSR_LSTAR:
3847 val = env->lstar;
3848 break;
3849 case MSR_CSTAR:
3850 val = env->cstar;
3851 break;
3852 case MSR_FMASK:
3853 val = env->fmask;
3854 break;
3855 case MSR_FSBASE:
3856 val = env->segs[R_FS].base;
3857 break;
3858 case MSR_GSBASE:
3859 val = env->segs[R_GS].base;
3860 break;
3861 case MSR_KERNELGSBASE:
3862 val = env->kernelgsbase;
3863 break;
3864#endif
3865#ifdef USE_KQEMU
3866 case MSR_QPI_COMMBASE:
3867 if (env->kqemu_enabled) {
3868 val = kqemu_comm_base;
3869 } else {
3870 val = 0;
3871 }
3872 break;
3873#endif
3874 default:
3875#ifndef VBOX
3876 /* XXX: exception ? */
3877 val = 0;
3878 break;
3879#else /* VBOX */
3880 {
3881 uint32_t ecx = (uint32_t)ECX;
3882 /* In X2APIC specification this range is reserved for APIC control. */
3883 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3884 val = cpu_apic_rdmsr(env, ecx);
3885 else
3886 val = 0; /** @todo else exception? */
3887 break;
3888 }
3889 case MSR_K8_TSC_AUX:
3890 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3891 break;
3892#endif /* VBOX */
3893 }
3894 EAX = (uint32_t)(val);
3895 EDX = (uint32_t)(val >> 32);
3896}
3897#endif
3898
3899target_ulong helper_lsl(target_ulong selector1)
3900{
3901 unsigned int limit;
3902 uint32_t e1, e2, eflags, selector;
3903 int rpl, dpl, cpl, type;
3904
3905 selector = selector1 & 0xffff;
3906 eflags = cc_table[CC_OP].compute_all();
3907 if (load_segment(&e1, &e2, selector) != 0)
3908 goto fail;
3909 rpl = selector & 3;
3910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3911 cpl = env->hflags & HF_CPL_MASK;
3912 if (e2 & DESC_S_MASK) {
3913 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3914 /* conforming */
3915 } else {
3916 if (dpl < cpl || dpl < rpl)
3917 goto fail;
3918 }
3919 } else {
3920 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3921 switch(type) {
3922 case 1:
3923 case 2:
3924 case 3:
3925 case 9:
3926 case 11:
3927 break;
3928 default:
3929 goto fail;
3930 }
3931 if (dpl < cpl || dpl < rpl) {
3932 fail:
3933 CC_SRC = eflags & ~CC_Z;
3934 return 0;
3935 }
3936 }
3937 limit = get_seg_limit(e1, e2);
3938 CC_SRC = eflags | CC_Z;
3939 return limit;
3940}
3941
3942target_ulong helper_lar(target_ulong selector1)
3943{
3944 uint32_t e1, e2, eflags, selector;
3945 int rpl, dpl, cpl, type;
3946
3947 selector = selector1 & 0xffff;
3948 eflags = cc_table[CC_OP].compute_all();
3949 if ((selector & 0xfffc) == 0)
3950 goto fail;
3951 if (load_segment(&e1, &e2, selector) != 0)
3952 goto fail;
3953 rpl = selector & 3;
3954 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3955 cpl = env->hflags & HF_CPL_MASK;
3956 if (e2 & DESC_S_MASK) {
3957 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3958 /* conforming */
3959 } else {
3960 if (dpl < cpl || dpl < rpl)
3961 goto fail;
3962 }
3963 } else {
3964 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3965 switch(type) {
3966 case 1:
3967 case 2:
3968 case 3:
3969 case 4:
3970 case 5:
3971 case 9:
3972 case 11:
3973 case 12:
3974 break;
3975 default:
3976 goto fail;
3977 }
3978 if (dpl < cpl || dpl < rpl) {
3979 fail:
3980 CC_SRC = eflags & ~CC_Z;
3981 return 0;
3982 }
3983 }
3984 CC_SRC = eflags | CC_Z;
3985 return e2 & 0x00f0ff00;
3986}
3987
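/* VERR/VERW: set ZF if the segment named by the selector is readable
   (respectively a writable data segment) at the current CPL/RPL, clear ZF
   otherwise. No exception is raised; only the flags are updated. */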
3988void helper_verr(target_ulong selector1)
3989{
3990 uint32_t e1, e2, eflags, selector;
3991 int rpl, dpl, cpl;
3992
3993 selector = selector1 & 0xffff;
3994 eflags = cc_table[CC_OP].compute_all();
3995 if ((selector & 0xfffc) == 0)
3996 goto fail;
3997 if (load_segment(&e1, &e2, selector) != 0)
3998 goto fail;
3999 if (!(e2 & DESC_S_MASK))
4000 goto fail;
4001 rpl = selector & 3;
4002 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4003 cpl = env->hflags & HF_CPL_MASK;
4004 if (e2 & DESC_CS_MASK) {
4005 if (!(e2 & DESC_R_MASK))
4006 goto fail;
4007 if (!(e2 & DESC_C_MASK)) {
4008 if (dpl < cpl || dpl < rpl)
4009 goto fail;
4010 }
4011 } else {
4012 if (dpl < cpl || dpl < rpl) {
4013 fail:
4014 CC_SRC = eflags & ~CC_Z;
4015 return;
4016 }
4017 }
4018 CC_SRC = eflags | CC_Z;
4019}
4020
4021void helper_verw(target_ulong selector1)
4022{
4023 uint32_t e1, e2, eflags, selector;
4024 int rpl, dpl, cpl;
4025
4026 selector = selector1 & 0xffff;
4027 eflags = cc_table[CC_OP].compute_all();
4028 if ((selector & 0xfffc) == 0)
4029 goto fail;
4030 if (load_segment(&e1, &e2, selector) != 0)
4031 goto fail;
4032 if (!(e2 & DESC_S_MASK))
4033 goto fail;
4034 rpl = selector & 3;
4035 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4036 cpl = env->hflags & HF_CPL_MASK;
4037 if (e2 & DESC_CS_MASK) {
4038 goto fail;
4039 } else {
4040 if (dpl < cpl || dpl < rpl)
4041 goto fail;
4042 if (!(e2 & DESC_W_MASK)) {
4043 fail:
4044 CC_SRC = eflags & ~CC_Z;
4045 return;
4046 }
4047 }
4048 CC_SRC = eflags | CC_Z;
4049}
4050
4051/* x87 FPU helpers */
4052
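/* Merge the given exception flags into the FPU status word; if any set flag
   is unmasked in the control word, also set the error-summary (ES) and busy
   (B) bits so a later FP instruction or FWAIT can deliver the exception. */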
4053static void fpu_set_exception(int mask)
4054{
4055 env->fpus |= mask;
4056 if (env->fpus & (~env->fpuc & FPUC_EM))
4057 env->fpus |= FPUS_SE | FPUS_B;
4058}
4059
4060#ifndef VBOX
4061static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4062#else /* VBOX */
4063DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4064#endif /* VBOX */
4065{
4066 if (b == 0.0)
4067 fpu_set_exception(FPUS_ZE);
4068 return a / b;
4069}
4070
4071void fpu_raise_exception(void)
4072{
4073 if (env->cr[0] & CR0_NE_MASK) {
4074 raise_exception(EXCP10_COPR);
4075 }
4076#if !defined(CONFIG_USER_ONLY)
4077 else {
4078 cpu_set_ferr(env);
4079 }
4080#endif
4081}
4082
4083void helper_flds_FT0(uint32_t val)
4084{
4085 union {
4086 float32 f;
4087 uint32_t i;
4088 } u;
4089 u.i = val;
4090 FT0 = float32_to_floatx(u.f, &env->fp_status);
4091}
4092
4093void helper_fldl_FT0(uint64_t val)
4094{
4095 union {
4096 float64 f;
4097 uint64_t i;
4098 } u;
4099 u.i = val;
4100 FT0 = float64_to_floatx(u.f, &env->fp_status);
4101}
4102
4103void helper_fildl_FT0(int32_t val)
4104{
4105 FT0 = int32_to_floatx(val, &env->fp_status);
4106}
4107
4108void helper_flds_ST0(uint32_t val)
4109{
4110 int new_fpstt;
4111 union {
4112 float32 f;
4113 uint32_t i;
4114 } u;
4115 new_fpstt = (env->fpstt - 1) & 7;
4116 u.i = val;
4117 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4118 env->fpstt = new_fpstt;
4119 env->fptags[new_fpstt] = 0; /* validate stack entry */
4120}
4121
4122void helper_fldl_ST0(uint64_t val)
4123{
4124 int new_fpstt;
4125 union {
4126 float64 f;
4127 uint64_t i;
4128 } u;
4129 new_fpstt = (env->fpstt - 1) & 7;
4130 u.i = val;
4131 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4132 env->fpstt = new_fpstt;
4133 env->fptags[new_fpstt] = 0; /* validate stack entry */
4134}
4135
4136void helper_fildl_ST0(int32_t val)
4137{
4138 int new_fpstt;
4139 new_fpstt = (env->fpstt - 1) & 7;
4140 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4141 env->fpstt = new_fpstt;
4142 env->fptags[new_fpstt] = 0; /* validate stack entry */
4143}
4144
4145void helper_fildll_ST0(int64_t val)
4146{
4147 int new_fpstt;
4148 new_fpstt = (env->fpstt - 1) & 7;
4149 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4150 env->fpstt = new_fpstt;
4151 env->fptags[new_fpstt] = 0; /* validate stack entry */
4152}
4153
4154#ifndef VBOX
4155uint32_t helper_fsts_ST0(void)
4156#else
4157RTCCUINTREG helper_fsts_ST0(void)
4158#endif
4159{
4160 union {
4161 float32 f;
4162 uint32_t i;
4163 } u;
4164 u.f = floatx_to_float32(ST0, &env->fp_status);
4165 return u.i;
4166}
4167
4168uint64_t helper_fstl_ST0(void)
4169{
4170 union {
4171 float64 f;
4172 uint64_t i;
4173 } u;
4174 u.f = floatx_to_float64(ST0, &env->fp_status);
4175 return u.i;
4176}
4177#ifndef VBOX
4178int32_t helper_fist_ST0(void)
4179#else
4180RTCCINTREG helper_fist_ST0(void)
4181#endif
4182{
4183 int32_t val;
4184 val = floatx_to_int32(ST0, &env->fp_status);
4185 if (val != (int16_t)val)
4186 val = -32768;
4187 return val;
4188}
4189
4190#ifndef VBOX
4191int32_t helper_fistl_ST0(void)
4192#else
4193RTCCINTREG helper_fistl_ST0(void)
4194#endif
4195{
4196 int32_t val;
4197 val = floatx_to_int32(ST0, &env->fp_status);
4198 return val;
4199}
4200
4201int64_t helper_fistll_ST0(void)
4202{
4203 int64_t val;
4204 val = floatx_to_int64(ST0, &env->fp_status);
4205 return val;
4206}
4207
4208#ifndef VBOX
4209int32_t helper_fistt_ST0(void)
4210#else
4211RTCCINTREG helper_fistt_ST0(void)
4212#endif
4213{
4214 int32_t val;
4215 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4216 if (val != (int16_t)val)
4217 val = -32768;
4218 return val;
4219}
4220
4221#ifndef VBOX
4222int32_t helper_fisttl_ST0(void)
4223#else
4224RTCCINTREG helper_fisttl_ST0(void)
4225#endif
4226{
4227 int32_t val;
4228 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4229 return val;
4230}
4231
4232int64_t helper_fisttll_ST0(void)
4233{
4234 int64_t val;
4235 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4236 return val;
4237}
4238
4239void helper_fldt_ST0(target_ulong ptr)
4240{
4241 int new_fpstt;
4242 new_fpstt = (env->fpstt - 1) & 7;
4243 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4244 env->fpstt = new_fpstt;
4245 env->fptags[new_fpstt] = 0; /* validate stack entry */
4246}
4247
4248void helper_fstt_ST0(target_ulong ptr)
4249{
4250 helper_fstt(ST0, ptr);
4251}
4252
4253void helper_fpush(void)
4254{
4255 fpush();
4256}
4257
4258void helper_fpop(void)
4259{
4260 fpop();
4261}
4262
4263void helper_fdecstp(void)
4264{
4265 env->fpstt = (env->fpstt - 1) & 7;
4266 env->fpus &= (~0x4700);
4267}
4268
4269void helper_fincstp(void)
4270{
4271 env->fpstt = (env->fpstt + 1) & 7;
4272 env->fpus &= (~0x4700);
4273}
4274
4275/* FPU move */
4276
4277void helper_ffree_STN(int st_index)
4278{
4279 env->fptags[(env->fpstt + st_index) & 7] = 1;
4280}
4281
4282void helper_fmov_ST0_FT0(void)
4283{
4284 ST0 = FT0;
4285}
4286
4287void helper_fmov_FT0_STN(int st_index)
4288{
4289 FT0 = ST(st_index);
4290}
4291
4292void helper_fmov_ST0_STN(int st_index)
4293{
4294 ST0 = ST(st_index);
4295}
4296
4297void helper_fmov_STN_ST0(int st_index)
4298{
4299 ST(st_index) = ST0;
4300}
4301
4302void helper_fxchg_ST0_STN(int st_index)
4303{
4304 CPU86_LDouble tmp;
4305 tmp = ST(st_index);
4306 ST(st_index) = ST0;
4307 ST0 = tmp;
4308}
4309
4310/* FPU operations */
4311
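/* Map the floatx_compare() result (-1 = less, 0 = equal, 1 = greater,
   2 = unordered) to the x87 C3/C2/C0 condition bits; fcomi_ccval further
   below does the same for the EFLAGS ZF/PF/CF bits used by FCOMI/FUCOMI. */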
4312static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4313
4314void helper_fcom_ST0_FT0(void)
4315{
4316 int ret;
4317
4318 ret = floatx_compare(ST0, FT0, &env->fp_status);
4319 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4320 FORCE_RET();
4321}
4322
4323void helper_fucom_ST0_FT0(void)
4324{
4325 int ret;
4326
4327 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4328 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4329 FORCE_RET();
4330}
4331
4332static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4333
4334void helper_fcomi_ST0_FT0(void)
4335{
4336 int eflags;
4337 int ret;
4338
4339 ret = floatx_compare(ST0, FT0, &env->fp_status);
4340 eflags = cc_table[CC_OP].compute_all();
4341 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4342 CC_SRC = eflags;
4343 FORCE_RET();
4344}
4345
4346void helper_fucomi_ST0_FT0(void)
4347{
4348 int eflags;
4349 int ret;
4350
4351 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4352 eflags = cc_table[CC_OP].compute_all();
4353 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4354 CC_SRC = eflags;
4355 FORCE_RET();
4356}
4357
4358void helper_fadd_ST0_FT0(void)
4359{
4360 ST0 += FT0;
4361}
4362
4363void helper_fmul_ST0_FT0(void)
4364{
4365 ST0 *= FT0;
4366}
4367
4368void helper_fsub_ST0_FT0(void)
4369{
4370 ST0 -= FT0;
4371}
4372
4373void helper_fsubr_ST0_FT0(void)
4374{
4375 ST0 = FT0 - ST0;
4376}
4377
4378void helper_fdiv_ST0_FT0(void)
4379{
4380 ST0 = helper_fdiv(ST0, FT0);
4381}
4382
4383void helper_fdivr_ST0_FT0(void)
4384{
4385 ST0 = helper_fdiv(FT0, ST0);
4386}
4387
4388/* fp operations between STN and ST0 */
4389
4390void helper_fadd_STN_ST0(int st_index)
4391{
4392 ST(st_index) += ST0;
4393}
4394
4395void helper_fmul_STN_ST0(int st_index)
4396{
4397 ST(st_index) *= ST0;
4398}
4399
4400void helper_fsub_STN_ST0(int st_index)
4401{
4402 ST(st_index) -= ST0;
4403}
4404
4405void helper_fsubr_STN_ST0(int st_index)
4406{
4407 CPU86_LDouble *p;
4408 p = &ST(st_index);
4409 *p = ST0 - *p;
4410}
4411
4412void helper_fdiv_STN_ST0(int st_index)
4413{
4414 CPU86_LDouble *p;
4415 p = &ST(st_index);
4416 *p = helper_fdiv(*p, ST0);
4417}
4418
4419void helper_fdivr_STN_ST0(int st_index)
4420{
4421 CPU86_LDouble *p;
4422 p = &ST(st_index);
4423 *p = helper_fdiv(ST0, *p);
4424}
4425
4426/* misc FPU operations */
4427void helper_fchs_ST0(void)
4428{
4429 ST0 = floatx_chs(ST0);
4430}
4431
4432void helper_fabs_ST0(void)
4433{
4434 ST0 = floatx_abs(ST0);
4435}
4436
4437void helper_fld1_ST0(void)
4438{
4439 ST0 = f15rk[1];
4440}
4441
4442void helper_fldl2t_ST0(void)
4443{
4444 ST0 = f15rk[6];
4445}
4446
4447void helper_fldl2e_ST0(void)
4448{
4449 ST0 = f15rk[5];
4450}
4451
4452void helper_fldpi_ST0(void)
4453{
4454 ST0 = f15rk[2];
4455}
4456
4457void helper_fldlg2_ST0(void)
4458{
4459 ST0 = f15rk[3];
4460}
4461
4462void helper_fldln2_ST0(void)
4463{
4464 ST0 = f15rk[4];
4465}
4466
4467void helper_fldz_ST0(void)
4468{
4469 ST0 = f15rk[0];
4470}
4471
4472void helper_fldz_FT0(void)
4473{
4474 FT0 = f15rk[0];
4475}
4476
4477#ifndef VBOX
4478uint32_t helper_fnstsw(void)
4479#else
4480RTCCUINTREG helper_fnstsw(void)
4481#endif
4482{
4483 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4484}
4485
4486#ifndef VBOX
4487uint32_t helper_fnstcw(void)
4488#else
4489RTCCUINTREG helper_fnstcw(void)
4490#endif
4491{
4492 return env->fpuc;
4493}
4494
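/* Propagate the x87 control word into the softfloat status: the RC field
   (bits 10-11) selects the rounding mode and, when extended precision is
   available, the PC field (bits 8-9) selects the 32/64/80-bit rounding
   precision. */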
4495static void update_fp_status(void)
4496{
4497 int rnd_type;
4498
4499 /* set rounding mode */
4500 switch(env->fpuc & RC_MASK) {
4501 default:
4502 case RC_NEAR:
4503 rnd_type = float_round_nearest_even;
4504 break;
4505 case RC_DOWN:
4506 rnd_type = float_round_down;
4507 break;
4508 case RC_UP:
4509 rnd_type = float_round_up;
4510 break;
4511 case RC_CHOP:
4512 rnd_type = float_round_to_zero;
4513 break;
4514 }
4515 set_float_rounding_mode(rnd_type, &env->fp_status);
4516#ifdef FLOATX80
4517 switch((env->fpuc >> 8) & 3) {
4518 case 0:
4519 rnd_type = 32;
4520 break;
4521 case 2:
4522 rnd_type = 64;
4523 break;
4524 case 3:
4525 default:
4526 rnd_type = 80;
4527 break;
4528 }
4529 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4530#endif
4531}
4532
4533void helper_fldcw(uint32_t val)
4534{
4535 env->fpuc = val;
4536 update_fp_status();
4537}
4538
4539void helper_fclex(void)
4540{
4541 env->fpus &= 0x7f00;
4542}
4543
4544void helper_fwait(void)
4545{
4546 if (env->fpus & FPUS_SE)
4547 fpu_raise_exception();
4548 FORCE_RET();
4549}
4550
4551void helper_fninit(void)
4552{
4553 env->fpus = 0;
4554 env->fpstt = 0;
4555 env->fpuc = 0x37f;
4556 env->fptags[0] = 1;
4557 env->fptags[1] = 1;
4558 env->fptags[2] = 1;
4559 env->fptags[3] = 1;
4560 env->fptags[4] = 1;
4561 env->fptags[5] = 1;
4562 env->fptags[6] = 1;
4563 env->fptags[7] = 1;
4564}
4565
4566/* BCD ops */
4567
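/* FBLD/FBST use the 10-byte packed BCD format: bytes 0..8 hold 18 decimal
   digits (two per byte, low digit in the low nibble) and bit 7 of byte 9 is
   the sign. */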
4568void helper_fbld_ST0(target_ulong ptr)
4569{
4570 CPU86_LDouble tmp;
4571 uint64_t val;
4572 unsigned int v;
4573 int i;
4574
4575 val = 0;
4576 for(i = 8; i >= 0; i--) {
4577 v = ldub(ptr + i);
4578 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4579 }
4580 tmp = val;
4581 if (ldub(ptr + 9) & 0x80)
4582 tmp = -tmp;
4583 fpush();
4584 ST0 = tmp;
4585}
4586
4587void helper_fbst_ST0(target_ulong ptr)
4588{
4589 int v;
4590 target_ulong mem_ref, mem_end;
4591 int64_t val;
4592
4593 val = floatx_to_int64(ST0, &env->fp_status);
4594 mem_ref = ptr;
4595 mem_end = mem_ref + 9;
4596 if (val < 0) {
4597 stb(mem_end, 0x80);
4598 val = -val;
4599 } else {
4600 stb(mem_end, 0x00);
4601 }
4602 while (mem_ref < mem_end) {
4603 if (val == 0)
4604 break;
4605 v = val % 100;
4606 val = val / 100;
4607 v = ((v / 10) << 4) | (v % 10);
4608 stb(mem_ref++, v);
4609 }
4610 while (mem_ref < mem_end) {
4611 stb(mem_ref++, 0);
4612 }
4613}
4614
4615void helper_f2xm1(void)
4616{
4617 ST0 = pow(2.0,ST0) - 1.0;
4618}
4619
4620void helper_fyl2x(void)
4621{
4622 CPU86_LDouble fptemp;
4623
4624 fptemp = ST0;
4625 if (fptemp>0.0){
4626 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4627 ST1 *= fptemp;
4628 fpop();
4629 } else {
4630 env->fpus &= (~0x4700);
4631 env->fpus |= 0x400;
4632 }
4633}
4634
4635void helper_fptan(void)
4636{
4637 CPU86_LDouble fptemp;
4638
4639 fptemp = ST0;
4640 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4641 env->fpus |= 0x400;
4642 } else {
4643 ST0 = tan(fptemp);
4644 fpush();
4645 ST0 = 1.0;
4646 env->fpus &= (~0x400); /* C2 <-- 0 */
4647 /* the above code is for |arg| < 2**52 only */
4648 }
4649}
4650
4651void helper_fpatan(void)
4652{
4653 CPU86_LDouble fptemp, fpsrcop;
4654
4655 fpsrcop = ST1;
4656 fptemp = ST0;
4657 ST1 = atan2(fpsrcop,fptemp);
4658 fpop();
4659}
4660
4661void helper_fxtract(void)
4662{
4663 CPU86_LDoubleU temp;
4664 unsigned int expdif;
4665
4666 temp.d = ST0;
4667 expdif = EXPD(temp) - EXPBIAS;
4668 /*DP exponent bias*/
4669 ST0 = expdif;
4670 fpush();
4671 BIASEXPONENT(temp);
4672 ST0 = temp.d;
4673}
4674
4675#ifdef VBOX
4676#ifdef _MSC_VER
4677/* MSC cannot divide by zero */
4678extern double _Nan;
4679#define NaN _Nan
4680#else
4681#define NaN (0.0 / 0.0)
4682#endif
4683#endif /* VBOX */
4684
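/* FPREM1/FPREM compute the (round-to-nearest resp. truncating) partial
   remainder of ST0 by ST1. If the exponent difference is 53 or more, only a
   partial reduction is performed and C2 is set so the guest is expected to
   loop; otherwise C2 is cleared and C0,C3,C1 receive the three low bits of
   the quotient. */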
4685void helper_fprem1(void)
4686{
4687 CPU86_LDouble dblq, fpsrcop, fptemp;
4688 CPU86_LDoubleU fpsrcop1, fptemp1;
4689 int expdif;
4690 signed long long int q;
4691
4692#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4693 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4694#else
4695 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4696#endif
4697 ST0 = 0.0 / 0.0; /* NaN */
4698 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4699 return;
4700 }
4701
4702 fpsrcop = ST0;
4703 fptemp = ST1;
4704 fpsrcop1.d = fpsrcop;
4705 fptemp1.d = fptemp;
4706 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4707
4708 if (expdif < 0) {
4709 /* optimisation? taken from the AMD docs */
4710 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4711 /* ST0 is unchanged */
4712 return;
4713 }
4714
4715 if (expdif < 53) {
4716 dblq = fpsrcop / fptemp;
4717 /* round dblq towards nearest integer */
4718 dblq = rint(dblq);
4719 ST0 = fpsrcop - fptemp * dblq;
4720
4721 /* convert dblq to q by truncating towards zero */
4722 if (dblq < 0.0)
4723 q = (signed long long int)(-dblq);
4724 else
4725 q = (signed long long int)dblq;
4726
4727 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4728 /* (C0,C3,C1) <-- (q2,q1,q0) */
4729 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4730 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4731 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4732 } else {
4733 env->fpus |= 0x400; /* C2 <-- 1 */
4734 fptemp = pow(2.0, expdif - 50);
4735 fpsrcop = (ST0 / ST1) / fptemp;
4736 /* fpsrcop = integer obtained by chopping */
4737 fpsrcop = (fpsrcop < 0.0) ?
4738 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4739 ST0 -= (ST1 * fpsrcop * fptemp);
4740 }
4741}
4742
4743void helper_fprem(void)
4744{
4745 CPU86_LDouble dblq, fpsrcop, fptemp;
4746 CPU86_LDoubleU fpsrcop1, fptemp1;
4747 int expdif;
4748 signed long long int q;
4749
4750#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4751 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4752#else
4753 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4754#endif
4755 ST0 = 0.0 / 0.0; /* NaN */
4756 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4757 return;
4758 }
4759
4760 fpsrcop = (CPU86_LDouble)ST0;
4761 fptemp = (CPU86_LDouble)ST1;
4762 fpsrcop1.d = fpsrcop;
4763 fptemp1.d = fptemp;
4764 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4765
4766 if (expdif < 0) {
4767 /* optimisation? taken from the AMD docs */
4768 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4769 /* ST0 is unchanged */
4770 return;
4771 }
4772
4773 if ( expdif < 53 ) {
4774 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4775 /* round dblq towards zero */
4776 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4777 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4778
4779 /* convert dblq to q by truncating towards zero */
4780 if (dblq < 0.0)
4781 q = (signed long long int)(-dblq);
4782 else
4783 q = (signed long long int)dblq;
4784
4785 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4786 /* (C0,C3,C1) <-- (q2,q1,q0) */
4787 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4788 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4789 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4790 } else {
4791 int N = 32 + (expdif % 32); /* as per AMD docs */
4792 env->fpus |= 0x400; /* C2 <-- 1 */
4793 fptemp = pow(2.0, (double)(expdif - N));
4794 fpsrcop = (ST0 / ST1) / fptemp;
4795 /* fpsrcop = integer obtained by chopping */
4796 fpsrcop = (fpsrcop < 0.0) ?
4797 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4798 ST0 -= (ST1 * fpsrcop * fptemp);
4799 }
4800}
4801
4802void helper_fyl2xp1(void)
4803{
4804 CPU86_LDouble fptemp;
4805
4806 fptemp = ST0;
4807 if ((fptemp+1.0)>0.0) {
4808 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4809 ST1 *= fptemp;
4810 fpop();
4811 } else {
4812 env->fpus &= (~0x4700);
4813 env->fpus |= 0x400;
4814 }
4815}
4816
4817void helper_fsqrt(void)
4818{
4819 CPU86_LDouble fptemp;
4820
4821 fptemp = ST0;
4822 if (fptemp<0.0) {
4823 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4824 env->fpus |= 0x400;
4825 }
4826 ST0 = sqrt(fptemp);
4827}
4828
4829void helper_fsincos(void)
4830{
4831 CPU86_LDouble fptemp;
4832
4833 fptemp = ST0;
4834 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4835 env->fpus |= 0x400;
4836 } else {
4837 ST0 = sin(fptemp);
4838 fpush();
4839 ST0 = cos(fptemp);
4840 env->fpus &= (~0x400); /* C2 <-- 0 */
4841 /* the above code is for |arg| < 2**63 only */
4842 }
4843}
4844
4845void helper_frndint(void)
4846{
4847 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4848}
4849
4850void helper_fscale(void)
4851{
4852 ST0 = ldexp (ST0, (int)(ST1));
4853}
4854
4855void helper_fsin(void)
4856{
4857 CPU86_LDouble fptemp;
4858
4859 fptemp = ST0;
4860 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4861 env->fpus |= 0x400;
4862 } else {
4863 ST0 = sin(fptemp);
4864 env->fpus &= (~0x400); /* C2 <-- 0 */
4865 /* the above code is for |arg| < 2**53 only */
4866 }
4867}
4868
4869void helper_fcos(void)
4870{
4871 CPU86_LDouble fptemp;
4872
4873 fptemp = ST0;
4874 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4875 env->fpus |= 0x400;
4876 } else {
4877 ST0 = cos(fptemp);
4878 env->fpus &= (~0x400); /* C2 <-- 0 */
4879 /* the above code is for |arg5 < 2**63 only */
4880 }
4881}
4882
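/* FXAM: classify ST0 via the condition bits: C1 holds the sign, and C3..C0
   encode NaN (C0), infinity (C2|C0), zero (C3), denormal (C3|C2) or a normal
   finite value (C2). */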
4883void helper_fxam_ST0(void)
4884{
4885 CPU86_LDoubleU temp;
4886 int expdif;
4887
4888 temp.d = ST0;
4889
4890 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4891 if (SIGND(temp))
4892 env->fpus |= 0x200; /* C1 <-- 1 */
4893
4894 /* XXX: test fptags too */
4895 expdif = EXPD(temp);
4896 if (expdif == MAXEXPD) {
4897#ifdef USE_X86LDOUBLE
4898 if (MANTD(temp) == 0x8000000000000000ULL)
4899#else
4900 if (MANTD(temp) == 0)
4901#endif
4902 env->fpus |= 0x500 /*Infinity*/;
4903 else
4904 env->fpus |= 0x100 /*NaN*/;
4905 } else if (expdif == 0) {
4906 if (MANTD(temp) == 0)
4907 env->fpus |= 0x4000 /*Zero*/;
4908 else
4909 env->fpus |= 0x4400 /*Denormal*/;
4910 } else {
4911 env->fpus |= 0x400;
4912 }
4913}
4914
4915void helper_fstenv(target_ulong ptr, int data32)
4916{
4917 int fpus, fptag, exp, i;
4918 uint64_t mant;
4919 CPU86_LDoubleU tmp;
4920
4921 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4922 fptag = 0;
4923 for (i=7; i>=0; i--) {
4924 fptag <<= 2;
4925 if (env->fptags[i]) {
4926 fptag |= 3;
4927 } else {
4928 tmp.d = env->fpregs[i].d;
4929 exp = EXPD(tmp);
4930 mant = MANTD(tmp);
4931 if (exp == 0 && mant == 0) {
4932 /* zero */
4933 fptag |= 1;
4934 } else if (exp == 0 || exp == MAXEXPD
4935#ifdef USE_X86LDOUBLE
4936 || (mant & (1LL << 63)) == 0
4937#endif
4938 ) {
4939 /* NaNs, infinity, denormal */
4940 fptag |= 2;
4941 }
4942 }
4943 }
4944 if (data32) {
4945 /* 32 bit */
4946 stl(ptr, env->fpuc);
4947 stl(ptr + 4, fpus);
4948 stl(ptr + 8, fptag);
4949 stl(ptr + 12, 0); /* fpip */
4950 stl(ptr + 16, 0); /* fpcs */
4951 stl(ptr + 20, 0); /* fpoo */
4952 stl(ptr + 24, 0); /* fpos */
4953 } else {
4954 /* 16 bit */
4955 stw(ptr, env->fpuc);
4956 stw(ptr + 2, fpus);
4957 stw(ptr + 4, fptag);
4958 stw(ptr + 6, 0);
4959 stw(ptr + 8, 0);
4960 stw(ptr + 10, 0);
4961 stw(ptr + 12, 0);
4962 }
4963}
4964
4965void helper_fldenv(target_ulong ptr, int data32)
4966{
4967 int i, fpus, fptag;
4968
4969 if (data32) {
4970 env->fpuc = lduw(ptr);
4971 fpus = lduw(ptr + 4);
4972 fptag = lduw(ptr + 8);
4973 }
4974 else {
4975 env->fpuc = lduw(ptr);
4976 fpus = lduw(ptr + 2);
4977 fptag = lduw(ptr + 4);
4978 }
4979 env->fpstt = (fpus >> 11) & 7;
4980 env->fpus = fpus & ~0x3800;
4981 for(i = 0;i < 8; i++) {
4982 env->fptags[i] = ((fptag & 3) == 3);
4983 fptag >>= 2;
4984 }
4985}
4986
4987void helper_fsave(target_ulong ptr, int data32)
4988{
4989 CPU86_LDouble tmp;
4990 int i;
4991
4992 helper_fstenv(ptr, data32);
4993
4994 ptr += (14 << data32);
4995 for(i = 0;i < 8; i++) {
4996 tmp = ST(i);
4997 helper_fstt(tmp, ptr);
4998 ptr += 10;
4999 }
5000
5001 /* fninit */
5002 env->fpus = 0;
5003 env->fpstt = 0;
5004 env->fpuc = 0x37f;
5005 env->fptags[0] = 1;
5006 env->fptags[1] = 1;
5007 env->fptags[2] = 1;
5008 env->fptags[3] = 1;
5009 env->fptags[4] = 1;
5010 env->fptags[5] = 1;
5011 env->fptags[6] = 1;
5012 env->fptags[7] = 1;
5013}
5014
5015void helper_frstor(target_ulong ptr, int data32)
5016{
5017 CPU86_LDouble tmp;
5018 int i;
5019
5020 helper_fldenv(ptr, data32);
5021 ptr += (14 << data32);
5022
5023 for(i = 0;i < 8; i++) {
5024 tmp = helper_fldt(ptr);
5025 ST(i) = tmp;
5026 ptr += 10;
5027 }
5028}
5029
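/* FXSAVE image layout used below: FCW at +0x00, FSW at +0x02, the abridged
   tag word (one bit per register, stored inverted) at +0x04, instruction and
   data pointer fields from +0x08, MXCSR at +0x18, MXCSR_MASK at +0x1c,
   ST0..ST7 at +0x20 (16 bytes each) and the XMM registers at +0xa0. */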
5030void helper_fxsave(target_ulong ptr, int data64)
5031{
5032 int fpus, fptag, i, nb_xmm_regs;
5033 CPU86_LDouble tmp;
5034 target_ulong addr;
5035
5036 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5037 fptag = 0;
5038 for(i = 0; i < 8; i++) {
5039 fptag |= (env->fptags[i] << i);
5040 }
5041 stw(ptr, env->fpuc);
5042 stw(ptr + 2, fpus);
5043 stw(ptr + 4, fptag ^ 0xff);
5044#ifdef TARGET_X86_64
5045 if (data64) {
5046 stq(ptr + 0x08, 0); /* rip */
5047 stq(ptr + 0x10, 0); /* rdp */
5048 } else
5049#endif
5050 {
5051 stl(ptr + 0x08, 0); /* eip */
5052 stl(ptr + 0x0c, 0); /* sel */
5053 stl(ptr + 0x10, 0); /* dp */
5054 stl(ptr + 0x14, 0); /* sel */
5055 }
5056
5057 addr = ptr + 0x20;
5058 for(i = 0;i < 8; i++) {
5059 tmp = ST(i);
5060 helper_fstt(tmp, addr);
5061 addr += 16;
5062 }
5063
5064 if (env->cr[4] & CR4_OSFXSR_MASK) {
5065 /* XXX: finish it */
5066 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5067 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5068 if (env->hflags & HF_CS64_MASK)
5069 nb_xmm_regs = 16;
5070 else
5071 nb_xmm_regs = 8;
5072 addr = ptr + 0xa0;
5073 for(i = 0; i < nb_xmm_regs; i++) {
5074 stq(addr, env->xmm_regs[i].XMM_Q(0));
5075 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5076 addr += 16;
5077 }
5078 }
5079}
5080
5081void helper_fxrstor(target_ulong ptr, int data64)
5082{
5083 int i, fpus, fptag, nb_xmm_regs;
5084 CPU86_LDouble tmp;
5085 target_ulong addr;
5086
5087 env->fpuc = lduw(ptr);
5088 fpus = lduw(ptr + 2);
5089 fptag = lduw(ptr + 4);
5090 env->fpstt = (fpus >> 11) & 7;
5091 env->fpus = fpus & ~0x3800;
5092 fptag ^= 0xff;
5093 for(i = 0;i < 8; i++) {
5094 env->fptags[i] = ((fptag >> i) & 1);
5095 }
5096
5097 addr = ptr + 0x20;
5098 for(i = 0;i < 8; i++) {
5099 tmp = helper_fldt(addr);
5100 ST(i) = tmp;
5101 addr += 16;
5102 }
5103
5104 if (env->cr[4] & CR4_OSFXSR_MASK) {
5105 /* XXX: finish it */
5106 env->mxcsr = ldl(ptr + 0x18);
5107 //ldl(ptr + 0x1c);
5108 if (env->hflags & HF_CS64_MASK)
5109 nb_xmm_regs = 16;
5110 else
5111 nb_xmm_regs = 8;
5112 addr = ptr + 0xa0;
5113 for(i = 0; i < nb_xmm_regs; i++) {
5114#if !defined(VBOX) || __GNUC__ < 4
5115 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5116 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5117#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5118# if 1
5119 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5120 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5121 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5122 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5123# else
5124 /* this works fine on Mac OS X, gcc 4.0.1 */
5125 uint64_t u64 = ldq(addr);
5126 env->xmm_regs[i].XMM_Q(0) = u64;
5127 u64 = ldq(addr + 8);
5128 env->xmm_regs[i].XMM_Q(1) = u64;
5129# endif
5130#endif
5131 addr += 16;
5132 }
5133 }
5134}
5135
5136#ifndef USE_X86LDOUBLE
5137
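/* Conversions between the 80-bit x87 register image (64-bit mantissa plus
   16-bit sign/exponent word) and the internal CPU86_LDouble representation.
   Without USE_X86LDOUBLE the internal format is a 64-bit double, so the
   conversion is lossy. */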
5138void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5139{
5140 CPU86_LDoubleU temp;
5141 int e;
5142
5143 temp.d = f;
5144 /* mantissa */
5145 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5146 /* exponent + sign */
5147 e = EXPD(temp) - EXPBIAS + 16383;
5148 e |= SIGND(temp) >> 16;
5149 *pexp = e;
5150}
5151
5152CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5153{
5154 CPU86_LDoubleU temp;
5155 int e;
5156 uint64_t ll;
5157
5158 /* XXX: handle overflow ? */
5159 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5160 e |= (upper >> 4) & 0x800; /* sign */
5161 ll = (mant >> 11) & ((1LL << 52) - 1);
5162#ifdef __arm__
5163 temp.l.upper = (e << 20) | (ll >> 32);
5164 temp.l.lower = ll;
5165#else
5166 temp.ll = ll | ((uint64_t)e << 52);
5167#endif
5168 return temp.d;
5169}
5170
5171#else
5172
5173void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5174{
5175 CPU86_LDoubleU temp;
5176
5177 temp.d = f;
5178 *pmant = temp.l.lower;
5179 *pexp = temp.l.upper;
5180}
5181
5182CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5183{
5184 CPU86_LDoubleU temp;
5185
5186 temp.l.upper = upper;
5187 temp.l.lower = mant;
5188 return temp.d;
5189}
5190#endif
5191
5192#ifdef TARGET_X86_64
5193
5194//#define DEBUG_MULDIV
5195
5196static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5197{
5198 *plow += a;
5199 /* carry test */
5200 if (*plow < a)
5201 (*phigh)++;
5202 *phigh += b;
5203}
5204
5205static void neg128(uint64_t *plow, uint64_t *phigh)
5206{
5207 *plow = ~ *plow;
5208 *phigh = ~ *phigh;
5209 add128(plow, phigh, 1, 0);
5210}
5211
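/* 128-by-64 bit unsigned division used by DIV: the quotient is returned in
   *plow and the remainder in *phigh. When the high half of the dividend is
   non-zero, a simple restoring shift-and-subtract loop is used. */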
5212/* return TRUE if overflow */
5213static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5214{
5215 uint64_t q, r, a1, a0;
5216 int i, qb, ab;
5217
5218 a0 = *plow;
5219 a1 = *phigh;
5220 if (a1 == 0) {
5221 q = a0 / b;
5222 r = a0 % b;
5223 *plow = q;
5224 *phigh = r;
5225 } else {
5226 if (a1 >= b)
5227 return 1;
5228 /* XXX: use a better algorithm */
5229 for(i = 0; i < 64; i++) {
5230 ab = a1 >> 63;
5231 a1 = (a1 << 1) | (a0 >> 63);
5232 if (ab || a1 >= b) {
5233 a1 -= b;
5234 qb = 1;
5235 } else {
5236 qb = 0;
5237 }
5238 a0 = (a0 << 1) | qb;
5239 }
5240#if defined(DEBUG_MULDIV)
5241 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5242 *phigh, *plow, b, a0, a1);
5243#endif
5244 *plow = a0;
5245 *phigh = a1;
5246 }
5247 return 0;
5248}
5249
5250/* return TRUE if overflow */
5251static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5252{
5253 int sa, sb;
5254 sa = ((int64_t)*phigh < 0);
5255 if (sa)
5256 neg128(plow, phigh);
5257 sb = (b < 0);
5258 if (sb)
5259 b = -b;
5260 if (div64(plow, phigh, b) != 0)
5261 return 1;
5262 if (sa ^ sb) {
5263 if (*plow > (1ULL << 63))
5264 return 1;
5265 *plow = - *plow;
5266 } else {
5267 if (*plow >= (1ULL << 63))
5268 return 1;
5269 }
5270 if (sa)
5271 *phigh = - *phigh;
5272 return 0;
5273}
5274
5275void helper_mulq_EAX_T0(target_ulong t0)
5276{
5277 uint64_t r0, r1;
5278
5279 mulu64(&r0, &r1, EAX, t0);
5280 EAX = r0;
5281 EDX = r1;
5282 CC_DST = r0;
5283 CC_SRC = r1;
5284}
5285
5286void helper_imulq_EAX_T0(target_ulong t0)
5287{
5288 uint64_t r0, r1;
5289
5290 muls64(&r0, &r1, EAX, t0);
5291 EAX = r0;
5292 EDX = r1;
5293 CC_DST = r0;
5294 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5295}
5296
5297target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5298{
5299 uint64_t r0, r1;
5300
5301 muls64(&r0, &r1, t0, t1);
5302 CC_DST = r0;
5303 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5304 return r0;
5305}
5306
5307void helper_divq_EAX(target_ulong t0)
5308{
5309 uint64_t r0, r1;
5310 if (t0 == 0) {
5311 raise_exception(EXCP00_DIVZ);
5312 }
5313 r0 = EAX;
5314 r1 = EDX;
5315 if (div64(&r0, &r1, t0))
5316 raise_exception(EXCP00_DIVZ);
5317 EAX = r0;
5318 EDX = r1;
5319}
5320
5321void helper_idivq_EAX(target_ulong t0)
5322{
5323 uint64_t r0, r1;
5324 if (t0 == 0) {
5325 raise_exception(EXCP00_DIVZ);
5326 }
5327 r0 = EAX;
5328 r1 = EDX;
5329 if (idiv64(&r0, &r1, t0))
5330 raise_exception(EXCP00_DIVZ);
5331 EAX = r0;
5332 EDX = r1;
5333}
5334#endif
5335
5336static void do_hlt(void)
5337{
5338 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5339 env->halted = 1;
5340 env->exception_index = EXCP_HLT;
5341 cpu_loop_exit();
5342}
5343
5344void helper_hlt(int next_eip_addend)
5345{
5346 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5347 EIP += next_eip_addend;
5348
5349 do_hlt();
5350}
5351
5352void helper_monitor(target_ulong ptr)
5353{
5354 if ((uint32_t)ECX != 0)
5355 raise_exception(EXCP0D_GPF);
5356 /* XXX: store address ? */
5357 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5358}
5359
5360void helper_mwait(int next_eip_addend)
5361{
5362 if ((uint32_t)ECX != 0)
5363 raise_exception(EXCP0D_GPF);
5364#ifdef VBOX
5365 helper_hlt(next_eip_addend);
5366#else
5367 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5368 EIP += next_eip_addend;
5369
5370 /* XXX: not complete but not completely erroneous */
5371 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5372 /* more than one CPU: do not sleep because another CPU may
5373 wake this one */
5374 } else {
5375 do_hlt();
5376 }
5377#endif
5378}
5379
5380void helper_debug(void)
5381{
5382 env->exception_index = EXCP_DEBUG;
5383 cpu_loop_exit();
5384}
5385
5386void helper_raise_interrupt(int intno, int next_eip_addend)
5387{
5388 raise_interrupt(intno, 1, 0, next_eip_addend);
5389}
5390
5391void helper_raise_exception(int exception_index)
5392{
5393 raise_exception(exception_index);
5394}
5395
5396void helper_cli(void)
5397{
5398 env->eflags &= ~IF_MASK;
5399}
5400
5401void helper_sti(void)
5402{
5403 env->eflags |= IF_MASK;
5404}
5405
5406#ifdef VBOX
5407void helper_cli_vme(void)
5408{
5409 env->eflags &= ~VIF_MASK;
5410}
5411
5412void helper_sti_vme(void)
5413{
5414 /* First check, then change eflags according to the AMD manual */
5415 if (env->eflags & VIP_MASK) {
5416 raise_exception(EXCP0D_GPF);
5417 }
5418 env->eflags |= VIF_MASK;
5419}
5420#endif
5421
5422#if 0
5423/* vm86plus instructions */
5424void helper_cli_vm(void)
5425{
5426 env->eflags &= ~VIF_MASK;
5427}
5428
5429void helper_sti_vm(void)
5430{
5431 env->eflags |= VIF_MASK;
5432 if (env->eflags & VIP_MASK) {
5433 raise_exception(EXCP0D_GPF);
5434 }
5435}
5436#endif
5437
5438void helper_set_inhibit_irq(void)
5439{
5440 env->hflags |= HF_INHIBIT_IRQ_MASK;
5441}
5442
5443void helper_reset_inhibit_irq(void)
5444{
5445 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5446}
5447
5448void helper_boundw(target_ulong a0, int v)
5449{
5450 int low, high;
5451 low = ldsw(a0);
5452 high = ldsw(a0 + 2);
5453 v = (int16_t)v;
5454 if (v < low || v > high) {
5455 raise_exception(EXCP05_BOUND);
5456 }
5457 FORCE_RET();
5458}
5459
5460void helper_boundl(target_ulong a0, int v)
5461{
5462 int low, high;
5463 low = ldl(a0);
5464 high = ldl(a0 + 4);
5465 if (v < low || v > high) {
5466 raise_exception(EXCP05_BOUND);
5467 }
5468 FORCE_RET();
5469}
5470
5471static float approx_rsqrt(float a)
5472{
5473 return 1.0 / sqrt(a);
5474}
5475
5476static float approx_rcp(float a)
5477{
5478 return 1.0 / a;
5479}
5480
5481#if !defined(CONFIG_USER_ONLY)
5482
5483#define MMUSUFFIX _mmu
5484
5485#define SHIFT 0
5486#include "softmmu_template.h"
5487
5488#define SHIFT 1
5489#include "softmmu_template.h"
5490
5491#define SHIFT 2
5492#include "softmmu_template.h"
5493
5494#define SHIFT 3
5495#include "softmmu_template.h"
5496
5497#endif
5498
5499#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5500/* This code assumes that real physical addresses always fit into a host CPU register,
5501 which is wrong in general, but true for our current use cases. */
5502RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5503{
5504 return remR3PhysReadS8(addr);
5505}
5506RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5507{
5508 return remR3PhysReadU8(addr);
5509}
5510void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5511{
5512 remR3PhysWriteU8(addr, val);
5513}
5514RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5515{
5516 return remR3PhysReadS16(addr);
5517}
5518RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5519{
5520 return remR3PhysReadU16(addr);
5521}
5522void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5523{
5524 remR3PhysWriteU16(addr, val);
5525}
5526RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5527{
5528 return remR3PhysReadS32(addr);
5529}
5530RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5531{
5532 return remR3PhysReadU32(addr);
5533}
5534void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5535{
5536 remR3PhysWriteU32(addr, val);
5537}
5538uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5539{
5540 return remR3PhysReadU64(addr);
5541}
5542void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5543{
5544 remR3PhysWriteU64(addr, val);
5545}
5546#endif
5547
5548/* try to fill the TLB and return an exception if error. If retaddr is
5549 NULL, it means that the function was called in C code (i.e. not
5550 from generated code or from helper.c) */
5551/* XXX: fix it to restore all registers */
5552void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5553{
5554 TranslationBlock *tb;
5555 int ret;
5556 unsigned long pc;
5557 CPUX86State *saved_env;
5558
5559 /* XXX: hack to restore env in all cases, even if not called from
5560 generated code */
5561 saved_env = env;
5562 env = cpu_single_env;
5563
5564 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5565 if (ret) {
5566 if (retaddr) {
5567 /* now we have a real cpu fault */
5568 pc = (unsigned long)retaddr;
5569 tb = tb_find_pc(pc);
5570 if (tb) {
5571 /* the PC is inside the translated code. It means that we have
5572 a virtual CPU fault */
5573 cpu_restore_state(tb, env, pc, NULL);
5574 }
5575 }
5576 raise_exception_err(env->exception_index, env->error_code);
5577 }
5578 env = saved_env;
5579}
5580
5581#ifdef VBOX
5582
5583/**
5584 * Correctly computes the eflags.
5585 * @returns eflags.
5586 * @param env1 CPU environment.
5587 */
5588uint32_t raw_compute_eflags(CPUX86State *env1)
5589{
5590 CPUX86State *savedenv = env;
5591 uint32_t efl;
5592 env = env1;
5593 efl = compute_eflags();
5594 env = savedenv;
5595 return efl;
5596}
5597
5598/**
5599 * Reads a byte from a virtual address in the guest memory area.
5600 * XXX: does this work for all addresses? swapped out pages?
5601 * @returns the data byte read.
5602 * @param env1 CPU environment.
5603 * @param addr GC Virtual address.
5604 */
5605uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5606{
5607 CPUX86State *savedenv = env;
5608 uint8_t u8;
5609 env = env1;
5610 u8 = ldub_kernel(addr);
5611 env = savedenv;
5612 return u8;
5613}
5614
5615/**
5616 * Reads a 16-bit word from a virtual address in the guest memory area.
5617 * XXX: does this work for all addresses? swapped out pages?
5618 * @returns the 16-bit word read.
5619 * @param env1 CPU environment.
5620 * @param addr GC Virtual address.
5621 */
5622uint16_t read_word(CPUX86State *env1, target_ulong addr)
5623{
5624 CPUX86State *savedenv = env;
5625 uint16_t u16;
5626 env = env1;
5627 u16 = lduw_kernel(addr);
5628 env = savedenv;
5629 return u16;
5630}
5631
5632/**
5633 * Reads a 32-bit dword from a virtual address in the guest memory area.
5634 * XXX: does this work for all addresses? swapped out pages?
5635 * @returns the 32-bit dword read.
5636 * @param env1 CPU environment.
5637 * @param addr GC Virtual address.
5638 */
5639uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5640{
5641 CPUX86State *savedenv = env;
5642 uint32_t u32;
5643 env = env1;
5644 u32 = ldl_kernel(addr);
5645 env = savedenv;
5646 return u32;
5647}
5648
5649/**
5650 * Writes a byte to a virtual address in the guest memory area.
5651 * XXX: does this work for all addresses? swapped out pages?
5653 * @param env1 CPU environment.
5654 * @param addr GC Virtual address.
5655 * @param val byte value
5656 */
5657void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5658{
5659 CPUX86State *savedenv = env;
5660 env = env1;
5661 stb(addr, val);
5662 env = savedenv;
5663}
5664
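/**
 * Writes a 16-bit word to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC Virtual address.
 * @param val word value
 */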
5665void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5666{
5667 CPUX86State *savedenv = env;
5668 env = env1;
5669 stw(addr, val);
5670 env = savedenv;
5671}
5672
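/**
 * Writes a 32-bit dword to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC Virtual address.
 * @param val dword value
 */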
5673void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5674{
5675 CPUX86State *savedenv = env;
5676 env = env1;
5677 stl(addr, val);
5678 env = savedenv;
5679}
5680
5681/**
5682 * Correctly loads a selector into a segment register, updating the internal
5683 * qemu data/caches.
5684 * @param env1 CPU environment.
5685 * @param seg_reg Segment register.
5686 * @param selector Selector to load.
5687 */
5688void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5689{
5690 CPUX86State *savedenv = env;
5691 jmp_buf old_buf;
5692
5693 env = env1;
5694
5695 if ( env->eflags & X86_EFL_VM
5696 || !(env->cr[0] & X86_CR0_PE))
5697 {
5698 load_seg_vm(seg_reg, selector);
5699
5700 env = savedenv;
5701
5702 /* Successful sync. */
5703 env1->segs[seg_reg].newselector = 0;
5704 }
5705 else
5706 {
5707 /* For some reason it works even without saving/restoring the jump buffer, and since this code
5708 is time critical, let's not do that */
5709#ifdef FORCE_SEGMENT_SYNC
5710 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5711#endif
5712 if (setjmp(env1->jmp_env) == 0)
5713 {
5714 if (seg_reg == R_CS)
5715 {
5716 uint32_t e1, e2;
5717 e1 = e2 = 0;
5718 load_segment(&e1, &e2, selector);
5719 cpu_x86_load_seg_cache(env, R_CS, selector,
5720 get_seg_base(e1, e2),
5721 get_seg_limit(e1, e2),
5722 e2);
5723 }
5724 else
5725 helper_load_seg(seg_reg, selector);
5726 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5727 loading of 0 selectors, which in turn led to subtle problems like #3588 */
5728
5729 env = savedenv;
5730
5731 /* Successful sync. */
5732 env1->segs[seg_reg].newselector = 0;
5733 }
5734 else
5735 {
5736 env = savedenv;
5737
5738 /* Postpone sync until the guest uses the selector. */
5739 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5740 env1->segs[seg_reg].newselector = selector;
5741 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5742 env1->exception_index = -1;
5743 env1->error_code = 0;
5744 env1->old_exception = -1;
5745 }
5746#ifdef FORCE_SEGMENT_SYNC
5747 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5748#endif
5749 }
5750
5751}
5752
5753DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5754{
5755 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5756}
5757
5758
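/**
 * Translates and executes exactly one guest instruction: a temporary
 * translation block is generated for the current EIP, executed until EIP
 * changes or an external interrupt is pending, and then invalidated and
 * freed again.
 * @returns 0.
 * @param env1 CPU environment.
 */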
5759int emulate_single_instr(CPUX86State *env1)
5760{
5761 TranslationBlock *tb;
5762 TranslationBlock *current;
5763 int flags;
5764 uint8_t *tc_ptr;
5765 target_ulong old_eip;
5766
5767 /* ensures env is loaded! */
5768 CPUX86State *savedenv = env;
5769 env = env1;
5770
5771 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5772
5773 current = env->current_tb;
5774 env->current_tb = NULL;
5775 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5776
5777 /*
5778 * Translate only one instruction.
5779 */
5780 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5781 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5782 env->segs[R_CS].base, flags, 0);
5783
5784 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5785
5786
5787 /* tb_link_phys: */
5788 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5789 tb->jmp_next[0] = NULL;
5790 tb->jmp_next[1] = NULL;
5791 Assert(tb->jmp_next[0] == NULL);
5792 Assert(tb->jmp_next[1] == NULL);
5793 if (tb->tb_next_offset[0] != 0xffff)
5794 tb_reset_jump(tb, 0);
5795 if (tb->tb_next_offset[1] != 0xffff)
5796 tb_reset_jump(tb, 1);
5797
5798 /*
5799 * Execute it using emulation
5800 */
5801 old_eip = env->eip;
5802 env->current_tb = tb;
5803
5804 /*
5805 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
5806 * Perhaps not a very safe hack.
5807 */
5808 while(old_eip == env->eip)
5809 {
5810 tc_ptr = tb->tc_ptr;
5811
5812#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5813 int fake_ret;
5814 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5815#else
5816 tcg_qemu_tb_exec(tc_ptr);
5817#endif
5818 /*
5819 * Exit once we detect an external interrupt and interrupts are enabled
5820 */
5821 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5822 ( (env->eflags & IF_MASK) &&
5823 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5824 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5825 {
5826 break;
5827 }
5828 }
5829 env->current_tb = current;
5830
5831 tb_phys_invalidate(tb, -1);
5832 tb_free(tb);
5833/*
5834 Assert(tb->tb_next_offset[0] == 0xffff);
5835 Assert(tb->tb_next_offset[1] == 0xffff);
5836 Assert(tb->tb_next[0] == 0xffff);
5837 Assert(tb->tb_next[1] == 0xffff);
5838 Assert(tb->jmp_next[0] == NULL);
5839 Assert(tb->jmp_next[1] == NULL);
5840 Assert(tb->jmp_first == NULL); */
5841
5842 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5843
5844 /*
5845 * Execute the next instruction when we encounter instruction fusing.
5846 */
5847 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5848 {
5849 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5850 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5851 emulate_single_instr(env);
5852 }
5853
5854 env = savedenv;
5855 return 0;
5856}
5857
5858/**
5859 * Correctly loads a new ldtr selector.
5860 *
5861 * @param env1 CPU environment.
5862 * @param selector Selector to load.
5863 */
5864void sync_ldtr(CPUX86State *env1, int selector)
5865{
5866 CPUX86State *saved_env = env;
5867 if (setjmp(env1->jmp_env) == 0)
5868 {
5869 env = env1;
5870 helper_lldt(selector);
5871 env = saved_env;
5872 }
5873 else
5874 {
5875 env = saved_env;
5876#ifdef VBOX_STRICT
5877 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5878#endif
5879 }
5880}
5881
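/**
 * Raw-mode variant of the TSS stack lookup: reads the ring 'dpl' stack
 * segment and stack pointer from the current TSS.
 * @returns 1 on success, 0 if the TSS is too small (instead of raising #TS).
 * @param env1 CPU environment.
 * @param ss_ptr Where to store the stack segment selector.
 * @param esp_ptr Where to store the stack pointer.
 * @param dpl Privilege level of the stack to fetch.
 */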
5882int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5883 uint32_t *esp_ptr, int dpl)
5884{
5885 int type, index, shift;
5886
5887 CPUX86State *savedenv = env;
5888 env = env1;
5889
5890 if (!(env->tr.flags & DESC_P_MASK))
5891 cpu_abort(env, "invalid tss");
5892 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5893 if ((type & 7) != 1)
5894 cpu_abort(env, "invalid tss type %d", type);
5895 shift = type >> 3;
5896 index = (dpl * 4 + 2) << shift;
5897 if (index + (4 << shift) - 1 > env->tr.limit)
5898 {
5899 env = savedenv;
5900 return 0;
5901 }
5902 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5903
5904 if (shift == 0) {
5905 *esp_ptr = lduw_kernel(env->tr.base + index);
5906 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5907 } else {
5908 *esp_ptr = ldl_kernel(env->tr.base + index);
5909 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5910 }
5911
5912 env = savedenv;
5913 return 1;
5914}
5915
5916//*****************************************************************************
5917// Needs to be at the bottom of the file (overriding macros)
5918
5919#ifndef VBOX
5920static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5921#else /* VBOX */
5922DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5923#endif /* VBOX */
5924{
5925 return *(CPU86_LDouble *)ptr;
5926}
5927
5928#ifndef VBOX
5929static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5930#else /* VBOX */
5931DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5932#endif /* VBOX */
5933{
5934 *(CPU86_LDouble *)ptr = f;
5935}
5936
5937#undef stw
5938#undef stl
5939#undef stq
5940#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5941#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5942#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
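/* From here on, stw/stl/stq (and the ld* macros redefined further below)
   access host memory directly through the given pointer instead of going
   through the softmmu, so the raw FPU state helpers below operate on a plain
   memory buffer. */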
5943
5944//*****************************************************************************
5945void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5946{
5947 int fpus, fptag, i, nb_xmm_regs;
5948 CPU86_LDouble tmp;
5949 uint8_t *addr;
5950 int data64 = !!(env->hflags & HF_LMA_MASK);
5951
5952 if (env->cpuid_features & CPUID_FXSR)
5953 {
5954 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5955 fptag = 0;
5956 for(i = 0; i < 8; i++) {
5957 fptag |= (env->fptags[i] << i);
5958 }
5959 stw(ptr, env->fpuc);
5960 stw(ptr + 2, fpus);
5961 stw(ptr + 4, fptag ^ 0xff);
5962
5963 addr = ptr + 0x20;
5964 for(i = 0;i < 8; i++) {
5965 tmp = ST(i);
5966 helper_fstt_raw(tmp, addr);
5967 addr += 16;
5968 }
5969
5970 if (env->cr[4] & CR4_OSFXSR_MASK) {
5971 /* XXX: finish it */
5972 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5973 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5974 nb_xmm_regs = 8 << data64;
5975 addr = ptr + 0xa0;
5976 for(i = 0; i < nb_xmm_regs; i++) {
5977#if __GNUC__ < 4
5978 stq(addr, env->xmm_regs[i].XMM_Q(0));
5979 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5980#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5981 stl(addr, env->xmm_regs[i].XMM_L(0));
5982 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5983 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5984 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5985#endif
5986 addr += 16;
5987 }
5988 }
5989 }
5990 else
5991 {
5992 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5993 int fptag;
5994
5995 fp->FCW = env->fpuc;
5996 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5997 fptag = 0;
5998 for (i=7; i>=0; i--) {
5999 fptag <<= 2;
6000 if (env->fptags[i]) {
6001 fptag |= 3;
6002 } else {
6003 /* the FPU automatically computes it */
6004 }
6005 }
6006 fp->FTW = fptag;
6007
6008 for(i = 0;i < 8; i++) {
6009 tmp = ST(i);
6010 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6011 }
6012 }
6013}
6014
6015//*****************************************************************************
6016#undef lduw
6017#undef ldl
6018#undef ldq
6019#define lduw(a) *(uint16_t *)(a)
6020#define ldl(a) *(uint32_t *)(a)
6021#define ldq(a) *(uint64_t *)(a)
6022//*****************************************************************************
6023void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6024{
6025 int i, fpus, fptag, nb_xmm_regs;
6026 CPU86_LDouble tmp;
6027 uint8_t *addr;
6028 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6029
6030 if (env->cpuid_features & CPUID_FXSR)
6031 {
6032 env->fpuc = lduw(ptr);
6033 fpus = lduw(ptr + 2);
6034 fptag = lduw(ptr + 4);
6035 env->fpstt = (fpus >> 11) & 7;
6036 env->fpus = fpus & ~0x3800;
6037 fptag ^= 0xff;
6038 for(i = 0;i < 8; i++) {
6039 env->fptags[i] = ((fptag >> i) & 1);
6040 }
6041
6042 addr = ptr + 0x20;
6043 for(i = 0;i < 8; i++) {
6044 tmp = helper_fldt_raw(addr);
6045 ST(i) = tmp;
6046 addr += 16;
6047 }
6048
6049 if (env->cr[4] & CR4_OSFXSR_MASK) {
6050 /* XXX: finish it, endianness */
6051 env->mxcsr = ldl(ptr + 0x18);
6052 //ldl(ptr + 0x1c);
6053 nb_xmm_regs = 8 << data64;
6054 addr = ptr + 0xa0;
6055 for(i = 0; i < nb_xmm_regs; i++) {
6056#if HC_ARCH_BITS == 32
6057 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6058 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6059 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6060 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6061 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6062#else
6063 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6064 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6065#endif
6066 addr += 16;
6067 }
6068 }
6069 }
6070 else
6071 {
6072 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6073 int fptag, j;
6074
6075 env->fpuc = fp->FCW;
6076 env->fpstt = (fp->FSW >> 11) & 7;
6077 env->fpus = fp->FSW & ~0x3800;
6078 fptag = fp->FTW;
6079 for(i = 0;i < 8; i++) {
6080 env->fptags[i] = ((fptag & 3) == 3);
6081 fptag >>= 2;
6082 }
6083 j = env->fpstt;
6084 for(i = 0;i < 8; i++) {
6085 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6086 ST(i) = tmp;
6087 }
6088 }
6089}
6090//*****************************************************************************
6091//*****************************************************************************
6092
6093#endif /* VBOX */
6094
6095/* Secure Virtual Machine helpers */
6096
6097#if defined(CONFIG_USER_ONLY)
6098
6099void helper_vmrun(int aflag, int next_eip_addend)
6100{
6101}
6102void helper_vmmcall(void)
6103{
6104}
6105void helper_vmload(int aflag)
6106{
6107}
6108void helper_vmsave(int aflag)
6109{
6110}
6111void helper_stgi(void)
6112{
6113}
6114void helper_clgi(void)
6115{
6116}
6117void helper_skinit(void)
6118{
6119}
6120void helper_invlpga(int aflag)
6121{
6122}
6123void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6124{
6125}
6126void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6127{
6128}
6129
6130void helper_svm_check_io(uint32_t port, uint32_t param,
6131 uint32_t next_eip_addend)
6132{
6133}
6134#else
6135
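/* Copy a segment register to/from its VMCB segment descriptor. The VMCB
   'attrib' field packs the descriptor attributes: bits 0-7 hold the access
   byte and bits 8-11 the granularity/size flags, which is what the
   shift-and-mask below implements. */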
6136#ifndef VBOX
6137static inline void svm_save_seg(target_phys_addr_t addr,
6138#else /* VBOX */
6139DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6140#endif /* VBOX */
6141 const SegmentCache *sc)
6142{
6143 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6144 sc->selector);
6145 stq_phys(addr + offsetof(struct vmcb_seg, base),
6146 sc->base);
6147 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6148 sc->limit);
6149 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6150 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6151}
6152
6153#ifndef VBOX
6154static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6155#else /* VBOX */
6156DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6157#endif /* VBOX */
6158{
6159 unsigned int flags;
6160
6161 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6162 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6163 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6164 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6165 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6166}
6167
6168#ifndef VBOX
6169static inline void svm_load_seg_cache(target_phys_addr_t addr,
6170#else /* VBOX */
6171DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6172#endif /* VBOX */
6173 CPUState *env, int seg_reg)
6174{
6175 SegmentCache sc1, *sc = &sc1;
6176 svm_load_seg(addr, sc);
6177 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6178 sc->base, sc->limit, sc->flags);
6179}
6180
6181void helper_vmrun(int aflag, int next_eip_addend)
6182{
6183 target_ulong addr;
6184 uint32_t event_inj;
6185 uint32_t int_ctl;
6186
6187 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6188
6189 if (aflag == 2)
6190 addr = EAX;
6191 else
6192 addr = (uint32_t)EAX;
6193
6194 if (loglevel & CPU_LOG_TB_IN_ASM)
6195 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6196
6197 env->vm_vmcb = addr;
6198
6199 /* save the current CPU state in the hsave page */
6200 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6201 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6202
6203 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6204 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6205
6206 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6207 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6208 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6209 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6210 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6211 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6212
6213 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6214 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6215
6216 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6217 &env->segs[R_ES]);
6218 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6219 &env->segs[R_CS]);
6220 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6221 &env->segs[R_SS]);
6222 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6223 &env->segs[R_DS]);
6224
6225 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6226 EIP + next_eip_addend);
6227 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6228 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6229
6230 /* load the interception bitmaps so we do not need to access the
6231 vmcb in svm mode */
6232 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6233 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6234 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6235 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6236 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6237 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6238
6239 /* enable intercepts */
6240 env->hflags |= HF_SVMI_MASK;
6241
6242 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6243
6244 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6245 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6246
6247 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6248 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6249
6250 /* clear exit_info_2 so we behave like the real hardware */
6251 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6252
6253 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6254 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6255 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6256 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6257 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6258 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6259 if (int_ctl & V_INTR_MASKING_MASK) {
6260 env->v_tpr = int_ctl & V_TPR_MASK;
6261 env->hflags2 |= HF2_VINTR_MASK;
6262 if (env->eflags & IF_MASK)
6263 env->hflags2 |= HF2_HIF_MASK;
6264 }
6265
6266 cpu_load_efer(env,
6267 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6268 env->eflags = 0;
6269 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6270 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6271 CC_OP = CC_OP_EFLAGS;
6272
6273 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6274 env, R_ES);
6275 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6276 env, R_CS);
6277 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6278 env, R_SS);
6279 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6280 env, R_DS);
6281
6282 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6283 env->eip = EIP;
6284 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6285 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6286 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6287 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6288 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6289
6290 /* FIXME: guest state consistency checks */
6291
6292 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6293 case TLB_CONTROL_DO_NOTHING:
6294 break;
6295 case TLB_CONTROL_FLUSH_ALL_ASID:
6296 /* FIXME: this is not 100% correct but should work for now */
6297 tlb_flush(env, 1);
6298 break;
6299 }
6300
6301 env->hflags2 |= HF2_GIF_MASK;
6302
6303 if (int_ctl & V_IRQ_MASK) {
6304 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6305 }
6306
6307 /* maybe we need to inject an event */
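/* event_inj packs the vector, the event type, a "deliver error code" flag
   and a valid bit; the SVM_EVTINJ_* masks below extract these fields (see
   the SVM definitions for the exact bit layout). */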
6308 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6309 if (event_inj & SVM_EVTINJ_VALID) {
6310 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6311 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6312 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6313 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6314
6315 if (loglevel & CPU_LOG_TB_IN_ASM)
6316 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6317 /* FIXME: need to implement valid_err */
6318 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6319 case SVM_EVTINJ_TYPE_INTR:
6320 env->exception_index = vector;
6321 env->error_code = event_inj_err;
6322 env->exception_is_int = 0;
6323 env->exception_next_eip = -1;
6324 if (loglevel & CPU_LOG_TB_IN_ASM)
6325 fprintf(logfile, "INTR");
6326 /* XXX: is this always correct? */
6327 do_interrupt(vector, 0, 0, 0, 1);
6328 break;
6329 case SVM_EVTINJ_TYPE_NMI:
6330 env->exception_index = EXCP02_NMI;
6331 env->error_code = event_inj_err;
6332 env->exception_is_int = 0;
6333 env->exception_next_eip = EIP;
6334 if (loglevel & CPU_LOG_TB_IN_ASM)
6335 fprintf(logfile, "NMI");
6336 cpu_loop_exit();
6337 break;
6338 case SVM_EVTINJ_TYPE_EXEPT:
6339 env->exception_index = vector;
6340 env->error_code = event_inj_err;
6341 env->exception_is_int = 0;
6342 env->exception_next_eip = -1;
6343 if (loglevel & CPU_LOG_TB_IN_ASM)
6344 fprintf(logfile, "EXEPT");
6345 cpu_loop_exit();
6346 break;
6347 case SVM_EVTINJ_TYPE_SOFT:
6348 env->exception_index = vector;
6349 env->error_code = event_inj_err;
6350 env->exception_is_int = 1;
6351 env->exception_next_eip = EIP;
6352 if (loglevel & CPU_LOG_TB_IN_ASM)
6353 fprintf(logfile, "SOFT");
6354 cpu_loop_exit();
6355 break;
6356 }
6357 if (loglevel & CPU_LOG_TB_IN_ASM)
6358 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6359 }
6360}
6361
6362void helper_vmmcall(void)
6363{
6364 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6365 raise_exception(EXCP06_ILLOP);
6366}
6367
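/* VMLOAD loads the additional guest state -- FS, GS, TR and LDTR plus the
   syscall/sysenter MSRs (and KernelGSBase on 64-bit builds) -- from the VMCB
   whose physical address is taken from rAX; helper_vmsave below writes the
   same set back. */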
6368void helper_vmload(int aflag)
6369{
6370 target_ulong addr;
6371 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6372
6373 if (aflag == 2)
6374 addr = EAX;
6375 else
6376 addr = (uint32_t)EAX;
6377
6378 if (loglevel & CPU_LOG_TB_IN_ASM)
6379 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6380 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6381 env->segs[R_FS].base);
6382
6383 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6384 env, R_FS);
6385 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6386 env, R_GS);
6387 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6388 &env->tr);
6389 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6390 &env->ldt);
6391
6392#ifdef TARGET_X86_64
6393 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6394 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6395 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6396 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6397#endif
6398 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6399 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6400 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6401 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6402}
6403
6404void helper_vmsave(int aflag)
6405{
6406 target_ulong addr;
6407 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6408
6409 if (aflag == 2)
6410 addr = EAX;
6411 else
6412 addr = (uint32_t)EAX;
6413
6414 if (loglevel & CPU_LOG_TB_IN_ASM)
6415 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6416 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6417 env->segs[R_FS].base);
6418
6419 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6420 &env->segs[R_FS]);
6421 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6422 &env->segs[R_GS]);
6423 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6424 &env->tr);
6425 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6426 &env->ldt);
6427
6428#ifdef TARGET_X86_64
6429 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6430 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6431 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6432 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6433#endif
6434 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6435 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6436 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6437 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6438}
6439
6440void helper_stgi(void)
6441{
6442 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6443 env->hflags2 |= HF2_GIF_MASK;
6444}
6445
6446void helper_clgi(void)
6447{
6448 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6449 env->hflags2 &= ~HF2_GIF_MASK;
6450}
6451
6452void helper_skinit(void)
6453{
6454 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6455 /* XXX: not implemented */
6456 raise_exception(EXCP06_ILLOP);
6457}
6458
6459void helper_invlpga(int aflag)
6460{
6461 target_ulong addr;
6462 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6463
6464 if (aflag == 2)
6465 addr = EAX;
6466 else
6467 addr = (uint32_t)EAX;
6468
6469 /* XXX: could use the ASID to decide whether the flush is needed
6470 at all */
6471 tlb_flush_page(env, addr);
6472}
6473
6474void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6475{
6476 if (likely(!(env->hflags & HF_SVMI_MASK)))
6477 return;
6478#ifndef VBOX
6479 switch(type) {
6480#ifndef VBOX
6481 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6482#else
6483 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6484 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6485 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6486#endif
6487 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6488 helper_vmexit(type, param);
6489 }
6490 break;
6491#ifndef VBOX
6492 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6493#else
6494 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6495 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6496 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6497#endif
6498 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6499 helper_vmexit(type, param);
6500 }
6501 break;
6502 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6503 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6504 helper_vmexit(type, param);
6505 }
6506 break;
6507 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6508 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6509 helper_vmexit(type, param);
6510 }
6511 break;
6512 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6513 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6514 helper_vmexit(type, param);
6515 }
6516 break;
6517 case SVM_EXIT_MSR:
6518 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6519 /* FIXME: this should be read in at vmrun (faster this way?) */
6520 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
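/* The MSR permission map keeps two bits per MSR (read intercept, then
   write intercept); each of the three architectural MSR ranges below maps
   to its own 2K slice of the bitmap. param is expected to be 0 for RDMSR
   and 1 for WRMSR, so (1 << param) picks the read or the write bit. */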
6521 uint32_t t0, t1;
6522 switch((uint32_t)ECX) {
6523 case 0 ... 0x1fff:
6524 t0 = (ECX * 2) % 8;
6525 t1 = ECX / 8;
6526 break;
6527 case 0xc0000000 ... 0xc0001fff:
6528 t0 = (8192 + ECX - 0xc0000000) * 2;
6529 t1 = (t0 / 8);
6530 t0 %= 8;
6531 break;
6532 case 0xc0010000 ... 0xc0011fff:
6533 t0 = (16384 + ECX - 0xc0010000) * 2;
6534 t1 = (t0 / 8);
6535 t0 %= 8;
6536 break;
6537 default:
6538 helper_vmexit(type, param);
6539 t0 = 0;
6540 t1 = 0;
6541 break;
6542 }
6543 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6544 helper_vmexit(type, param);
6545 }
6546 break;
6547 default:
6548 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6549 helper_vmexit(type, param);
6550 }
6551 break;
6552 }
6553#else
6554 AssertMsgFailed(("We shouldn't be here, HWACCM handles this differently!"));
6555#endif
6556}
6557
6558void helper_svm_check_io(uint32_t port, uint32_t param,
6559 uint32_t next_eip_addend)
6560{
6561 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6562 /* FIXME: this should be read in at vmrun (faster this way?) */
6563 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6564 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
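/* The I/O permission map has one bit per port; bits 4..6 of param encode the
   access size in bytes (1, 2 or 4), so mask covers that many consecutive
   port bits in the lookup below. */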
6565 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6566 /* next EIP */
6567 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6568 env->eip + next_eip_addend);
6569 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6570 }
6571 }
6572}
6573
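/* helper_vmexit writes the current guest state and the exit code/info back
   into the guest VMCB, restores the host context that helper_vmrun stashed
   in vm_hsave, and returns to the main loop via cpu_loop_exit(). */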
6574/* Note: currently only 32 bits of exit_code are used */
6575void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6576{
6577 uint32_t int_ctl;
6578
6579 if (loglevel & CPU_LOG_TB_IN_ASM)
6580 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6581 exit_code, exit_info_1,
6582 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6583 EIP);
6584
6585 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6586 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6587 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6588 } else {
6589 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6590 }
6591
6592 /* Save the VM state in the vmcb */
6593 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6594 &env->segs[R_ES]);
6595 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6596 &env->segs[R_CS]);
6597 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6598 &env->segs[R_SS]);
6599 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6600 &env->segs[R_DS]);
6601
6602 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6603 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6604
6605 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6606 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6607
6608 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6609 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6610 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6611 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6612 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6613
6614 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6615 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6616 int_ctl |= env->v_tpr & V_TPR_MASK;
6617 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6618 int_ctl |= V_IRQ_MASK;
6619 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6620
6621 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6622 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6623 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6624 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6625 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6626 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6627 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6628
6629 /* Reload the host state from vm_hsave */
6630 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6631 env->hflags &= ~HF_SVMI_MASK;
6632 env->intercept = 0;
6633 env->intercept_exceptions = 0;
6634 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6635 env->tsc_offset = 0;
6636
6637 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6638 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6639
6640 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6641 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6642
6643 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6644 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6645 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6646 /* we need to set EFER after the CRs so that the hidden flags get
6647 set properly */
6648 cpu_load_efer(env,
6649 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6650 env->eflags = 0;
6651 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6652 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6653 CC_OP = CC_OP_EFLAGS;
6654
6655 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6656 env, R_ES);
6657 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6658 env, R_CS);
6659 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6660 env, R_SS);
6661 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6662 env, R_DS);
6663
6664 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6665 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6666 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6667
6668 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6669 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6670
6671 /* other setups */
6672 cpu_x86_set_cpl(env, 0);
6673 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6674 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6675
6676 env->hflags2 &= ~HF2_GIF_MASK;
6677 /* FIXME: Resets the current ASID register to zero (host ASID). */
6678
6679 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6680
6681 /* Clears the TSC_OFFSET inside the processor. */
6682
6683 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6684 from the page table indicated by the host's CR3. If the PDPEs contain
6685 illegal state, the processor causes a shutdown. */
6686
6687 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6688 env->cr[0] |= CR0_PE_MASK;
6689 env->eflags &= ~VM_MASK;
6690
6691 /* Disables all breakpoints in the host DR7 register. */
6692
6693 /* Checks the reloaded host state for consistency. */
6694
6695 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6696 host's code segment or non-canonical (in the case of long mode), a
6697 #GP fault is delivered inside the host. */
6698
6699 /* remove any pending exception */
6700 env->exception_index = -1;
6701 env->error_code = 0;
6702 env->old_exception = -1;
6703
6704 cpu_loop_exit();
6705}
6706
6707#endif
6708
6709/* MMX/SSE */
6710/* XXX: optimize by storing fptt and fptags in the static cpu state */
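/* In fptags a zero entry marks a register as valid and a one as empty:
   entering MMX mode tags all eight registers valid and resets the stack top,
   while EMMS tags them all empty again. */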
6711void helper_enter_mmx(void)
6712{
6713 env->fpstt = 0;
6714 *(uint32_t *)(env->fptags) = 0;
6715 *(uint32_t *)(env->fptags + 4) = 0;
6716}
6717
6718void helper_emms(void)
6719{
6720 /* set to empty state */
6721 *(uint32_t *)(env->fptags) = 0x01010101;
6722 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6723}
6724
6725/* XXX: suppress */
6726void helper_movq(uint64_t *d, uint64_t *s)
6727{
6728 *d = *s;
6729}
6730
6731#define SHIFT 0
6732#include "ops_sse.h"
6733
6734#define SHIFT 1
6735#include "ops_sse.h"
6736
6737#define SHIFT 0
6738#include "helper_template.h"
6739#undef SHIFT
6740
6741#define SHIFT 1
6742#include "helper_template.h"
6743#undef SHIFT
6744
6745#define SHIFT 2
6746#include "helper_template.h"
6747#undef SHIFT
6748
6749#ifdef TARGET_X86_64
6750
6751#define SHIFT 3
6752#include "helper_template.h"
6753#undef SHIFT
6754
6755#endif
6756
6757/* bit operations */
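/* bsf/bsr scan for the lowest/highest set bit. A zero operand would loop
   forever here, so the translated code is presumably expected to test for
   zero (and set ZF) before calling these helpers. */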
6758target_ulong helper_bsf(target_ulong t0)
6759{
6760 int count;
6761 target_ulong res;
6762
6763 res = t0;
6764 count = 0;
6765 while ((res & 1) == 0) {
6766 count++;
6767 res >>= 1;
6768 }
6769 return count;
6770}
6771
6772target_ulong helper_bsr(target_ulong t0)
6773{
6774 int count;
6775 target_ulong res, mask;
6776
6777 res = t0;
6778 count = TARGET_LONG_BITS - 1;
6779 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6780 while ((res & mask) == 0) {
6781 count--;
6782 res <<= 1;
6783 }
6784 return count;
6785}
6786
6787
6788static int compute_all_eflags(void)
6789{
6790 return CC_SRC;
6791}
6792
6793static int compute_c_eflags(void)
6794{
6795 return CC_SRC & CC_C;
6796}
6797
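/* Lazy condition code evaluation: CC_OP records which operation last set the
   flags (its operands live in CC_SRC/CC_DST), and each cc_table entry gives
   one function that reconstructs all of EFLAGS and one that computes just
   the carry flag. */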
6798#ifndef VBOX
6799CCTable cc_table[CC_OP_NB] = {
6800 [CC_OP_DYNAMIC] = { /* should never happen */ },
6801
6802 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6803
6804 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6805 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6806 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6807
6808 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6809 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6810 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6811
6812 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6813 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6814 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6815
6816 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6817 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6818 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6819
6820 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6821 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6822 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6823
6824 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6825 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6826 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6827
6828 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6829 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6830 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6831
6832 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6833 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6834 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6835
6836 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6837 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6838 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6839
6840 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6841 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6842 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6843
6844#ifdef TARGET_X86_64
6845 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6846
6847 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6848
6849 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6850
6851 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6852
6853 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6854
6855 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6856
6857 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6858
6859 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6860
6861 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6862
6863 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6864#endif
6865};
6866#else /* VBOX */
6867/* Sync carefully with cpu.h */
6868CCTable cc_table[CC_OP_NB] = {
6869 /* CC_OP_DYNAMIC */ { 0, 0 },
6870
6871 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6872
6873 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6874 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6875 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6876#ifdef TARGET_X86_64
6877 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6878#else
6879 /* CC_OP_MULQ */ { 0, 0 },
6880#endif
6881
6882 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6883 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6884 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6885#ifdef TARGET_X86_64
6886 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6887#else
6888 /* CC_OP_ADDQ */ { 0, 0 },
6889#endif
6890
6891 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6892 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6893 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6894#ifdef TARGET_X86_64
6895 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6896#else
6897 /* CC_OP_ADCQ */ { 0, 0 },
6898#endif
6899
6900 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6901 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6902 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6903#ifdef TARGET_X86_64
6904 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6905#else
6906 /* CC_OP_SUBQ */ { 0, 0 },
6907#endif
6908
6909 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6910 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6911 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6912#ifdef TARGET_X86_64
6913 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6914#else
6915 /* CC_OP_SBBQ */ { 0, 0 },
6916#endif
6917
6918 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6919 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6920 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6921#ifdef TARGET_X86_64
6922 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6923#else
6924 /* CC_OP_LOGICQ */ { 0, 0 },
6925#endif
6926
6927 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6928 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6929 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6930#ifdef TARGET_X86_64
6931 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6932#else
6933 /* CC_OP_INCQ */ { 0, 0 },
6934#endif
6935
6936 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6937 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6938 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6939#ifdef TARGET_X86_64
6940 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6941#else
6942 /* CC_OP_DECQ */ { 0, 0 },
6943#endif
6944
6945 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6946 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6947 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6948#ifdef TARGET_X86_64
6949 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6950#else
6951 /* CC_OP_SHLQ */ { 0, 0 },
6952#endif
6953
6954 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6955 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6956 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6957#ifdef TARGET_X86_64
6958 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6959#else
6960 /* CC_OP_SARQ */ { 0, 0 },
6961#endif
6962};
6963#endif /* VBOX */