VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c@15864

Last change on this file since 15864 was 15744, checked in by vboxsync, 16 years ago

Updated check + comment

File size: 194.4 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
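/* Parity lookup table for the PF flag: entry i is CC_P iff i has an even
   number of set bits, matching the x86 definition (PF reflects the parity of
   the low 8 bits of a result). E.g. parity_table[0x03] == CC_P (two bits set),
   parity_table[0x07] == 0 (three bits set). */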
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
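/* RCL rotates through CF, so a w-bit rotate effectively spans w+1 bits. The
   two tables above reduce the (already masked) rotate count modulo 17 for
   16-bit operands and modulo 9 for 8-bit operands; e.g. rclw_table[17] == 0,
   i.e. a count of 17 behaves like a count of 0. */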
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
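/* The constants above are 0, 1, pi, log10(2), ln(2), log2(e) and log2(10),
   i.e. the values loaded by the x87 FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/
   FLDL2T instructions. */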
113
114/* broken thread support: LOCK-prefixed instructions are serialised with a single global spinlock */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
148 /* if TF will be set -> #GP */
149 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
150 || (new_eflags & TF_MASK)) {
151 raise_exception(EXCP0D_GPF);
152 } else {
153 load_eflags(new_eflags, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
154
155 if (new_eflags & IF_MASK) {
156 env->eflags |= VIF_MASK;
157 } else {
158 env->eflags &= ~VIF_MASK;
159 }
160 }
161}
162
163target_ulong helper_read_eflags_vme(void)
164{
165 uint32_t eflags;
166 eflags = cc_table[CC_OP].compute_all();
167 eflags |= (DF & DF_MASK);
168 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
169 if (env->eflags & VIF_MASK)
170 eflags |= IF_MASK;
171 return eflags;
172}
173
174void helper_dump_state(void)
175{
176 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
177 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
178 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
179 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
180 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
181 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
182 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
183}
184#endif
185
186/* returns non-zero on error */
187#ifndef VBOX
188static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
189#else /* VBOX */
190DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
191#endif /* VBOX */
192 int selector)
193{
194 SegmentCache *dt;
195 int index;
196 target_ulong ptr;
197
198#ifdef VBOX
199 /* Trying to load a selector with RPL=1 while in raw ring-0 mode? */
200 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
201 {
202 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
203 selector = selector & 0xfffc;
204 }
205#endif
206
207 if (selector & 0x4)
208 dt = &env->ldt;
209 else
210 dt = &env->gdt;
211 index = selector & ~7;
212 if ((index + 7) > dt->limit)
213 return -1;
214 ptr = dt->base + index;
215 *e1_ptr = ldl_kernel(ptr);
216 *e2_ptr = ldl_kernel(ptr + 4);
217 return 0;
218}
219
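/* e1/e2 are the low and high 32-bit words of a segment descriptor: the limit
   is split across e1[15:0] and e2[19:16] (scaled by 4K when the G bit is set),
   and the base across e1[31:16], e2[7:0] and e2[31:24]. */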
220#ifndef VBOX
221static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
222#else /* VBOX */
223DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
224#endif /* VBOX */
225{
226 unsigned int limit;
227 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
228 if (e2 & DESC_G_MASK)
229 limit = (limit << 12) | 0xfff;
230 return limit;
231}
232
233#ifndef VBOX
234static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
235#else /* VBOX */
236DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
237#endif /* VBOX */
238{
239 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
240}
241
242#ifndef VBOX
243static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
244#else /* VBOX */
245DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
246#endif /* VBOX */
247{
248 sc->base = get_seg_base(e1, e2);
249 sc->limit = get_seg_limit(e1, e2);
250 sc->flags = e2;
251}
252
253/* init the segment cache in vm86 mode. */
254#ifndef VBOX
255static inline void load_seg_vm(int seg, int selector)
256#else /* VBOX */
257DECLINLINE(void) load_seg_vm(int seg, int selector)
258#endif /* VBOX */
259{
260 selector &= 0xffff;
261#ifdef VBOX
262 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
263
264 if (seg == R_CS)
265 flags |= DESC_CS_MASK;
266
267 cpu_x86_load_seg_cache(env, seg, selector,
268 (selector << 4), 0xffff, flags);
269#else
270 cpu_x86_load_seg_cache(env, seg, selector,
271 (selector << 4), 0xffff, 0);
272#endif
273}
274
275#ifndef VBOX
276static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
277#else /* VBOX */
278DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
279#endif /* VBOX */
280 uint32_t *esp_ptr, int dpl)
281{
282#ifndef VBOX
283 int type, index, shift;
284#else
285 unsigned int type, index, shift;
286#endif
287
288#if 0
289 {
290 int i;
291 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
292 for(i=0;i<env->tr.limit;i++) {
293 printf("%02x ", env->tr.base[i]);
294 if ((i & 7) == 7) printf("\n");
295 }
296 printf("\n");
297 }
298#endif
299
300 if (!(env->tr.flags & DESC_P_MASK))
301 cpu_abort(env, "invalid tss");
302 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
303 if ((type & 7) != 1)
304 cpu_abort(env, "invalid tss type");
305 shift = type >> 3;
306 index = (dpl * 4 + 2) << shift;
307 if (index + (4 << shift) - 1 > env->tr.limit)
308 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
309 if (shift == 0) {
310 *esp_ptr = lduw_kernel(env->tr.base + index);
311 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
312 } else {
313 *esp_ptr = ldl_kernel(env->tr.base + index);
314 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
315 }
316}
317
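/* Load a segment register as part of a task switch. Descriptor checks that
   fail here raise #TS with the faulting selector; a not-present segment
   raises #NP instead. */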
318/* XXX: merge with load_seg() */
319static void tss_load_seg(int seg_reg, int selector)
320{
321 uint32_t e1, e2;
322 int rpl, dpl, cpl;
323
324#ifdef VBOX
325 e1 = e2 = 0;
326 cpl = env->hflags & HF_CPL_MASK;
327 /* Trying to load a selector with RPL=1 while in raw ring-0 mode? */
328 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
329 {
330 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
331 selector = selector & 0xfffc;
332 }
333#endif
334
335 if ((selector & 0xfffc) != 0) {
336 if (load_segment(&e1, &e2, selector) != 0)
337 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
338 if (!(e2 & DESC_S_MASK))
339 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
340 rpl = selector & 3;
341 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
342 cpl = env->hflags & HF_CPL_MASK;
343 if (seg_reg == R_CS) {
344 if (!(e2 & DESC_CS_MASK))
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 /* XXX: is it correct ? */
347 if (dpl != rpl)
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 if ((e2 & DESC_C_MASK) && dpl > rpl)
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 } else if (seg_reg == R_SS) {
352 /* SS must be writable data */
353 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if (dpl != cpl || dpl != rpl)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 } else {
358 /* not readable code */
359 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 /* if data or non-conforming code, check the rights */
362 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
363 if (dpl < cpl || dpl < rpl)
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 }
366 }
367 if (!(e2 & DESC_P_MASK))
368 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
369 cpu_x86_load_seg_cache(env, seg_reg, selector,
370 get_seg_base(e1, e2),
371 get_seg_limit(e1, e2),
372 e2);
373 } else {
374 if (seg_reg == R_SS || seg_reg == R_CS)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 }
377}
378
379#define SWITCH_TSS_JMP 0
380#define SWITCH_TSS_IRET 1
381#define SWITCH_TSS_CALL 2
382
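/* Perform a hardware task switch: read the new TSS, save the outgoing context
   into the current TSS, update the busy bits in the GDT descriptors, then load
   CR3, EFLAGS and the general registers, and finally reload LDT and the
   segment registers from the new TSS. */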
383/* XXX: restore CPU state in registers (PowerPC case) */
384static void switch_tss(int tss_selector,
385 uint32_t e1, uint32_t e2, int source,
386 uint32_t next_eip)
387{
388 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
389 target_ulong tss_base;
390 uint32_t new_regs[8], new_segs[6];
391 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
392 uint32_t old_eflags, eflags_mask;
393 SegmentCache *dt;
394#ifndef VBOX
395 int index;
396#else
397 unsigned int index;
398#endif
399 target_ulong ptr;
400
401 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
402#ifdef DEBUG_PCALL
403 if (loglevel & CPU_LOG_PCALL)
404 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
405#endif
406
407#if defined(VBOX) && defined(DEBUG)
408 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
409#endif
410
411 /* if task gate, we read the TSS segment and we load it */
412 if (type == 5) {
413 if (!(e2 & DESC_P_MASK))
414 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
415 tss_selector = e1 >> 16;
416 if (tss_selector & 4)
417 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
418 if (load_segment(&e1, &e2, tss_selector) != 0)
419 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
420 if (e2 & DESC_S_MASK)
421 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
422 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
423 if ((type & 7) != 1)
424 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
425 }
426
427 if (!(e2 & DESC_P_MASK))
428 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
429
430 if (type & 8)
431 tss_limit_max = 103;
432 else
433 tss_limit_max = 43;
434 tss_limit = get_seg_limit(e1, e2);
435 tss_base = get_seg_base(e1, e2);
436 if ((tss_selector & 4) != 0 ||
437 tss_limit < tss_limit_max)
438 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
439 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
440 if (old_type & 8)
441 old_tss_limit_max = 103;
442 else
443 old_tss_limit_max = 43;
444
445 /* read all the registers from the new TSS */
446 if (type & 8) {
447 /* 32 bit */
448 new_cr3 = ldl_kernel(tss_base + 0x1c);
449 new_eip = ldl_kernel(tss_base + 0x20);
450 new_eflags = ldl_kernel(tss_base + 0x24);
451 for(i = 0; i < 8; i++)
452 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
453 for(i = 0; i < 6; i++)
454 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
455 new_ldt = lduw_kernel(tss_base + 0x60);
456 new_trap = ldl_kernel(tss_base + 0x64);
457 } else {
458 /* 16 bit */
459 new_cr3 = 0;
460 new_eip = lduw_kernel(tss_base + 0x0e);
461 new_eflags = lduw_kernel(tss_base + 0x10);
462 for(i = 0; i < 8; i++)
463 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
464 for(i = 0; i < 4; i++)
465 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
466 new_ldt = lduw_kernel(tss_base + 0x2a);
467 new_segs[R_FS] = 0;
468 new_segs[R_GS] = 0;
469 new_trap = 0;
470 }
471
472 /* NOTE: we must avoid memory exceptions during the task switch,
473 so we make dummy accesses beforehand */
474 /* XXX: it can still fail in some cases, so a bigger hack is
475 necessary to validate the TLB after having done the accesses */
476
477 v1 = ldub_kernel(env->tr.base);
478 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
479 stb_kernel(env->tr.base, v1);
480 stb_kernel(env->tr.base + old_tss_limit_max, v2);
481
482 /* clear busy bit (it is restartable) */
483 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
484 target_ulong ptr;
485 uint32_t e2;
486 ptr = env->gdt.base + (env->tr.selector & ~7);
487 e2 = ldl_kernel(ptr + 4);
488 e2 &= ~DESC_TSS_BUSY_MASK;
489 stl_kernel(ptr + 4, e2);
490 }
491 old_eflags = compute_eflags();
492 if (source == SWITCH_TSS_IRET)
493 old_eflags &= ~NT_MASK;
494
495 /* save the current state in the old TSS */
496 if (type & 8) {
497 /* 32 bit */
498 stl_kernel(env->tr.base + 0x20, next_eip);
499 stl_kernel(env->tr.base + 0x24, old_eflags);
500 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
501 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
502 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
503 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
504 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
505 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
506 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
507 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
508 for(i = 0; i < 6; i++)
509 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
510#if defined(VBOX) && defined(DEBUG)
511 printf("TSS 32 bits switch\n");
512 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
513#endif
514 } else {
515 /* 16 bit */
516 stw_kernel(env->tr.base + 0x0e, next_eip);
517 stw_kernel(env->tr.base + 0x10, old_eflags);
518 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
519 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
520 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
521 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
522 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
523 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
524 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
525 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
526 for(i = 0; i < 4; i++)
527 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
528 }
529
530 /* now if an exception occurs, it will occur in the next task
531 context */
532
533 if (source == SWITCH_TSS_CALL) {
534 stw_kernel(tss_base, env->tr.selector);
535 new_eflags |= NT_MASK;
536 }
537
538 /* set busy bit */
539 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
540 target_ulong ptr;
541 uint32_t e2;
542 ptr = env->gdt.base + (tss_selector & ~7);
543 e2 = ldl_kernel(ptr + 4);
544 e2 |= DESC_TSS_BUSY_MASK;
545 stl_kernel(ptr + 4, e2);
546 }
547
548 /* set the new CPU state */
549 /* from this point, any exception which occurs can give problems */
550 env->cr[0] |= CR0_TS_MASK;
551 env->hflags |= HF_TS_MASK;
552 env->tr.selector = tss_selector;
553 env->tr.base = tss_base;
554 env->tr.limit = tss_limit;
555 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
556
557 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
558 cpu_x86_update_cr3(env, new_cr3);
559 }
560
561 /* load all registers without an exception, then reload them with
562 possible exception */
563 env->eip = new_eip;
564 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
565 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
566 if (!(type & 8))
567 eflags_mask &= 0xffff;
568 load_eflags(new_eflags, eflags_mask);
569 /* XXX: what to do in 16 bit case ? */
570 EAX = new_regs[0];
571 ECX = new_regs[1];
572 EDX = new_regs[2];
573 EBX = new_regs[3];
574 ESP = new_regs[4];
575 EBP = new_regs[5];
576 ESI = new_regs[6];
577 EDI = new_regs[7];
578 if (new_eflags & VM_MASK) {
579 for(i = 0; i < 6; i++)
580 load_seg_vm(i, new_segs[i]);
581 /* in vm86, CPL is always 3 */
582 cpu_x86_set_cpl(env, 3);
583 } else {
584 /* CPL is set to the RPL of CS */
585 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
586 /* load just the selectors first, as the rest may trigger exceptions */
587 for(i = 0; i < 6; i++)
588 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
589 }
590
591 env->ldt.selector = new_ldt & ~4;
592 env->ldt.base = 0;
593 env->ldt.limit = 0;
594 env->ldt.flags = 0;
595
596 /* load the LDT */
597 if (new_ldt & 4)
598 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
599
600 if ((new_ldt & 0xfffc) != 0) {
601 dt = &env->gdt;
602 index = new_ldt & ~7;
603 if ((index + 7) > dt->limit)
604 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
605 ptr = dt->base + index;
606 e1 = ldl_kernel(ptr);
607 e2 = ldl_kernel(ptr + 4);
608 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
609 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
610 if (!(e2 & DESC_P_MASK))
611 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
612 load_seg_cache_raw_dt(&env->ldt, e1, e2);
613 }
614
615 /* load the segments */
616 if (!(new_eflags & VM_MASK)) {
617 tss_load_seg(R_CS, new_segs[R_CS]);
618 tss_load_seg(R_SS, new_segs[R_SS]);
619 tss_load_seg(R_ES, new_segs[R_ES]);
620 tss_load_seg(R_DS, new_segs[R_DS]);
621 tss_load_seg(R_FS, new_segs[R_FS]);
622 tss_load_seg(R_GS, new_segs[R_GS]);
623 }
624
625 /* check that EIP is in the CS segment limits */
626 if (new_eip > env->segs[R_CS].limit) {
627 /* XXX: different exception if CALL ? */
628 raise_exception_err(EXCP0D_GPF, 0);
629 }
630}
631
632/* check if Port I/O is allowed in TSS */
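/* The I/O permission bitmap starts at the 16-bit offset stored at byte 0x66 of
   the 32-bit TSS; one bit covers one port and every bit covered by the access
   must be clear. E.g. a word access to port 0x3FF is allowed only if bits
   0x3FF and 0x400 of the bitmap are both zero. */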
633#ifndef VBOX
634static inline void check_io(int addr, int size)
635{
636 int io_offset, val, mask;
637
638#else /* VBOX */
639DECLINLINE(void) check_io(int addr, int size)
640{
641 int val, mask;
642 unsigned int io_offset;
643#endif /* VBOX */
644 /* TSS must be a valid 32 bit one */
645 if (!(env->tr.flags & DESC_P_MASK) ||
646 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
647 env->tr.limit < 103)
648 goto fail;
649 io_offset = lduw_kernel(env->tr.base + 0x66);
650 io_offset += (addr >> 3);
651 /* Note: the check needs two bytes */
652 if ((io_offset + 1) > env->tr.limit)
653 goto fail;
654 val = lduw_kernel(env->tr.base + io_offset);
655 val >>= (addr & 7);
656 mask = (1 << size) - 1;
657 /* all bits must be zero to allow the I/O */
658 if ((val & mask) != 0) {
659 fail:
660 raise_exception_err(EXCP0D_GPF, 0);
661 }
662}
663
664#ifdef VBOX
665/* Keep in sync with gen_check_external_event() */
666void helper_check_external_event(void)
667{
668 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
669 | CPU_INTERRUPT_EXTERNAL_TIMER
670 | CPU_INTERRUPT_EXTERNAL_DMA))
671 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
672 && (env->eflags & IF_MASK)
673 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
674 {
675 helper_external_event();
676 }
677
678}
679
680void helper_sync_seg(uint32_t reg)
681{
682 assert(env->segs[reg].newselector != 0);
683 sync_seg(env, reg, env->segs[reg].newselector);
684}
685#endif
686
687void helper_check_iob(uint32_t t0)
688{
689 check_io(t0, 1);
690}
691
692void helper_check_iow(uint32_t t0)
693{
694 check_io(t0, 2);
695}
696
697void helper_check_iol(uint32_t t0)
698{
699 check_io(t0, 4);
700}
701
702void helper_outb(uint32_t port, uint32_t data)
703{
704 cpu_outb(env, port, data & 0xff);
705}
706
707target_ulong helper_inb(uint32_t port)
708{
709 return cpu_inb(env, port);
710}
711
712void helper_outw(uint32_t port, uint32_t data)
713{
714 cpu_outw(env, port, data & 0xffff);
715}
716
717target_ulong helper_inw(uint32_t port)
718{
719 return cpu_inw(env, port);
720}
721
722void helper_outl(uint32_t port, uint32_t data)
723{
724 cpu_outl(env, port, data);
725}
726
727target_ulong helper_inl(uint32_t port)
728{
729 return cpu_inl(env, port);
730}
731
732#ifndef VBOX
733static inline unsigned int get_sp_mask(unsigned int e2)
734#else /* VBOX */
735DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
736#endif /* VBOX */
737{
738 if (e2 & DESC_B_MASK)
739 return 0xffffffff;
740 else
741 return 0xffff;
742}
743
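/* On 64-bit targets SET_ESP honours the stack-segment size: a 16-bit stack
   updates only SP, a 32-bit stack zero-extends the value into RSP, and a
   64-bit stack stores it unchanged; 32-bit targets simply mask. */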
744#ifdef TARGET_X86_64
745#define SET_ESP(val, sp_mask)\
746do {\
747 if ((sp_mask) == 0xffff)\
748 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
749 else if ((sp_mask) == 0xffffffffLL)\
750 ESP = (uint32_t)(val);\
751 else\
752 ESP = (val);\
753} while (0)
754#else
755#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
756#endif
757
758/* On 64-bit hosts this addition can overflow, so this segment addition macro
759 * is used to trim the value to 32 bits whenever needed */
760#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
761
762/* XXX: add a is_user flag to have proper security support */
763#define PUSHW(ssp, sp, sp_mask, val)\
764{\
765 sp -= 2;\
766 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
767}
768
769#define PUSHL(ssp, sp, sp_mask, val)\
770{\
771 sp -= 4;\
772 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
773}
774
775#define POPW(ssp, sp, sp_mask, val)\
776{\
777 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
778 sp += 2;\
779}
780
781#define POPL(ssp, sp, sp_mask, val)\
782{\
783 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
784 sp += 4;\
785}
786
787/* protected mode interrupt */
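/* When the handler's code segment is more privileged than the current CPL,
   the new SS:ESP is fetched from the TSS for the handler's DPL and the old
   SS:ESP, EFLAGS, CS:EIP and an optional error code are pushed on the new
   stack; otherwise the current stack is used. */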
788static void do_interrupt_protected(int intno, int is_int, int error_code,
789 unsigned int next_eip, int is_hw)
790{
791 SegmentCache *dt;
792 target_ulong ptr, ssp;
793 int type, dpl, selector, ss_dpl, cpl;
794 int has_error_code, new_stack, shift;
795 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
796 uint32_t old_eip, sp_mask;
797
798#ifdef VBOX
799 ss = ss_e1 = ss_e2 = 0;
800# ifdef VBOX_WITH_VMI
801 if ( intno == 6
802 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
803 {
804 env->exception_index = EXCP_PARAV_CALL;
805 cpu_loop_exit();
806 }
807# endif
808 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
809 cpu_loop_exit();
810#endif
811
812 has_error_code = 0;
813 if (!is_int && !is_hw) {
814 switch(intno) {
815 case 8:
816 case 10:
817 case 11:
818 case 12:
819 case 13:
820 case 14:
821 case 17:
822 has_error_code = 1;
823 break;
824 }
825 }
826 if (is_int)
827 old_eip = next_eip;
828 else
829 old_eip = env->eip;
830
831 dt = &env->idt;
832#ifndef VBOX
833 if (intno * 8 + 7 > dt->limit)
834#else
835 if ((unsigned)intno * 8 + 7 > dt->limit)
836#endif
837 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
838 ptr = dt->base + intno * 8;
839 e1 = ldl_kernel(ptr);
840 e2 = ldl_kernel(ptr + 4);
841 /* check gate type */
842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
843 switch(type) {
844 case 5: /* task gate */
845 /* must do that check here to return the correct error code */
846 if (!(e2 & DESC_P_MASK))
847 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
848 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
849 if (has_error_code) {
850 int type;
851 uint32_t mask;
852 /* push the error code */
853 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
854 shift = type >> 3;
855 if (env->segs[R_SS].flags & DESC_B_MASK)
856 mask = 0xffffffff;
857 else
858 mask = 0xffff;
859 esp = (ESP - (2 << shift)) & mask;
860 ssp = env->segs[R_SS].base + esp;
861 if (shift)
862 stl_kernel(ssp, error_code);
863 else
864 stw_kernel(ssp, error_code);
865 SET_ESP(esp, mask);
866 }
867 return;
868 case 6: /* 286 interrupt gate */
869 case 7: /* 286 trap gate */
870 case 14: /* 386 interrupt gate */
871 case 15: /* 386 trap gate */
872 break;
873 default:
874 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
875 break;
876 }
877 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
878 cpl = env->hflags & HF_CPL_MASK;
879 /* check privilege if software int */
880 if (is_int && dpl < cpl)
881 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
882 /* check valid bit */
883 if (!(e2 & DESC_P_MASK))
884 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
885 selector = e1 >> 16;
886 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
887 if ((selector & 0xfffc) == 0)
888 raise_exception_err(EXCP0D_GPF, 0);
889
890 if (load_segment(&e1, &e2, selector) != 0)
891 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
892 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
893 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 if (dpl > cpl)
896 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
899 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
900 /* to inner privilege */
901 get_ss_esp_from_tss(&ss, &esp, dpl);
902 if ((ss & 0xfffc) == 0)
903 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
904 if ((ss & 3) != dpl)
905 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
906 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
907 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
908 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
909 if (ss_dpl != dpl)
910 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
911 if (!(ss_e2 & DESC_S_MASK) ||
912 (ss_e2 & DESC_CS_MASK) ||
913 !(ss_e2 & DESC_W_MASK))
914 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
915 if (!(ss_e2 & DESC_P_MASK))
916#ifdef VBOX /* See page 3-477 of 253666.pdf */
917 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
918#else
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920#endif
921 new_stack = 1;
922 sp_mask = get_sp_mask(ss_e2);
923 ssp = get_seg_base(ss_e1, ss_e2);
924#if defined(VBOX) && defined(DEBUG)
925 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
926#endif
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
929 if (env->eflags & VM_MASK)
930 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
931 new_stack = 0;
932 sp_mask = get_sp_mask(env->segs[R_SS].flags);
933 ssp = env->segs[R_SS].base;
934 esp = ESP;
935 dpl = cpl;
936 } else {
937 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938 new_stack = 0; /* avoid warning */
939 sp_mask = 0; /* avoid warning */
940 ssp = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
942 }
943
944 shift = type >> 3;
945
946#if 0
947 /* XXX: check that enough room is available */
948 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
949 if (env->eflags & VM_MASK)
950 push_size += 8;
951 push_size <<= shift;
952#endif
953 if (shift == 1) {
954 if (new_stack) {
955 if (env->eflags & VM_MASK) {
956 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
957 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
958 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
959 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
960 }
961 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
962 PUSHL(ssp, esp, sp_mask, ESP);
963 }
964 PUSHL(ssp, esp, sp_mask, compute_eflags());
965 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
966 PUSHL(ssp, esp, sp_mask, old_eip);
967 if (has_error_code) {
968 PUSHL(ssp, esp, sp_mask, error_code);
969 }
970 } else {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHW(ssp, esp, sp_mask, ESP);
980 }
981 PUSHW(ssp, esp, sp_mask, compute_eflags());
982 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHW(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHW(ssp, esp, sp_mask, error_code);
986 }
987 }
988
989 if (new_stack) {
990 if (env->eflags & VM_MASK) {
991 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
992 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
993 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
994 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
995 }
996 ss = (ss & ~3) | dpl;
997 cpu_x86_load_seg_cache(env, R_SS, ss,
998 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
999 }
1000 SET_ESP(esp, sp_mask);
1001
1002 selector = (selector & ~3) | dpl;
1003 cpu_x86_load_seg_cache(env, R_CS, selector,
1004 get_seg_base(e1, e2),
1005 get_seg_limit(e1, e2),
1006 e2);
1007 cpu_x86_set_cpl(env, dpl);
1008 env->eip = offset;
1009
1010 /* interrupt gates clear IF; trap gates leave it set */
1011 if ((type & 1) == 0) {
1012 env->eflags &= ~IF_MASK;
1013 }
1014 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1015}
1016#ifdef VBOX
1017
1018/* check if VME interrupt redirection is enabled in TSS */
1019DECLINLINE(bool) is_vme_irq_redirected(int intno)
1020{
1021 unsigned int io_offset, intredir_offset;
1022 unsigned char val, mask;
1023
1024 /* TSS must be a valid 32 bit one */
1025 if (!(env->tr.flags & DESC_P_MASK) ||
1026 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1027 env->tr.limit < 103)
1028 goto fail;
1029 io_offset = lduw_kernel(env->tr.base + 0x66);
1030 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1031 if (io_offset < 0x68 + 0x20)
1032 io_offset = 0x68 + 0x20;
1033 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1034 intredir_offset = io_offset - 0x20;
1035
1036 intredir_offset += (intno >> 3);
1037 if ((intredir_offset) > env->tr.limit)
1038 goto fail;
1039
1040 val = ldub_kernel(env->tr.base + intredir_offset);
1041 mask = 1 << (unsigned char)(intno & 7);
1042
1043 /* bit set means no redirection. */
1044 if ((val & mask) != 0) {
1045 return false;
1046 }
1047 return true;
1048
1049fail:
1050 raise_exception_err(EXCP0D_GPF, 0);
1051 return true;
1052}
1053
1054/* V86 mode software interrupt with CR4.VME=1 */
1055static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1056{
1057 target_ulong ptr, ssp;
1058 int selector;
1059 uint32_t offset, esp;
1060 uint32_t old_cs, old_eflags;
1061 uint32_t iopl;
1062
1063 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1064
1065 if (!is_vme_irq_redirected(intno))
1066 {
1067 if (iopl == 3)
1068 {
1069 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1070 return;
1071 }
1072 else
1073 raise_exception_err(EXCP0D_GPF, 0);
1074 }
1075
1076 /* virtual mode idt is at linear address 0 */
1077 ptr = 0 + intno * 4;
1078 offset = lduw_kernel(ptr);
1079 selector = lduw_kernel(ptr + 2);
1080 esp = ESP;
1081 ssp = env->segs[R_SS].base;
1082 old_cs = env->segs[R_CS].selector;
1083
1084 old_eflags = compute_eflags();
1085 if (iopl < 3)
1086 {
1087 /* copy VIF into IF and set IOPL to 3 */
1088 if (env->eflags & VIF_MASK)
1089 old_eflags |= IF_MASK;
1090 else
1091 old_eflags &= ~IF_MASK;
1092
1093 old_eflags |= (3 << IOPL_SHIFT);
1094 }
1095
1096 /* XXX: use SS segment size ? */
1097 PUSHW(ssp, esp, 0xffff, old_eflags);
1098 PUSHW(ssp, esp, 0xffff, old_cs);
1099 PUSHW(ssp, esp, 0xffff, next_eip);
1100
1101 /* update processor state */
1102 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1103 env->eip = offset;
1104 env->segs[R_CS].selector = selector;
1105 env->segs[R_CS].base = (selector << 4);
1106 env->eflags &= ~(TF_MASK | RF_MASK);
1107
1108 if (iopl < 3)
1109 env->eflags &= ~VIF_MASK;
1110 else
1111 env->eflags &= ~IF_MASK;
1112}
1113#endif /* VBOX */
1114
1115#ifdef TARGET_X86_64
1116
1117#define PUSHQ(sp, val)\
1118{\
1119 sp -= 8;\
1120 stq_kernel(sp, (val));\
1121}
1122
1123#define POPQ(sp, val)\
1124{\
1125 val = ldq_kernel(sp);\
1126 sp += 8;\
1127}
1128
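/* In the 64-bit TSS, RSP0-RSP2 live at offsets 4, 12 and 20 and IST1-IST7
   start at offset 36, so index = 8 * level + 4 addresses both (callers pass
   ist + 3 to reach the IST entries). */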
1129#ifndef VBOX
1130static inline target_ulong get_rsp_from_tss(int level)
1131#else /* VBOX */
1132DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1133#endif /* VBOX */
1134{
1135 int index;
1136
1137#if 0
1138 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1139 env->tr.base, env->tr.limit);
1140#endif
1141
1142 if (!(env->tr.flags & DESC_P_MASK))
1143 cpu_abort(env, "invalid tss");
1144 index = 8 * level + 4;
1145 if ((index + 7) > env->tr.limit)
1146 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1147 return ldq_kernel(env->tr.base + index);
1148}
1149
1150/* 64 bit interrupt */
1151static void do_interrupt64(int intno, int is_int, int error_code,
1152 target_ulong next_eip, int is_hw)
1153{
1154 SegmentCache *dt;
1155 target_ulong ptr;
1156 int type, dpl, selector, cpl, ist;
1157 int has_error_code, new_stack;
1158 uint32_t e1, e2, e3, ss;
1159 target_ulong old_eip, esp, offset;
1160
1161#ifdef VBOX
1162 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1163 cpu_loop_exit();
1164#endif
1165
1166 has_error_code = 0;
1167 if (!is_int && !is_hw) {
1168 switch(intno) {
1169 case 8:
1170 case 10:
1171 case 11:
1172 case 12:
1173 case 13:
1174 case 14:
1175 case 17:
1176 has_error_code = 1;
1177 break;
1178 }
1179 }
1180 if (is_int)
1181 old_eip = next_eip;
1182 else
1183 old_eip = env->eip;
1184
1185 dt = &env->idt;
1186 if (intno * 16 + 15 > dt->limit)
1187 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1188 ptr = dt->base + intno * 16;
1189 e1 = ldl_kernel(ptr);
1190 e2 = ldl_kernel(ptr + 4);
1191 e3 = ldl_kernel(ptr + 8);
1192 /* check gate type */
1193 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1194 switch(type) {
1195 case 14: /* 386 interrupt gate */
1196 case 15: /* 386 trap gate */
1197 break;
1198 default:
1199 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1200 break;
1201 }
1202 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1203 cpl = env->hflags & HF_CPL_MASK;
1204 /* check privilege if software int */
1205 if (is_int && dpl < cpl)
1206 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1207 /* check valid bit */
1208 if (!(e2 & DESC_P_MASK))
1209 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1210 selector = e1 >> 16;
1211 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1212 ist = e2 & 7;
1213 if ((selector & 0xfffc) == 0)
1214 raise_exception_err(EXCP0D_GPF, 0);
1215
1216 if (load_segment(&e1, &e2, selector) != 0)
1217 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1218 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1219 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1220 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1221 if (dpl > cpl)
1222 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1223 if (!(e2 & DESC_P_MASK))
1224 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1225 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1226 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1227 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1228 /* to inner privilege */
1229 if (ist != 0)
1230 esp = get_rsp_from_tss(ist + 3);
1231 else
1232 esp = get_rsp_from_tss(dpl);
1233 esp &= ~0xfLL; /* align stack */
1234 ss = 0;
1235 new_stack = 1;
1236 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1237 /* to same privilege */
1238 if (env->eflags & VM_MASK)
1239 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1240 new_stack = 0;
1241 if (ist != 0)
1242 esp = get_rsp_from_tss(ist + 3);
1243 else
1244 esp = ESP;
1245 esp &= ~0xfLL; /* align stack */
1246 dpl = cpl;
1247 } else {
1248 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1249 new_stack = 0; /* avoid warning */
1250 esp = 0; /* avoid warning */
1251 }
1252
1253 PUSHQ(esp, env->segs[R_SS].selector);
1254 PUSHQ(esp, ESP);
1255 PUSHQ(esp, compute_eflags());
1256 PUSHQ(esp, env->segs[R_CS].selector);
1257 PUSHQ(esp, old_eip);
1258 if (has_error_code) {
1259 PUSHQ(esp, error_code);
1260 }
1261
1262 if (new_stack) {
1263 ss = 0 | dpl;
1264 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1265 }
1266 ESP = esp;
1267
1268 selector = (selector & ~3) | dpl;
1269 cpu_x86_load_seg_cache(env, R_CS, selector,
1270 get_seg_base(e1, e2),
1271 get_seg_limit(e1, e2),
1272 e2);
1273 cpu_x86_set_cpl(env, dpl);
1274 env->eip = offset;
1275
1276 /* interrupt gates clear IF; trap gates leave it set */
1277 if ((type & 1) == 0) {
1278 env->eflags &= ~IF_MASK;
1279 }
1280 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1281}
1282#endif
1283
1284#if defined(CONFIG_USER_ONLY)
1285void helper_syscall(int next_eip_addend)
1286{
1287 env->exception_index = EXCP_SYSCALL;
1288 env->exception_next_eip = env->eip + next_eip_addend;
1289 cpu_loop_exit();
1290}
1291#else
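/* SYSCALL loads CS from STAR[47:32] (SS is that selector + 8). In long mode
   RCX receives the return RIP, R11 the saved RFLAGS, RFLAGS is masked with
   SFMASK and RIP comes from LSTAR/CSTAR; in legacy mode only IF/VM/RF are
   cleared and EIP comes from STAR[31:0]. */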
1292void helper_syscall(int next_eip_addend)
1293{
1294 int selector;
1295
1296 if (!(env->efer & MSR_EFER_SCE)) {
1297 raise_exception_err(EXCP06_ILLOP, 0);
1298 }
1299 selector = (env->star >> 32) & 0xffff;
1300#ifdef TARGET_X86_64
1301 if (env->hflags & HF_LMA_MASK) {
1302 int code64;
1303
1304 ECX = env->eip + next_eip_addend;
1305 env->regs[11] = compute_eflags();
1306
1307 code64 = env->hflags & HF_CS64_MASK;
1308
1309 cpu_x86_set_cpl(env, 0);
1310 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1311 0, 0xffffffff,
1312 DESC_G_MASK | DESC_P_MASK |
1313 DESC_S_MASK |
1314 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1315 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1316 0, 0xffffffff,
1317 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1318 DESC_S_MASK |
1319 DESC_W_MASK | DESC_A_MASK);
1320 env->eflags &= ~env->fmask;
1321 load_eflags(env->eflags, 0);
1322 if (code64)
1323 env->eip = env->lstar;
1324 else
1325 env->eip = env->cstar;
1326 } else
1327#endif
1328 {
1329 ECX = (uint32_t)(env->eip + next_eip_addend);
1330
1331 cpu_x86_set_cpl(env, 0);
1332 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1333 0, 0xffffffff,
1334 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1335 DESC_S_MASK |
1336 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1337 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1338 0, 0xffffffff,
1339 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1340 DESC_S_MASK |
1341 DESC_W_MASK | DESC_A_MASK);
1342 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1343 env->eip = (uint32_t)env->star;
1344 }
1345}
1346#endif
1347
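/* SYSRET builds the return CS from STAR[63:48] (+16 and with the L bit for a
   64-bit return), SS from STAR[63:48] + 8, and forces CPL 3; in long mode
   RFLAGS is restored from R11, in legacy mode only IF is set again. */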
1348void helper_sysret(int dflag)
1349{
1350 int cpl, selector;
1351
1352 if (!(env->efer & MSR_EFER_SCE)) {
1353 raise_exception_err(EXCP06_ILLOP, 0);
1354 }
1355 cpl = env->hflags & HF_CPL_MASK;
1356 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1357 raise_exception_err(EXCP0D_GPF, 0);
1358 }
1359 selector = (env->star >> 48) & 0xffff;
1360#ifdef TARGET_X86_64
1361 if (env->hflags & HF_LMA_MASK) {
1362 if (dflag == 2) {
1363 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1364 0, 0xffffffff,
1365 DESC_G_MASK | DESC_P_MASK |
1366 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1367 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1368 DESC_L_MASK);
1369 env->eip = ECX;
1370 } else {
1371 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1372 0, 0xffffffff,
1373 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1374 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1375 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1376 env->eip = (uint32_t)ECX;
1377 }
1378 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1379 0, 0xffffffff,
1380 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1381 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1382 DESC_W_MASK | DESC_A_MASK);
1383 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1384 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1385 cpu_x86_set_cpl(env, 3);
1386 } else
1387#endif
1388 {
1389 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1390 0, 0xffffffff,
1391 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1392 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1393 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1394 env->eip = (uint32_t)ECX;
1395 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1396 0, 0xffffffff,
1397 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1398 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1399 DESC_W_MASK | DESC_A_MASK);
1400 env->eflags |= IF_MASK;
1401 cpu_x86_set_cpl(env, 3);
1402 }
1403#ifdef USE_KQEMU
1404 if (kqemu_is_ok(env)) {
1405 if (env->hflags & HF_LMA_MASK)
1406 CC_OP = CC_OP_EFLAGS;
1407 env->exception_index = -1;
1408 cpu_loop_exit();
1409 }
1410#endif
1411}
1412
1413#ifdef VBOX
1414/**
1415 * Checks and processes external VMM events.
1416 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1417 */
1418void helper_external_event(void)
1419{
1420#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1421 uintptr_t uESP;
1422 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
1423 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
1424#endif
1425 /* Keep in sync with flags checked by gen_check_external_event() */
1426 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1427 {
1428 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1429 ~CPU_INTERRUPT_EXTERNAL_HARD);
1430 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1431 }
1432 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1433 {
1434 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1435 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1436 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1437 }
1438 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1439 {
1440 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1441 ~CPU_INTERRUPT_EXTERNAL_DMA);
1442 remR3DmaRun(env);
1443 }
1444 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1445 {
1446 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1447 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1448 remR3TimersRun(env);
1449 }
1450}
1451/* helper for recording call instruction addresses for later scanning */
1452void helper_record_call(void)
1453{
1454 if ( !(env->state & CPU_RAW_RING0)
1455 && (env->cr[0] & CR0_PG_MASK)
1456 && !(env->eflags & X86_EFL_IF))
1457 remR3RecordCall(env);
1458}
1459#endif /* VBOX */
1460
1461/* real mode interrupt */
1462static void do_interrupt_real(int intno, int is_int, int error_code,
1463 unsigned int next_eip)
1464{
1465 SegmentCache *dt;
1466 target_ulong ptr, ssp;
1467 int selector;
1468 uint32_t offset, esp;
1469 uint32_t old_cs, old_eip;
1470
1471 /* real mode (simpler !) */
1472 dt = &env->idt;
1473#ifndef VBOX
1474 if (intno * 4 + 3 > dt->limit)
1475#else
1476 if ((unsigned)intno * 4 + 3 > dt->limit)
1477#endif
1478 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1479 ptr = dt->base + intno * 4;
1480 offset = lduw_kernel(ptr);
1481 selector = lduw_kernel(ptr + 2);
1482 esp = ESP;
1483 ssp = env->segs[R_SS].base;
1484 if (is_int)
1485 old_eip = next_eip;
1486 else
1487 old_eip = env->eip;
1488 old_cs = env->segs[R_CS].selector;
1489 /* XXX: use SS segment size ? */
1490 PUSHW(ssp, esp, 0xffff, compute_eflags());
1491 PUSHW(ssp, esp, 0xffff, old_cs);
1492 PUSHW(ssp, esp, 0xffff, old_eip);
1493
1494 /* update processor state */
1495 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1496 env->eip = offset;
1497 env->segs[R_CS].selector = selector;
1498 env->segs[R_CS].base = (selector << 4);
1499 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1500}
1501
1502/* fake user mode interrupt */
1503void do_interrupt_user(int intno, int is_int, int error_code,
1504 target_ulong next_eip)
1505{
1506 SegmentCache *dt;
1507 target_ulong ptr;
1508 int dpl, cpl, shift;
1509 uint32_t e2;
1510
1511 dt = &env->idt;
1512 if (env->hflags & HF_LMA_MASK) {
1513 shift = 4;
1514 } else {
1515 shift = 3;
1516 }
1517 ptr = dt->base + (intno << shift);
1518 e2 = ldl_kernel(ptr + 4);
1519
1520 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1521 cpl = env->hflags & HF_CPL_MASK;
1522 /* check privilege if software int */
1523 if (is_int && dpl < cpl)
1524 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1525
1526 /* Since we emulate only user space, we cannot do more than
1527 exit the emulation with the suitable exception and error
1528 code */
1529 if (is_int)
1530 EIP = next_eip;
1531}
1532
1533/*
1534 * Begin execution of an interrupt. is_int is TRUE if coming from
1535 * the int instruction. next_eip is the EIP value AFTER the interrupt
1536 * instruction. It is only relevant if is_int is TRUE.
1537 */
1538void do_interrupt(int intno, int is_int, int error_code,
1539 target_ulong next_eip, int is_hw)
1540{
1541 if (loglevel & CPU_LOG_INT) {
1542 if ((env->cr[0] & CR0_PE_MASK)) {
1543 static int count;
1544 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1545 count, intno, error_code, is_int,
1546 env->hflags & HF_CPL_MASK,
1547 env->segs[R_CS].selector, EIP,
1548 (int)env->segs[R_CS].base + EIP,
1549 env->segs[R_SS].selector, ESP);
1550 if (intno == 0x0e) {
1551 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1552 } else {
1553 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1554 }
1555 fprintf(logfile, "\n");
1556 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1557#if 0
1558 {
1559 int i;
1560 uint8_t *ptr;
1561 fprintf(logfile, " code=");
1562 ptr = env->segs[R_CS].base + env->eip;
1563 for(i = 0; i < 16; i++) {
1564 fprintf(logfile, " %02x", ldub(ptr + i));
1565 }
1566 fprintf(logfile, "\n");
1567 }
1568#endif
1569 count++;
1570 }
1571 }
1572 if (env->cr[0] & CR0_PE_MASK) {
1573#ifdef TARGET_X86_64
1574 if (env->hflags & HF_LMA_MASK) {
1575 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1576 } else
1577#endif
1578 {
1579#ifdef VBOX
1580 /* int xx *, v86 code and VME enabled? */
1581 if ( (env->eflags & VM_MASK)
1582 && (env->cr[4] & CR4_VME_MASK)
1583 && is_int
1584 && !is_hw
1585 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1586 )
1587 do_soft_interrupt_vme(intno, error_code, next_eip);
1588 else
1589#endif /* VBOX */
1590 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1591 }
1592 } else {
1593 do_interrupt_real(intno, is_int, error_code, next_eip);
1594 }
1595}
1596
1597/*
1598 * Check nested exceptions and change to double or triple fault if
1599 * needed. It should only be called if this is not an interrupt.
1600 * Returns the new exception number.
1601 */
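/* Example: a #GP (13) raised while delivering a #NP (11) is promoted to #DF,
   and any exception raised while delivering #DF ends in the triple-fault
   cpu_abort() below. */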
1602static int check_exception(int intno, int *error_code)
1603{
1604 int first_contributory = env->old_exception == 0 ||
1605 (env->old_exception >= 10 &&
1606 env->old_exception <= 13);
1607 int second_contributory = intno == 0 ||
1608 (intno >= 10 && intno <= 13);
1609
1610 if (loglevel & CPU_LOG_INT)
1611 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1612 env->old_exception, intno);
1613
1614 if (env->old_exception == EXCP08_DBLE)
1615 cpu_abort(env, "triple fault");
1616
1617 if ((first_contributory && second_contributory)
1618 || (env->old_exception == EXCP0E_PAGE &&
1619 (second_contributory || (intno == EXCP0E_PAGE)))) {
1620 intno = EXCP08_DBLE;
1621 *error_code = 0;
1622 }
1623
1624 if (second_contributory || (intno == EXCP0E_PAGE) ||
1625 (intno == EXCP08_DBLE))
1626 env->old_exception = intno;
1627
1628 return intno;
1629}
1630
1631/*
1632 * Signal an interrupt. It is executed in the main CPU loop.
1633 * is_int is TRUE if coming from the int instruction. next_eip is the
1634 * EIP value AFTER the interrupt instruction. It is only relevant if
1635 * is_int is TRUE.
1636 */
1637void raise_interrupt(int intno, int is_int, int error_code,
1638 int next_eip_addend)
1639{
1640#if defined(VBOX) && defined(DEBUG)
1641 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1642#endif
1643 if (!is_int) {
1644 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1645 intno = check_exception(intno, &error_code);
1646 } else {
1647 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1648 }
1649
1650 env->exception_index = intno;
1651 env->error_code = error_code;
1652 env->exception_is_int = is_int;
1653 env->exception_next_eip = env->eip + next_eip_addend;
1654 cpu_loop_exit();
1655}
1656
1657/* shortcuts to generate exceptions */
1658
1659void (raise_exception_err)(int exception_index, int error_code)
1660{
1661 raise_interrupt(exception_index, 0, error_code, 0);
1662}
1663
1664void raise_exception(int exception_index)
1665{
1666 raise_interrupt(exception_index, 0, 0, 0);
1667}
1668
1669/* SMM support */
1670
1671#if defined(CONFIG_USER_ONLY)
1672
1673void do_smm_enter(void)
1674{
1675}
1676
1677void helper_rsm(void)
1678{
1679}
1680
1681#else
1682
1683#ifdef TARGET_X86_64
1684#define SMM_REVISION_ID 0x00020064
1685#else
1686#define SMM_REVISION_ID 0x00020000
1687#endif
1688
1689void do_smm_enter(void)
1690{
1691 target_ulong sm_state;
1692 SegmentCache *dt;
1693 int i, offset;
1694
1695 if (loglevel & CPU_LOG_INT) {
1696 fprintf(logfile, "SMM: enter\n");
1697 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1698 }
1699
1700 env->hflags |= HF_SMM_MASK;
1701 cpu_smm_update(env);
1702
1703 sm_state = env->smbase + 0x8000;
1704
1705#ifdef TARGET_X86_64
1706 for(i = 0; i < 6; i++) {
1707 dt = &env->segs[i];
1708 offset = 0x7e00 + i * 16;
1709 stw_phys(sm_state + offset, dt->selector);
1710 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1711 stl_phys(sm_state + offset + 4, dt->limit);
1712 stq_phys(sm_state + offset + 8, dt->base);
1713 }
1714
1715 stq_phys(sm_state + 0x7e68, env->gdt.base);
1716 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1717
1718 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1719 stq_phys(sm_state + 0x7e78, env->ldt.base);
1720 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1721 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1722
1723 stq_phys(sm_state + 0x7e88, env->idt.base);
1724 stl_phys(sm_state + 0x7e84, env->idt.limit);
1725
1726 stw_phys(sm_state + 0x7e90, env->tr.selector);
1727 stq_phys(sm_state + 0x7e98, env->tr.base);
1728 stl_phys(sm_state + 0x7e94, env->tr.limit);
1729 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1730
1731 stq_phys(sm_state + 0x7ed0, env->efer);
1732
1733 stq_phys(sm_state + 0x7ff8, EAX);
1734 stq_phys(sm_state + 0x7ff0, ECX);
1735 stq_phys(sm_state + 0x7fe8, EDX);
1736 stq_phys(sm_state + 0x7fe0, EBX);
1737 stq_phys(sm_state + 0x7fd8, ESP);
1738 stq_phys(sm_state + 0x7fd0, EBP);
1739 stq_phys(sm_state + 0x7fc8, ESI);
1740 stq_phys(sm_state + 0x7fc0, EDI);
1741 for(i = 8; i < 16; i++)
1742 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1743 stq_phys(sm_state + 0x7f78, env->eip);
1744 stl_phys(sm_state + 0x7f70, compute_eflags());
1745 stl_phys(sm_state + 0x7f68, env->dr[6]);
1746 stl_phys(sm_state + 0x7f60, env->dr[7]);
1747
1748 stl_phys(sm_state + 0x7f48, env->cr[4]);
1749 stl_phys(sm_state + 0x7f50, env->cr[3]);
1750 stl_phys(sm_state + 0x7f58, env->cr[0]);
1751
1752 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1753 stl_phys(sm_state + 0x7f00, env->smbase);
1754#else
1755 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1756 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1757 stl_phys(sm_state + 0x7ff4, compute_eflags());
1758 stl_phys(sm_state + 0x7ff0, env->eip);
1759 stl_phys(sm_state + 0x7fec, EDI);
1760 stl_phys(sm_state + 0x7fe8, ESI);
1761 stl_phys(sm_state + 0x7fe4, EBP);
1762 stl_phys(sm_state + 0x7fe0, ESP);
1763 stl_phys(sm_state + 0x7fdc, EBX);
1764 stl_phys(sm_state + 0x7fd8, EDX);
1765 stl_phys(sm_state + 0x7fd4, ECX);
1766 stl_phys(sm_state + 0x7fd0, EAX);
1767 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1768 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1769
1770 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1771 stl_phys(sm_state + 0x7f64, env->tr.base);
1772 stl_phys(sm_state + 0x7f60, env->tr.limit);
1773 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1774
1775 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1776 stl_phys(sm_state + 0x7f80, env->ldt.base);
1777 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1778 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1779
1780 stl_phys(sm_state + 0x7f74, env->gdt.base);
1781 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1782
1783 stl_phys(sm_state + 0x7f58, env->idt.base);
1784 stl_phys(sm_state + 0x7f54, env->idt.limit);
1785
1786 for(i = 0; i < 6; i++) {
1787 dt = &env->segs[i];
1788 if (i < 3)
1789 offset = 0x7f84 + i * 12;
1790 else
1791 offset = 0x7f2c + (i - 3) * 12;
1792 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1793 stl_phys(sm_state + offset + 8, dt->base);
1794 stl_phys(sm_state + offset + 4, dt->limit);
1795 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1796 }
1797 stl_phys(sm_state + 0x7f14, env->cr[4]);
1798
1799 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1800 stl_phys(sm_state + 0x7ef8, env->smbase);
1801#endif
1802 /* init SMM cpu state */
1803
1804#ifdef TARGET_X86_64
1805 cpu_load_efer(env, 0);
1806#endif
1807 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1808 env->eip = 0x00008000;
1809 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1810 0xffffffff, 0);
1811 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1812 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1813 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1814 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1815 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1816
1817 cpu_x86_update_cr0(env,
1818 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1819 cpu_x86_update_cr4(env, 0);
1820 env->dr[7] = 0x00000400;
1821 CC_OP = CC_OP_EFLAGS;
1822}
1823
1824void helper_rsm(void)
1825{
1826#ifdef VBOX
1827 cpu_abort(env, "helper_rsm");
1828#else /* !VBOX */
1829
1830
1831 target_ulong sm_state;
1832 int i, offset;
1833 uint32_t val;
1834
1835 sm_state = env->smbase + 0x8000;
1836#ifdef TARGET_X86_64
1837 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1838
1839 for(i = 0; i < 6; i++) {
1840 offset = 0x7e00 + i * 16;
1841 cpu_x86_load_seg_cache(env, i,
1842 lduw_phys(sm_state + offset),
1843 ldq_phys(sm_state + offset + 8),
1844 ldl_phys(sm_state + offset + 4),
1845 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1846 }
1847
1848 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1849 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1850
1851 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1852 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1853 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1854 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1855
1856 env->idt.base = ldq_phys(sm_state + 0x7e88);
1857 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1858
1859 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1860 env->tr.base = ldq_phys(sm_state + 0x7e98);
1861 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1862 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1863
1864 EAX = ldq_phys(sm_state + 0x7ff8);
1865 ECX = ldq_phys(sm_state + 0x7ff0);
1866 EDX = ldq_phys(sm_state + 0x7fe8);
1867 EBX = ldq_phys(sm_state + 0x7fe0);
1868 ESP = ldq_phys(sm_state + 0x7fd8);
1869 EBP = ldq_phys(sm_state + 0x7fd0);
1870 ESI = ldq_phys(sm_state + 0x7fc8);
1871 EDI = ldq_phys(sm_state + 0x7fc0);
1872 for(i = 8; i < 16; i++)
1873 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1874 env->eip = ldq_phys(sm_state + 0x7f78);
1875 load_eflags(ldl_phys(sm_state + 0x7f70),
1876 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1877 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1878 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1879
1880 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1881 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1882 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1883
1884 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1885 if (val & 0x20000) {
1886 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1887 }
1888#else
1889 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1890 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1891 load_eflags(ldl_phys(sm_state + 0x7ff4),
1892 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1893 env->eip = ldl_phys(sm_state + 0x7ff0);
1894 EDI = ldl_phys(sm_state + 0x7fec);
1895 ESI = ldl_phys(sm_state + 0x7fe8);
1896 EBP = ldl_phys(sm_state + 0x7fe4);
1897 ESP = ldl_phys(sm_state + 0x7fe0);
1898 EBX = ldl_phys(sm_state + 0x7fdc);
1899 EDX = ldl_phys(sm_state + 0x7fd8);
1900 ECX = ldl_phys(sm_state + 0x7fd4);
1901 EAX = ldl_phys(sm_state + 0x7fd0);
1902 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1903 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1904
1905 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1906 env->tr.base = ldl_phys(sm_state + 0x7f64);
1907 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1908 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1909
1910 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1911 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1912 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1913 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1914
1915 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1916 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1917
1918 env->idt.base = ldl_phys(sm_state + 0x7f58);
1919 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1920
1921 for(i = 0; i < 6; i++) {
1922 if (i < 3)
1923 offset = 0x7f84 + i * 12;
1924 else
1925 offset = 0x7f2c + (i - 3) * 12;
1926 cpu_x86_load_seg_cache(env, i,
1927 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1928 ldl_phys(sm_state + offset + 8),
1929 ldl_phys(sm_state + offset + 4),
1930 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1931 }
1932 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1933
1934 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1935 if (val & 0x20000) {
1936 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1937 }
1938#endif
1939 CC_OP = CC_OP_EFLAGS;
1940 env->hflags &= ~HF_SMM_MASK;
1941 cpu_smm_update(env);
1942
1943 if (loglevel & CPU_LOG_INT) {
1944 fprintf(logfile, "SMM: after RSM\n");
1945 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1946 }
1947#endif /* !VBOX */
1948}
1949
1950#endif /* !CONFIG_USER_ONLY */
1951
1952
1953/* division, flags are undefined */
1954
1955void helper_divb_AL(target_ulong t0)
1956{
1957 unsigned int num, den, q, r;
1958
1959 num = (EAX & 0xffff);
1960 den = (t0 & 0xff);
1961 if (den == 0) {
1962 raise_exception(EXCP00_DIVZ);
1963 }
1964 q = (num / den);
1965 if (q > 0xff)
1966 raise_exception(EXCP00_DIVZ);
1967 q &= 0xff;
1968 r = (num % den) & 0xff;
1969 EAX = (EAX & ~0xffff) | (r << 8) | q;
1970}
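/* Illustrative note (not in the original source): for the 8-bit DIV the
 * dividend is AX and the result is packed back into AX as AH=remainder,
 * AL=quotient. Example: AX=0x0123 (291) divided by 0x10 (16) gives
 * q=0x12 and r=0x03, so AX becomes 0x0312. A quotient above 0xff raises
 * the same #DE (EXCP00_DIVZ) as a zero divisor, matching x86 behaviour.
 */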
1971
1972void helper_idivb_AL(target_ulong t0)
1973{
1974 int num, den, q, r;
1975
1976 num = (int16_t)EAX;
1977 den = (int8_t)t0;
1978 if (den == 0) {
1979 raise_exception(EXCP00_DIVZ);
1980 }
1981 q = (num / den);
1982 if (q != (int8_t)q)
1983 raise_exception(EXCP00_DIVZ);
1984 q &= 0xff;
1985 r = (num % den) & 0xff;
1986 EAX = (EAX & ~0xffff) | (r << 8) | q;
1987}
1988
1989void helper_divw_AX(target_ulong t0)
1990{
1991 unsigned int num, den, q, r;
1992
1993 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1994 den = (t0 & 0xffff);
1995 if (den == 0) {
1996 raise_exception(EXCP00_DIVZ);
1997 }
1998 q = (num / den);
1999 if (q > 0xffff)
2000 raise_exception(EXCP00_DIVZ);
2001 q &= 0xffff;
2002 r = (num % den) & 0xffff;
2003 EAX = (EAX & ~0xffff) | q;
2004 EDX = (EDX & ~0xffff) | r;
2005}
2006
2007void helper_idivw_AX(target_ulong t0)
2008{
2009 int num, den, q, r;
2010
2011 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2012 den = (int16_t)t0;
2013 if (den == 0) {
2014 raise_exception(EXCP00_DIVZ);
2015 }
2016 q = (num / den);
2017 if (q != (int16_t)q)
2018 raise_exception(EXCP00_DIVZ);
2019 q &= 0xffff;
2020 r = (num % den) & 0xffff;
2021 EAX = (EAX & ~0xffff) | q;
2022 EDX = (EDX & ~0xffff) | r;
2023}
2024
2025void helper_divl_EAX(target_ulong t0)
2026{
2027 unsigned int den, r;
2028 uint64_t num, q;
2029
2030 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2031 den = t0;
2032 if (den == 0) {
2033 raise_exception(EXCP00_DIVZ);
2034 }
2035 q = (num / den);
2036 r = (num % den);
2037 if (q > 0xffffffff)
2038 raise_exception(EXCP00_DIVZ);
2039 EAX = (uint32_t)q;
2040 EDX = (uint32_t)r;
2041}
2042
2043void helper_idivl_EAX(target_ulong t0)
2044{
2045 int den, r;
2046 int64_t num, q;
2047
2048 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2049 den = t0;
2050 if (den == 0) {
2051 raise_exception(EXCP00_DIVZ);
2052 }
2053 q = (num / den);
2054 r = (num % den);
2055 if (q != (int32_t)q)
2056 raise_exception(EXCP00_DIVZ);
2057 EAX = (uint32_t)q;
2058 EDX = (uint32_t)r;
2059}
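/* Illustrative note (not in the original source): the 32-bit forms divide
 * the 64-bit EDX:EAX pair. IDIV raises #DE not only for a zero divisor but
 * also when the quotient does not fit the destination; e.g. EDX:EAX =
 * 0x00000001:00000000 (2^32) divided by 2 gives 2^31, which overflows
 * int32_t and therefore traps, as the q != (int32_t)q check above shows.
 */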
2060
2061/* bcd */
2062
2063/* XXX: the #DE exception for an immediate base of 0 is not raised here */
2064void helper_aam(int base)
2065{
2066 int al, ah;
2067 al = EAX & 0xff;
2068 ah = al / base;
2069 al = al % base;
2070 EAX = (EAX & ~0xffff) | al | (ah << 8);
2071 CC_DST = al;
2072}
2073
2074void helper_aad(int base)
2075{
2076 int al, ah;
2077 al = EAX & 0xff;
2078 ah = (EAX >> 8) & 0xff;
2079 al = ((ah * base) + al) & 0xff;
2080 EAX = (EAX & ~0xffff) | al;
2081 CC_DST = al;
2082}
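/* Illustrative note (not in the original source): AAM splits AL by the
 * immediate base and AAD recombines AH/AL into AL. With the usual base 10:
 *   AAM: AL=0x35 (53)  -> AH=5, AL=3    (AX=0x0503)
 *   AAD: AH=5, AL=3    -> AL=0x35, AH=0 (AX=0x0035)
 * helper_aad clears AH because EAX & ~0xffff drops both bytes before the
 * new AL is OR-ed back in, which matches the architectural AH <- 0.
 */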
2083
2084void helper_aaa(void)
2085{
2086 int icarry;
2087 int al, ah, af;
2088 int eflags;
2089
2090 eflags = cc_table[CC_OP].compute_all();
2091 af = eflags & CC_A;
2092 al = EAX & 0xff;
2093 ah = (EAX >> 8) & 0xff;
2094
2095 icarry = (al > 0xf9);
2096 if (((al & 0x0f) > 9 ) || af) {
2097 al = (al + 6) & 0x0f;
2098 ah = (ah + 1 + icarry) & 0xff;
2099 eflags |= CC_C | CC_A;
2100 } else {
2101 eflags &= ~(CC_C | CC_A);
2102 al &= 0x0f;
2103 }
2104 EAX = (EAX & ~0xffff) | al | (ah << 8);
2105 CC_SRC = eflags;
2106 FORCE_RET();
2107}
2108
2109void helper_aas(void)
2110{
2111 int icarry;
2112 int al, ah, af;
2113 int eflags;
2114
2115 eflags = cc_table[CC_OP].compute_all();
2116 af = eflags & CC_A;
2117 al = EAX & 0xff;
2118 ah = (EAX >> 8) & 0xff;
2119
2120 icarry = (al < 6);
2121 if (((al & 0x0f) > 9 ) || af) {
2122 al = (al - 6) & 0x0f;
2123 ah = (ah - 1 - icarry) & 0xff;
2124 eflags |= CC_C | CC_A;
2125 } else {
2126 eflags &= ~(CC_C | CC_A);
2127 al &= 0x0f;
2128 }
2129 EAX = (EAX & ~0xffff) | al | (ah << 8);
2130 CC_SRC = eflags;
2131 FORCE_RET();
2132}
2133
2134void helper_daa(void)
2135{
2136 int al, af, cf;
2137 int eflags;
2138
2139 eflags = cc_table[CC_OP].compute_all();
2140 cf = eflags & CC_C;
2141 af = eflags & CC_A;
2142 al = EAX & 0xff;
2143
2144 eflags = 0;
2145 if (((al & 0x0f) > 9 ) || af) {
2146 al = (al + 6) & 0xff;
2147 eflags |= CC_A;
2148 }
2149 if ((al > 0x9f) || cf) {
2150 al = (al + 0x60) & 0xff;
2151 eflags |= CC_C;
2152 }
2153 EAX = (EAX & ~0xff) | al;
2154 /* well, speed is not an issue here, so we compute the flags by hand */
2155 eflags |= (al == 0) << 6; /* zf */
2156 eflags |= parity_table[al]; /* pf */
2157 eflags |= (al & 0x80); /* sf */
2158 CC_SRC = eflags;
2159 FORCE_RET();
2160}
2161
2162void helper_das(void)
2163{
2164 int al, al1, af, cf;
2165 int eflags;
2166
2167 eflags = cc_table[CC_OP].compute_all();
2168 cf = eflags & CC_C;
2169 af = eflags & CC_A;
2170 al = EAX & 0xff;
2171
2172 eflags = 0;
2173 al1 = al;
2174 if (((al & 0x0f) > 9 ) || af) {
2175 eflags |= CC_A;
2176 if (al < 6 || cf)
2177 eflags |= CC_C;
2178 al = (al - 6) & 0xff;
2179 }
2180 if ((al1 > 0x99) || cf) {
2181 al = (al - 0x60) & 0xff;
2182 eflags |= CC_C;
2183 }
2184 EAX = (EAX & ~0xff) | al;
2185 /* well, speed is not an issue here, so we compute the flags by hand */
2186 eflags |= (al == 0) << 6; /* zf */
2187 eflags |= parity_table[al]; /* pf */
2188 eflags |= (al & 0x80); /* sf */
2189 CC_SRC = eflags;
2190 FORCE_RET();
2191}
2192
2193void helper_into(int next_eip_addend)
2194{
2195 int eflags;
2196 eflags = cc_table[CC_OP].compute_all();
2197 if (eflags & CC_O) {
2198 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2199 }
2200}
2201
2202void helper_cmpxchg8b(target_ulong a0)
2203{
2204 uint64_t d;
2205 int eflags;
2206
2207 eflags = cc_table[CC_OP].compute_all();
2208 d = ldq(a0);
2209 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2210 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2211 eflags |= CC_Z;
2212 } else {
2213 /* always do the store */
2214 stq(a0, d);
2215 EDX = (uint32_t)(d >> 32);
2216 EAX = (uint32_t)d;
2217 eflags &= ~CC_Z;
2218 }
2219 CC_SRC = eflags;
2220}
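/* Usage note (illustration, not part of the original source): guests use
 * this for an atomic 64-bit compare-and-swap, e.g.
 *     lock cmpxchg8b [mem]    ; compare with EDX:EAX, exchange with ECX:EBX
 * ZF reports success; on failure the current memory value is returned in
 * EDX:EAX. The store is issued even on failure, presumably so that the
 * write access (and any resulting fault) happens as on real hardware.
 */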
2221
2222#ifdef TARGET_X86_64
2223void helper_cmpxchg16b(target_ulong a0)
2224{
2225 uint64_t d0, d1;
2226 int eflags;
2227
2228 if ((a0 & 0xf) != 0)
2229 raise_exception(EXCP0D_GPF);
2230 eflags = cc_table[CC_OP].compute_all();
2231 d0 = ldq(a0);
2232 d1 = ldq(a0 + 8);
2233 if (d0 == EAX && d1 == EDX) {
2234 stq(a0, EBX);
2235 stq(a0 + 8, ECX);
2236 eflags |= CC_Z;
2237 } else {
2238 /* always do the store */
2239 stq(a0, d0);
2240 stq(a0 + 8, d1);
2241 EDX = d1;
2242 EAX = d0;
2243 eflags &= ~CC_Z;
2244 }
2245 CC_SRC = eflags;
2246}
2247#endif
2248
2249void helper_single_step(void)
2250{
2251 env->dr[6] |= 0x4000;
2252 raise_exception(EXCP01_SSTP);
2253}
2254
2255void helper_cpuid(void)
2256{
2257#ifndef VBOX
2258 uint32_t index;
2259
2260 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2261
2262 index = (uint32_t)EAX;
2263 /* test if maximum index reached */
2264 if (index & 0x80000000) {
2265 if (index > env->cpuid_xlevel)
2266 index = env->cpuid_level;
2267 } else {
2268 if (index > env->cpuid_level)
2269 index = env->cpuid_level;
2270 }
2271
2272 switch(index) {
2273 case 0:
2274 EAX = env->cpuid_level;
2275 EBX = env->cpuid_vendor1;
2276 EDX = env->cpuid_vendor2;
2277 ECX = env->cpuid_vendor3;
2278 break;
2279 case 1:
2280 EAX = env->cpuid_version;
2281 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2282 ECX = env->cpuid_ext_features;
2283 EDX = env->cpuid_features;
2284 break;
2285 case 2:
2286 /* cache info: needed for Pentium Pro compatibility */
2287 EAX = 1;
2288 EBX = 0;
2289 ECX = 0;
2290 EDX = 0x2c307d;
2291 break;
2292 case 4:
2293 /* cache info: needed for Core compatibility */
2294 switch (ECX) {
2295 case 0: /* L1 dcache info */
2296 EAX = 0x0000121;
2297 EBX = 0x1c0003f;
2298 ECX = 0x000003f;
2299 EDX = 0x0000001;
2300 break;
2301 case 1: /* L1 icache info */
2302 EAX = 0x0000122;
2303 EBX = 0x1c0003f;
2304 ECX = 0x000003f;
2305 EDX = 0x0000001;
2306 break;
2307 case 2: /* L2 cache info */
2308 EAX = 0x0000143;
2309 EBX = 0x3c0003f;
2310 ECX = 0x0000fff;
2311 EDX = 0x0000001;
2312 break;
2313 default: /* end of info */
2314 EAX = 0;
2315 EBX = 0;
2316 ECX = 0;
2317 EDX = 0;
2318 break;
2319 }
2320
2321 break;
2322 case 5:
2323 /* mwait info: needed for Core compatibility */
2324 EAX = 0; /* Smallest monitor-line size in bytes */
2325 EBX = 0; /* Largest monitor-line size in bytes */
2326 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2327 EDX = 0;
2328 break;
2329 case 6:
2330 /* Thermal and Power Leaf */
2331 EAX = 0;
2332 EBX = 0;
2333 ECX = 0;
2334 EDX = 0;
2335 break;
2336 case 9:
2337 /* Direct Cache Access Information Leaf */
2338 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2339 EBX = 0;
2340 ECX = 0;
2341 EDX = 0;
2342 break;
2343 case 0xA:
2344 /* Architectural Performance Monitoring Leaf */
2345 EAX = 0;
2346 EBX = 0;
2347 ECX = 0;
2348 EDX = 0;
2349 break;
2350 case 0x80000000:
2351 EAX = env->cpuid_xlevel;
2352 EBX = env->cpuid_vendor1;
2353 EDX = env->cpuid_vendor2;
2354 ECX = env->cpuid_vendor3;
2355 break;
2356 case 0x80000001:
2357 EAX = env->cpuid_features;
2358 EBX = 0;
2359 ECX = env->cpuid_ext3_features;
2360 EDX = env->cpuid_ext2_features;
2361 break;
2362 case 0x80000002:
2363 case 0x80000003:
2364 case 0x80000004:
2365 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2366 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2367 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2368 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2369 break;
2370 case 0x80000005:
2371 /* cache info (L1 cache) */
2372 EAX = 0x01ff01ff;
2373 EBX = 0x01ff01ff;
2374 ECX = 0x40020140;
2375 EDX = 0x40020140;
2376 break;
2377 case 0x80000006:
2378 /* cache info (L2 cache) */
2379 EAX = 0;
2380 EBX = 0x42004200;
2381 ECX = 0x02008140;
2382 EDX = 0;
2383 break;
2384 case 0x80000008:
2385 /* virtual & phys address size in low 2 bytes. */
2386/* XXX: This value must match the one used in the MMU code. */
2387 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2388 /* 64 bit processor */
2389#if defined(USE_KQEMU)
2390 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2391#else
2392/* XXX: The physical address space is limited to 42 bits in exec.c. */
2393 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2394#endif
2395 } else {
2396#if defined(USE_KQEMU)
2397 EAX = 0x00000020; /* 32 bits physical */
2398#else
2399 if (env->cpuid_features & CPUID_PSE36)
2400 EAX = 0x00000024; /* 36 bits physical */
2401 else
2402 EAX = 0x00000020; /* 32 bits physical */
2403#endif
2404 }
2405 EBX = 0;
2406 ECX = 0;
2407 EDX = 0;
2408 break;
2409 case 0x8000000A:
2410 EAX = 0x00000001;
2411 EBX = 0;
2412 ECX = 0;
2413 EDX = 0;
2414 break;
2415 default:
2416 /* reserved values: zero */
2417 EAX = 0;
2418 EBX = 0;
2419 ECX = 0;
2420 EDX = 0;
2421 break;
2422 }
2423#else /* VBOX */
2424 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2425#endif /* VBOX */
2426}
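/* Illustrative note (not in the original source): leaf 0 returns the vendor
 * string split across EBX, EDX, ECX in that order, so for "GenuineIntel"
 * cpuid_vendor1..3 yield EBX="Genu", EDX="ineI", ECX="ntel". In the VBOX
 * build the whole table above is bypassed and remR3CpuId() supplies the
 * CPUID values configured for the VM instead.
 */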
2427
2428void helper_enter_level(int level, int data32, target_ulong t1)
2429{
2430 target_ulong ssp;
2431 uint32_t esp_mask, esp, ebp;
2432
2433 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2434 ssp = env->segs[R_SS].base;
2435 ebp = EBP;
2436 esp = ESP;
2437 if (data32) {
2438 /* 32 bit */
2439 esp -= 4;
2440 while (--level) {
2441 esp -= 4;
2442 ebp -= 4;
2443 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2444 }
2445 esp -= 4;
2446 stl(ssp + (esp & esp_mask), t1);
2447 } else {
2448 /* 16 bit */
2449 esp -= 2;
2450 while (--level) {
2451 esp -= 2;
2452 ebp -= 2;
2453 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2454 }
2455 esp -= 2;
2456 stw(ssp + (esp & esp_mask), t1);
2457 }
2458}
2459
2460#ifdef TARGET_X86_64
2461void helper_enter64_level(int level, int data64, target_ulong t1)
2462{
2463 target_ulong esp, ebp;
2464 ebp = EBP;
2465 esp = ESP;
2466
2467 if (data64) {
2468 /* 64 bit */
2469 esp -= 8;
2470 while (--level) {
2471 esp -= 8;
2472 ebp -= 8;
2473 stq(esp, ldq(ebp));
2474 }
2475 esp -= 8;
2476 stq(esp, t1);
2477 } else {
2478 /* 16 bit */
2479 esp -= 2;
2480 while (--level) {
2481 esp -= 2;
2482 ebp -= 2;
2483 stw(esp, lduw(ebp));
2484 }
2485 esp -= 2;
2486 stw(esp, t1);
2487 }
2488}
2489#endif
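/* Editor's sketch (assumption, verify against translate.c): these helpers
 * implement only the nested part of ENTER. For level > 0 they copy level-1
 * display pointers from the caller's frame and then push the new frame
 * pointer passed in t1; the initial (E)BP push, the final EBP/ESP updates
 * and the stack-size subtraction are emitted separately by the translator.
 */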
2490
2491void helper_lldt(int selector)
2492{
2493 SegmentCache *dt;
2494 uint32_t e1, e2;
2495#ifndef VBOX
2496 int index, entry_limit;
2497#else
2498 unsigned int index, entry_limit;
2499#endif
2500 target_ulong ptr;
2501
2502#ifdef VBOX
2503 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2504 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2505#endif
2506
2507 selector &= 0xffff;
2508 if ((selector & 0xfffc) == 0) {
2509 /* XXX: NULL selector case: invalid LDT */
2510 env->ldt.base = 0;
2511 env->ldt.limit = 0;
2512 } else {
2513 if (selector & 0x4)
2514 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2515 dt = &env->gdt;
2516 index = selector & ~7;
2517#ifdef TARGET_X86_64
2518 if (env->hflags & HF_LMA_MASK)
2519 entry_limit = 15;
2520 else
2521#endif
2522 entry_limit = 7;
2523 if ((index + entry_limit) > dt->limit)
2524 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2525 ptr = dt->base + index;
2526 e1 = ldl_kernel(ptr);
2527 e2 = ldl_kernel(ptr + 4);
2528 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2529 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2530 if (!(e2 & DESC_P_MASK))
2531 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2532#ifdef TARGET_X86_64
2533 if (env->hflags & HF_LMA_MASK) {
2534 uint32_t e3;
2535 e3 = ldl_kernel(ptr + 8);
2536 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2537 env->ldt.base |= (target_ulong)e3 << 32;
2538 } else
2539#endif
2540 {
2541 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2542 }
2543 }
2544 env->ldt.selector = selector;
2545#ifdef VBOX
2546 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2547 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2548#endif
2549}
2550
2551void helper_ltr(int selector)
2552{
2553 SegmentCache *dt;
2554 uint32_t e1, e2;
2555#ifndef VBOX
2556 int index, type, entry_limit;
2557#else
2558 unsigned int index;
2559 int type, entry_limit;
2560#endif
2561 target_ulong ptr;
2562
2563#ifdef VBOX
2564 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2565 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2566 env->tr.flags, (RTSEL)(selector & 0xffff)));
2567#endif
2568 selector &= 0xffff;
2569 if ((selector & 0xfffc) == 0) {
2570 /* NULL selector case: invalid TR */
2571 env->tr.base = 0;
2572 env->tr.limit = 0;
2573 env->tr.flags = 0;
2574 } else {
2575 if (selector & 0x4)
2576 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2577 dt = &env->gdt;
2578 index = selector & ~7;
2579#ifdef TARGET_X86_64
2580 if (env->hflags & HF_LMA_MASK)
2581 entry_limit = 15;
2582 else
2583#endif
2584 entry_limit = 7;
2585 if ((index + entry_limit) > dt->limit)
2586 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2587 ptr = dt->base + index;
2588 e1 = ldl_kernel(ptr);
2589 e2 = ldl_kernel(ptr + 4);
2590 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2591 if ((e2 & DESC_S_MASK) ||
2592 (type != 1 && type != 9))
2593 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2594 if (!(e2 & DESC_P_MASK))
2595 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2596#ifdef TARGET_X86_64
2597 if (env->hflags & HF_LMA_MASK) {
2598 uint32_t e3, e4;
2599 e3 = ldl_kernel(ptr + 8);
2600 e4 = ldl_kernel(ptr + 12);
2601 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2602 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2603 load_seg_cache_raw_dt(&env->tr, e1, e2);
2604 env->tr.base |= (target_ulong)e3 << 32;
2605 } else
2606#endif
2607 {
2608 load_seg_cache_raw_dt(&env->tr, e1, e2);
2609 }
2610 e2 |= DESC_TSS_BUSY_MASK;
2611 stl_kernel(ptr + 4, e2);
2612 }
2613 env->tr.selector = selector;
2614#ifdef VBOX
2615 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2616 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2617 env->tr.flags, (RTSEL)(selector & 0xffff)));
2618#endif
2619}
2620
2621/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2622void helper_load_seg(int seg_reg, int selector)
2623{
2624 uint32_t e1, e2;
2625 int cpl, dpl, rpl;
2626 SegmentCache *dt;
2627#ifndef VBOX
2628 int index;
2629#else
2630 unsigned int index;
2631#endif
2632 target_ulong ptr;
2633
2634 selector &= 0xffff;
2635 cpl = env->hflags & HF_CPL_MASK;
2636
2637#ifdef VBOX
2638 /* Trying to load a selector with RPL=1 while in raw ring-0 mode? */
2639 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2640 {
2641 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2642 selector = selector & 0xfffc;
2643 }
2644#endif
2645 if ((selector & 0xfffc) == 0) {
2646 /* null selector case */
2647 if (seg_reg == R_SS
2648#ifdef TARGET_X86_64
2649 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2650#endif
2651 )
2652 raise_exception_err(EXCP0D_GPF, 0);
2653 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2654 } else {
2655
2656 if (selector & 0x4)
2657 dt = &env->ldt;
2658 else
2659 dt = &env->gdt;
2660 index = selector & ~7;
2661 if ((index + 7) > dt->limit)
2662 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2663 ptr = dt->base + index;
2664 e1 = ldl_kernel(ptr);
2665 e2 = ldl_kernel(ptr + 4);
2666
2667 if (!(e2 & DESC_S_MASK))
2668 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2669 rpl = selector & 3;
2670 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2671 if (seg_reg == R_SS) {
2672 /* must be writable segment */
2673 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2674 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2675 if (rpl != cpl || dpl != cpl)
2676 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2677 } else {
2678 /* must be readable segment */
2679 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2680 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2681
2682 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2683 /* if not conforming code, test rights */
2684 if (dpl < cpl || dpl < rpl)
2685 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2686 }
2687 }
2688
2689 if (!(e2 & DESC_P_MASK)) {
2690 if (seg_reg == R_SS)
2691 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2692 else
2693 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2694 }
2695
2696 /* set the access bit if not already set */
2697 if (!(e2 & DESC_A_MASK)) {
2698 e2 |= DESC_A_MASK;
2699 stl_kernel(ptr + 4, e2);
2700 }
2701
2702 cpu_x86_load_seg_cache(env, seg_reg, selector,
2703 get_seg_base(e1, e2),
2704 get_seg_limit(e1, e2),
2705 e2);
2706#if 0
2707 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2708 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2709#endif
2710 }
2711}
2712
2713/* protected mode jump */
2714void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2715 int next_eip_addend)
2716{
2717 int gate_cs, type;
2718 uint32_t e1, e2, cpl, dpl, rpl, limit;
2719 target_ulong next_eip;
2720
2721#ifdef VBOX
2722 e1 = e2 = 0;
2723#endif
2724 if ((new_cs & 0xfffc) == 0)
2725 raise_exception_err(EXCP0D_GPF, 0);
2726 if (load_segment(&e1, &e2, new_cs) != 0)
2727 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2728 cpl = env->hflags & HF_CPL_MASK;
2729 if (e2 & DESC_S_MASK) {
2730 if (!(e2 & DESC_CS_MASK))
2731 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2732 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2733 if (e2 & DESC_C_MASK) {
2734 /* conforming code segment */
2735 if (dpl > cpl)
2736 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2737 } else {
2738 /* non conforming code segment */
2739 rpl = new_cs & 3;
2740 if (rpl > cpl)
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 if (dpl != cpl)
2743 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2744 }
2745 if (!(e2 & DESC_P_MASK))
2746 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2747 limit = get_seg_limit(e1, e2);
2748 if (new_eip > limit &&
2749 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2750 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2751 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2752 get_seg_base(e1, e2), limit, e2);
2753 EIP = new_eip;
2754 } else {
2755 /* jump to call or task gate */
2756 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2757 rpl = new_cs & 3;
2758 cpl = env->hflags & HF_CPL_MASK;
2759 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2760 switch(type) {
2761 case 1: /* 286 TSS */
2762 case 9: /* 386 TSS */
2763 case 5: /* task gate */
2764 if (dpl < cpl || dpl < rpl)
2765 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2766 next_eip = env->eip + next_eip_addend;
2767 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2768 CC_OP = CC_OP_EFLAGS;
2769 break;
2770 case 4: /* 286 call gate */
2771 case 12: /* 386 call gate */
2772 if ((dpl < cpl) || (dpl < rpl))
2773 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2774 if (!(e2 & DESC_P_MASK))
2775 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2776 gate_cs = e1 >> 16;
2777 new_eip = (e1 & 0xffff);
2778 if (type == 12)
2779 new_eip |= (e2 & 0xffff0000);
2780 if (load_segment(&e1, &e2, gate_cs) != 0)
2781 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2782 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2783 /* must be code segment */
2784 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2785 (DESC_S_MASK | DESC_CS_MASK)))
2786 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2787 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2788 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2789 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2790 if (!(e2 & DESC_P_MASK))
2791#ifdef VBOX /* See page 3-514 of 253666.pdf */
2792 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2793#else
2794 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2795#endif
2796 limit = get_seg_limit(e1, e2);
2797 if (new_eip > limit)
2798 raise_exception_err(EXCP0D_GPF, 0);
2799 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2800 get_seg_base(e1, e2), limit, e2);
2801 EIP = new_eip;
2802 break;
2803 default:
2804 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2805 break;
2806 }
2807 }
2808}
2809
2810/* real mode call */
2811void helper_lcall_real(int new_cs, target_ulong new_eip1,
2812 int shift, int next_eip)
2813{
2814 int new_eip;
2815 uint32_t esp, esp_mask;
2816 target_ulong ssp;
2817
2818 new_eip = new_eip1;
2819 esp = ESP;
2820 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2821 ssp = env->segs[R_SS].base;
2822 if (shift) {
2823 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2824 PUSHL(ssp, esp, esp_mask, next_eip);
2825 } else {
2826 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2827 PUSHW(ssp, esp, esp_mask, next_eip);
2828 }
2829
2830 SET_ESP(esp, esp_mask);
2831 env->eip = new_eip;
2832 env->segs[R_CS].selector = new_cs;
2833 env->segs[R_CS].base = (new_cs << 4);
2834}
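/* Illustrative note (not in the original source): real-mode far calls just
 * push CS:IP and reload CS with segment*16 as the base. For example
 *     lcall 0x2000:0x0100
 * pushes the old CS and the return IP, then sets CS.selector=0x2000,
 * CS.base=0x20000 and EIP=0x0100; no descriptor checks are involved.
 */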
2835
2836/* protected mode call */
2837void helper_lcall_protected(int new_cs, target_ulong new_eip,
2838 int shift, int next_eip_addend)
2839{
2840 int new_stack, i;
2841 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2842 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2843 uint32_t val, limit, old_sp_mask;
2844 target_ulong ssp, old_ssp, next_eip;
2845
2846#ifdef VBOX
2847 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2848#endif
2849 next_eip = env->eip + next_eip_addend;
2850#ifdef DEBUG_PCALL
2851 if (loglevel & CPU_LOG_PCALL) {
2852 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2853 new_cs, (uint32_t)new_eip, shift);
2854 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2855 }
2856#endif
2857 if ((new_cs & 0xfffc) == 0)
2858 raise_exception_err(EXCP0D_GPF, 0);
2859 if (load_segment(&e1, &e2, new_cs) != 0)
2860 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2861 cpl = env->hflags & HF_CPL_MASK;
2862#ifdef DEBUG_PCALL
2863 if (loglevel & CPU_LOG_PCALL) {
2864 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2865 }
2866#endif
2867 if (e2 & DESC_S_MASK) {
2868 if (!(e2 & DESC_CS_MASK))
2869 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2870 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2871 if (e2 & DESC_C_MASK) {
2872 /* conforming code segment */
2873 if (dpl > cpl)
2874 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2875 } else {
2876 /* non conforming code segment */
2877 rpl = new_cs & 3;
2878 if (rpl > cpl)
2879 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2880 if (dpl != cpl)
2881 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2882 }
2883 if (!(e2 & DESC_P_MASK))
2884 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2885
2886#ifdef TARGET_X86_64
2887 /* XXX: check 16/32 bit cases in long mode */
2888 if (shift == 2) {
2889 target_ulong rsp;
2890 /* 64 bit case */
2891 rsp = ESP;
2892 PUSHQ(rsp, env->segs[R_CS].selector);
2893 PUSHQ(rsp, next_eip);
2894 /* from this point, not restartable */
2895 ESP = rsp;
2896 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2897 get_seg_base(e1, e2),
2898 get_seg_limit(e1, e2), e2);
2899 EIP = new_eip;
2900 } else
2901#endif
2902 {
2903 sp = ESP;
2904 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2905 ssp = env->segs[R_SS].base;
2906 if (shift) {
2907 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2908 PUSHL(ssp, sp, sp_mask, next_eip);
2909 } else {
2910 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2911 PUSHW(ssp, sp, sp_mask, next_eip);
2912 }
2913
2914 limit = get_seg_limit(e1, e2);
2915 if (new_eip > limit)
2916 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2917 /* from this point, not restartable */
2918 SET_ESP(sp, sp_mask);
2919 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2920 get_seg_base(e1, e2), limit, e2);
2921 EIP = new_eip;
2922 }
2923 } else {
2924 /* check gate type */
2925 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2926 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2927 rpl = new_cs & 3;
2928 switch(type) {
2929 case 1: /* available 286 TSS */
2930 case 9: /* available 386 TSS */
2931 case 5: /* task gate */
2932 if (dpl < cpl || dpl < rpl)
2933 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2934 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2935 CC_OP = CC_OP_EFLAGS;
2936 return;
2937 case 4: /* 286 call gate */
2938 case 12: /* 386 call gate */
2939 break;
2940 default:
2941 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2942 break;
2943 }
2944 shift = type >> 3;
2945
2946 if (dpl < cpl || dpl < rpl)
2947 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2948 /* check valid bit */
2949 if (!(e2 & DESC_P_MASK))
2950 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2951 selector = e1 >> 16;
2952 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2953 param_count = e2 & 0x1f;
2954 if ((selector & 0xfffc) == 0)
2955 raise_exception_err(EXCP0D_GPF, 0);
2956
2957 if (load_segment(&e1, &e2, selector) != 0)
2958 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2959 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2960 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2961 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2962 if (dpl > cpl)
2963 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2964 if (!(e2 & DESC_P_MASK))
2965 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2966
2967 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2968 /* to inner privilege */
2969 get_ss_esp_from_tss(&ss, &sp, dpl);
2970#ifdef DEBUG_PCALL
2971 if (loglevel & CPU_LOG_PCALL)
2972 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2973 ss, sp, param_count, ESP);
2974#endif
2975 if ((ss & 0xfffc) == 0)
2976 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2977 if ((ss & 3) != dpl)
2978 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2979 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2980 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2981 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2982 if (ss_dpl != dpl)
2983 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2984 if (!(ss_e2 & DESC_S_MASK) ||
2985 (ss_e2 & DESC_CS_MASK) ||
2986 !(ss_e2 & DESC_W_MASK))
2987 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2988 if (!(ss_e2 & DESC_P_MASK))
2989#ifdef VBOX /* See page 3-99 of 253666.pdf */
2990 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2991#else
2992 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2993#endif
2994
2995 // push_size = ((param_count * 2) + 8) << shift;
2996
2997 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2998 old_ssp = env->segs[R_SS].base;
2999
3000 sp_mask = get_sp_mask(ss_e2);
3001 ssp = get_seg_base(ss_e1, ss_e2);
3002 if (shift) {
3003 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3004 PUSHL(ssp, sp, sp_mask, ESP);
3005 for(i = param_count - 1; i >= 0; i--) {
3006 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3007 PUSHL(ssp, sp, sp_mask, val);
3008 }
3009 } else {
3010 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3011 PUSHW(ssp, sp, sp_mask, ESP);
3012 for(i = param_count - 1; i >= 0; i--) {
3013 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3014 PUSHW(ssp, sp, sp_mask, val);
3015 }
3016 }
3017 new_stack = 1;
3018 } else {
3019 /* to same privilege */
3020 sp = ESP;
3021 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3022 ssp = env->segs[R_SS].base;
3023 // push_size = (4 << shift);
3024 new_stack = 0;
3025 }
3026
3027 if (shift) {
3028 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3029 PUSHL(ssp, sp, sp_mask, next_eip);
3030 } else {
3031 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3032 PUSHW(ssp, sp, sp_mask, next_eip);
3033 }
3034
3035 /* from this point, not restartable */
3036
3037 if (new_stack) {
3038 ss = (ss & ~3) | dpl;
3039 cpu_x86_load_seg_cache(env, R_SS, ss,
3040 ssp,
3041 get_seg_limit(ss_e1, ss_e2),
3042 ss_e2);
3043 }
3044
3045 selector = (selector & ~3) | dpl;
3046 cpu_x86_load_seg_cache(env, R_CS, selector,
3047 get_seg_base(e1, e2),
3048 get_seg_limit(e1, e2),
3049 e2);
3050 cpu_x86_set_cpl(env, dpl);
3051 SET_ESP(sp, sp_mask);
3052 EIP = offset;
3053 }
3054#ifdef USE_KQEMU
3055 if (kqemu_is_ok(env)) {
3056 env->exception_index = -1;
3057 cpu_loop_exit();
3058 }
3059#endif
3060}
3061
3062/* real and vm86 mode iret */
3063void helper_iret_real(int shift)
3064{
3065 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3066 target_ulong ssp;
3067 int eflags_mask;
3068#ifdef VBOX
3069 bool fVME = false;
3070
3071 remR3TrapClear(env->pVM);
3072#endif /* VBOX */
3073
3074 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3075 sp = ESP;
3076 ssp = env->segs[R_SS].base;
3077 if (shift == 1) {
3078 /* 32 bits */
3079 POPL(ssp, sp, sp_mask, new_eip);
3080 POPL(ssp, sp, sp_mask, new_cs);
3081 new_cs &= 0xffff;
3082 POPL(ssp, sp, sp_mask, new_eflags);
3083 } else {
3084 /* 16 bits */
3085 POPW(ssp, sp, sp_mask, new_eip);
3086 POPW(ssp, sp, sp_mask, new_cs);
3087 POPW(ssp, sp, sp_mask, new_eflags);
3088 }
3089#ifdef VBOX
3090 if ( (env->eflags & VM_MASK)
3091 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3092 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3093 {
3094 fVME = true;
3095 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3096 /* if TF will be set -> #GP */
3097 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3098 || (new_eflags & TF_MASK))
3099 raise_exception(EXCP0D_GPF);
3100 }
3101#endif /* VBOX */
3102 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3103 env->segs[R_CS].selector = new_cs;
3104 env->segs[R_CS].base = (new_cs << 4);
3105 env->eip = new_eip;
3106#ifdef VBOX
3107 if (fVME)
3108 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3109 else
3110#endif
3111 if (env->eflags & VM_MASK)
3112 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3113 else
3114 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3115 if (shift == 0)
3116 eflags_mask &= 0xffff;
3117 load_eflags(new_eflags, eflags_mask);
3118 env->hflags2 &= ~HF2_NMI_MASK;
3119#ifdef VBOX
3120 if (fVME)
3121 {
3122 if (new_eflags & IF_MASK)
3123 env->eflags |= VIF_MASK;
3124 else
3125 env->eflags &= ~VIF_MASK;
3126 }
3127#endif /* VBOX */
3128}
3129
3130#ifndef VBOX
3131static inline void validate_seg(int seg_reg, int cpl)
3132#else /* VBOX */
3133DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3134#endif /* VBOX */
3135{
3136 int dpl;
3137 uint32_t e2;
3138
3139 /* XXX: on x86_64, we do not want to nullify FS and GS because
3140 they may still contain a valid base. I would be interested to
3141 know how a real x86_64 CPU behaves */
3142 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3143 (env->segs[seg_reg].selector & 0xfffc) == 0)
3144 return;
3145
3146 e2 = env->segs[seg_reg].flags;
3147 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3148 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3149 /* data or non conforming code segment */
3150 if (dpl < cpl) {
3151 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3152 }
3153 }
3154}
3155
3156/* protected mode iret */
3157#ifndef VBOX
3158static inline void helper_ret_protected(int shift, int is_iret, int addend)
3159#else /* VBOX */
3160DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3161#endif /* VBOX */
3162{
3163 uint32_t new_cs, new_eflags, new_ss;
3164 uint32_t new_es, new_ds, new_fs, new_gs;
3165 uint32_t e1, e2, ss_e1, ss_e2;
3166 int cpl, dpl, rpl, eflags_mask, iopl;
3167 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3168
3169#ifdef VBOX
3170 ss_e1 = ss_e2 = e1 = e2 = 0;
3171#endif
3172
3173#ifdef TARGET_X86_64
3174 if (shift == 2)
3175 sp_mask = -1;
3176 else
3177#endif
3178 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3179 sp = ESP;
3180 ssp = env->segs[R_SS].base;
3181 new_eflags = 0; /* avoid warning */
3182#ifdef TARGET_X86_64
3183 if (shift == 2) {
3184 POPQ(sp, new_eip);
3185 POPQ(sp, new_cs);
3186 new_cs &= 0xffff;
3187 if (is_iret) {
3188 POPQ(sp, new_eflags);
3189 }
3190 } else
3191#endif
3192 if (shift == 1) {
3193 /* 32 bits */
3194 POPL(ssp, sp, sp_mask, new_eip);
3195 POPL(ssp, sp, sp_mask, new_cs);
3196 new_cs &= 0xffff;
3197 if (is_iret) {
3198 POPL(ssp, sp, sp_mask, new_eflags);
3199#if defined(VBOX) && defined(DEBUG)
3200 printf("iret: new CS %04X\n", new_cs);
3201 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3202 printf("iret: new EFLAGS %08X\n", new_eflags);
3203 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3204#endif
3205 if (new_eflags & VM_MASK)
3206 goto return_to_vm86;
3207 }
3208#ifdef VBOX
3209 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3210 {
3211#ifdef DEBUG
3212 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3213#endif
3214 new_cs = new_cs & 0xfffc;
3215 }
3216#endif
3217 } else {
3218 /* 16 bits */
3219 POPW(ssp, sp, sp_mask, new_eip);
3220 POPW(ssp, sp, sp_mask, new_cs);
3221 if (is_iret)
3222 POPW(ssp, sp, sp_mask, new_eflags);
3223 }
3224#ifdef DEBUG_PCALL
3225 if (loglevel & CPU_LOG_PCALL) {
3226 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3227 new_cs, new_eip, shift, addend);
3228 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3229 }
3230#endif
3231 if ((new_cs & 0xfffc) == 0)
3232 {
3233#if defined(VBOX) && defined(DEBUG)
3234 printf("new_cs & 0xfffc) == 0\n");
3235#endif
3236 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3237 }
3238 if (load_segment(&e1, &e2, new_cs) != 0)
3239 {
3240#if defined(VBOX) && defined(DEBUG)
3241 printf("load_segment failed\n");
3242#endif
3243 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3244 }
3245 if (!(e2 & DESC_S_MASK) ||
3246 !(e2 & DESC_CS_MASK))
3247 {
3248#if defined(VBOX) && defined(DEBUG)
3249 printf("e2 mask %08x\n", e2);
3250#endif
3251 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3252 }
3253 cpl = env->hflags & HF_CPL_MASK;
3254 rpl = new_cs & 3;
3255 if (rpl < cpl)
3256 {
3257#if defined(VBOX) && defined(DEBUG)
3258 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3259#endif
3260 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3261 }
3262 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3263 if (e2 & DESC_C_MASK) {
3264 if (dpl > rpl)
3265 {
3266#if defined(VBOX) && defined(DEBUG)
3267 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3268#endif
3269 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3270 }
3271 } else {
3272 if (dpl != rpl)
3273 {
3274#if defined(VBOX) && defined(DEBUG)
3275 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3276#endif
3277 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3278 }
3279 }
3280 if (!(e2 & DESC_P_MASK))
3281 {
3282#if defined(VBOX) && defined(DEBUG)
3283 printf("DESC_P_MASK e2=%08x\n", e2);
3284#endif
3285 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3286 }
3287
3288 sp += addend;
3289 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3290 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3291 /* return to same privilege level */
3292 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3293 get_seg_base(e1, e2),
3294 get_seg_limit(e1, e2),
3295 e2);
3296 } else {
3297 /* return to different privilege level */
3298#ifdef TARGET_X86_64
3299 if (shift == 2) {
3300 POPQ(sp, new_esp);
3301 POPQ(sp, new_ss);
3302 new_ss &= 0xffff;
3303 } else
3304#endif
3305 if (shift == 1) {
3306 /* 32 bits */
3307 POPL(ssp, sp, sp_mask, new_esp);
3308 POPL(ssp, sp, sp_mask, new_ss);
3309 new_ss &= 0xffff;
3310 } else {
3311 /* 16 bits */
3312 POPW(ssp, sp, sp_mask, new_esp);
3313 POPW(ssp, sp, sp_mask, new_ss);
3314 }
3315#ifdef DEBUG_PCALL
3316 if (loglevel & CPU_LOG_PCALL) {
3317 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3318 new_ss, new_esp);
3319 }
3320#endif
3321 if ((new_ss & 0xfffc) == 0) {
3322#ifdef TARGET_X86_64
3323 /* NULL ss is allowed in long mode if cpl != 3 */
3324 /* XXX: test CS64 ? */
3325 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3326 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3327 0, 0xffffffff,
3328 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3329 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3330 DESC_W_MASK | DESC_A_MASK);
3331 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3332 } else
3333#endif
3334 {
3335 raise_exception_err(EXCP0D_GPF, 0);
3336 }
3337 } else {
3338 if ((new_ss & 3) != rpl)
3339 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3340 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3341 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3342 if (!(ss_e2 & DESC_S_MASK) ||
3343 (ss_e2 & DESC_CS_MASK) ||
3344 !(ss_e2 & DESC_W_MASK))
3345 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3346 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3347 if (dpl != rpl)
3348 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3349 if (!(ss_e2 & DESC_P_MASK))
3350 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3351 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3352 get_seg_base(ss_e1, ss_e2),
3353 get_seg_limit(ss_e1, ss_e2),
3354 ss_e2);
3355 }
3356
3357 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3358 get_seg_base(e1, e2),
3359 get_seg_limit(e1, e2),
3360 e2);
3361 cpu_x86_set_cpl(env, rpl);
3362 sp = new_esp;
3363#ifdef TARGET_X86_64
3364 if (env->hflags & HF_CS64_MASK)
3365 sp_mask = -1;
3366 else
3367#endif
3368 sp_mask = get_sp_mask(ss_e2);
3369
3370 /* validate data segments */
3371 validate_seg(R_ES, rpl);
3372 validate_seg(R_DS, rpl);
3373 validate_seg(R_FS, rpl);
3374 validate_seg(R_GS, rpl);
3375
3376 sp += addend;
3377 }
3378 SET_ESP(sp, sp_mask);
3379 env->eip = new_eip;
3380 if (is_iret) {
3381 /* NOTE: 'cpl' is the _old_ CPL */
3382 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3383 if (cpl == 0)
3384#ifdef VBOX
3385 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3386#else
3387 eflags_mask |= IOPL_MASK;
3388#endif
3389 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3390 if (cpl <= iopl)
3391 eflags_mask |= IF_MASK;
3392 if (shift == 0)
3393 eflags_mask &= 0xffff;
3394 load_eflags(new_eflags, eflags_mask);
3395 }
3396 return;
3397
3398 return_to_vm86:
3399 POPL(ssp, sp, sp_mask, new_esp);
3400 POPL(ssp, sp, sp_mask, new_ss);
3401 POPL(ssp, sp, sp_mask, new_es);
3402 POPL(ssp, sp, sp_mask, new_ds);
3403 POPL(ssp, sp, sp_mask, new_fs);
3404 POPL(ssp, sp, sp_mask, new_gs);
3405
3406 /* modify processor state */
3407 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3408 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3409 load_seg_vm(R_CS, new_cs & 0xffff);
3410 cpu_x86_set_cpl(env, 3);
3411 load_seg_vm(R_SS, new_ss & 0xffff);
3412 load_seg_vm(R_ES, new_es & 0xffff);
3413 load_seg_vm(R_DS, new_ds & 0xffff);
3414 load_seg_vm(R_FS, new_fs & 0xffff);
3415 load_seg_vm(R_GS, new_gs & 0xffff);
3416
3417 env->eip = new_eip & 0xffff;
3418 ESP = new_esp;
3419}
3420
3421void helper_iret_protected(int shift, int next_eip)
3422{
3423 int tss_selector, type;
3424 uint32_t e1, e2;
3425
3426#ifdef VBOX
3427 e1 = e2 = 0;
3428 remR3TrapClear(env->pVM);
3429#endif
3430
3431 /* specific case for TSS */
3432 if (env->eflags & NT_MASK) {
3433#ifdef TARGET_X86_64
3434 if (env->hflags & HF_LMA_MASK)
3435 raise_exception_err(EXCP0D_GPF, 0);
3436#endif
3437 tss_selector = lduw_kernel(env->tr.base + 0);
3438 if (tss_selector & 4)
3439 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3440 if (load_segment(&e1, &e2, tss_selector) != 0)
3441 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3442 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3443 /* NOTE: we check both segment and busy TSS */
3444 if (type != 3)
3445 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3446 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3447 } else {
3448 helper_ret_protected(shift, 1, 0);
3449 }
3450 env->hflags2 &= ~HF2_NMI_MASK;
3451#ifdef USE_KQEMU
3452 if (kqemu_is_ok(env)) {
3453 CC_OP = CC_OP_EFLAGS;
3454 env->exception_index = -1;
3455 cpu_loop_exit();
3456 }
3457#endif
3458}
3459
3460void helper_lret_protected(int shift, int addend)
3461{
3462 helper_ret_protected(shift, 0, addend);
3463#ifdef USE_KQEMU
3464 if (kqemu_is_ok(env)) {
3465 env->exception_index = -1;
3466 cpu_loop_exit();
3467 }
3468#endif
3469}
3470
3471void helper_sysenter(void)
3472{
3473 if (env->sysenter_cs == 0) {
3474 raise_exception_err(EXCP0D_GPF, 0);
3475 }
3476 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3477 cpu_x86_set_cpl(env, 0);
3478
3479#ifdef TARGET_X86_64
3480 if (env->hflags & HF_LMA_MASK) {
3481 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3482 0, 0xffffffff,
3483 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3484 DESC_S_MASK |
3485 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3486 } else
3487#endif
3488 {
3489 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3490 0, 0xffffffff,
3491 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3492 DESC_S_MASK |
3493 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3494 }
3495 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3496 0, 0xffffffff,
3497 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3498 DESC_S_MASK |
3499 DESC_W_MASK | DESC_A_MASK);
3500 ESP = env->sysenter_esp;
3501 EIP = env->sysenter_eip;
3502}
3503
3504void helper_sysexit(int dflag)
3505{
3506 int cpl;
3507
3508 cpl = env->hflags & HF_CPL_MASK;
3509 if (env->sysenter_cs == 0 || cpl != 0) {
3510 raise_exception_err(EXCP0D_GPF, 0);
3511 }
3512 cpu_x86_set_cpl(env, 3);
3513#ifdef TARGET_X86_64
3514 if (dflag == 2) {
3515 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3516 0, 0xffffffff,
3517 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3518 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3519 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3520 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3521 0, 0xffffffff,
3522 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3523 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3524 DESC_W_MASK | DESC_A_MASK);
3525 } else
3526#endif
3527 {
3528 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3529 0, 0xffffffff,
3530 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3531 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3532 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3533 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3534 0, 0xffffffff,
3535 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3536 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3537 DESC_W_MASK | DESC_A_MASK);
3538 }
3539 ESP = ECX;
3540 EIP = EDX;
3541#ifdef USE_KQEMU
3542 if (kqemu_is_ok(env)) {
3543 env->exception_index = -1;
3544 cpu_loop_exit();
3545 }
3546#endif
3547}
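/* Illustrative note (not in the original source): SYSENTER/SYSEXIT derive
 * all selectors from IA32_SYSENTER_CS and load them as flat 4GB segments:
 *   SYSENTER:          CS = MSR & ~3,     SS = CS + 8
 *   SYSEXIT (32-bit):  CS = (MSR+16) | 3, SS = (MSR+24) | 3
 *   SYSEXIT (64-bit):  CS = (MSR+32) | 3, SS = (MSR+40) | 3
 * ESP/EIP come from the SYSENTER_ESP/EIP MSRs on entry and from ECX/EDX on
 * exit, exactly as the two helpers above implement.
 */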
3548
3549#if defined(CONFIG_USER_ONLY)
3550target_ulong helper_read_crN(int reg)
3551{
3552 return 0;
3553}
3554
3555void helper_write_crN(int reg, target_ulong t0)
3556{
3557}
3558#else
3559target_ulong helper_read_crN(int reg)
3560{
3561 target_ulong val;
3562
3563 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3564 switch(reg) {
3565 default:
3566 val = env->cr[reg];
3567 break;
3568 case 8:
3569 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3570 val = cpu_get_apic_tpr(env);
3571 } else {
3572 val = env->v_tpr;
3573 }
3574 break;
3575 }
3576 return val;
3577}
3578
3579void helper_write_crN(int reg, target_ulong t0)
3580{
3581 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3582 switch(reg) {
3583 case 0:
3584 cpu_x86_update_cr0(env, t0);
3585 break;
3586 case 3:
3587 cpu_x86_update_cr3(env, t0);
3588 break;
3589 case 4:
3590 cpu_x86_update_cr4(env, t0);
3591 break;
3592 case 8:
3593 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3594 cpu_set_apic_tpr(env, t0);
3595 }
3596 env->v_tpr = t0 & 0x0f;
3597 break;
3598 default:
3599 env->cr[reg] = t0;
3600 break;
3601 }
3602}
3603#endif
3604
3605void helper_lmsw(target_ulong t0)
3606{
3607 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3608 if already set to one. */
3609 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3610 helper_write_crN(0, t0);
3611}
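/* Worked example (not in the original source): with the CR0 low nibble at
 * 0xB (PE|MP|TS set), "lmsw 0x1" yields (0xB & ~0xe) | (0x1 & 0xf) = 0x1,
 * i.e. MP/EM/TS follow the operand while PE can only be set, never cleared,
 * which is the LMSW restriction the comment above refers to.
 */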
3612
3613void helper_clts(void)
3614{
3615 env->cr[0] &= ~CR0_TS_MASK;
3616 env->hflags &= ~HF_TS_MASK;
3617}
3618
3619/* XXX: do more */
3620void helper_movl_drN_T0(int reg, target_ulong t0)
3621{
3622 env->dr[reg] = t0;
3623}
3624
3625void helper_invlpg(target_ulong addr)
3626{
3627 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3628 tlb_flush_page(env, addr);
3629}
3630
3631void helper_rdtsc(void)
3632{
3633 uint64_t val;
3634
3635 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3636 raise_exception(EXCP0D_GPF);
3637 }
3638 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3639
3640 val = cpu_get_tsc(env) + env->tsc_offset;
3641 EAX = (uint32_t)(val);
3642 EDX = (uint32_t)(val >> 32);
3643}
3644
3645#ifdef VBOX
3646void helper_rdtscp(void)
3647{
3648 uint64_t val;
3649 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3650 raise_exception(EXCP0D_GPF);
3651 }
3652
3653 val = cpu_get_tsc(env);
3654 EAX = (uint32_t)(val);
3655 EDX = (uint32_t)(val >> 32);
3656 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3657}
3658#endif
3659
3660void helper_rdpmc(void)
3661{
3662 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3663 raise_exception(EXCP0D_GPF);
3664 }
3665 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3666
3667 /* currently unimplemented */
3668 raise_exception_err(EXCP06_ILLOP, 0);
3669}
3670
3671#if defined(CONFIG_USER_ONLY)
3672void helper_wrmsr(void)
3673{
3674}
3675
3676void helper_rdmsr(void)
3677{
3678}
3679#else
3680void helper_wrmsr(void)
3681{
3682 uint64_t val;
3683
3684 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3685
3686 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3687
3688 switch((uint32_t)ECX) {
3689 case MSR_IA32_SYSENTER_CS:
3690 env->sysenter_cs = val & 0xffff;
3691 break;
3692 case MSR_IA32_SYSENTER_ESP:
3693 env->sysenter_esp = val;
3694 break;
3695 case MSR_IA32_SYSENTER_EIP:
3696 env->sysenter_eip = val;
3697 break;
3698 case MSR_IA32_APICBASE:
3699 cpu_set_apic_base(env, val);
3700 break;
3701 case MSR_EFER:
3702 {
3703 uint64_t update_mask;
3704 update_mask = 0;
3705 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3706 update_mask |= MSR_EFER_SCE;
3707 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3708 update_mask |= MSR_EFER_LME;
3709 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3710 update_mask |= MSR_EFER_FFXSR;
3711 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3712 update_mask |= MSR_EFER_NXE;
3713 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3714 update_mask |= MSR_EFER_SVME;
3715 cpu_load_efer(env, (env->efer & ~update_mask) |
3716 (val & update_mask));
3717 }
3718 break;
3719 case MSR_STAR:
3720 env->star = val;
3721 break;
3722 case MSR_PAT:
3723 env->pat = val;
3724 break;
3725 case MSR_VM_HSAVE_PA:
3726 env->vm_hsave = val;
3727 break;
3728#ifdef TARGET_X86_64
3729 case MSR_LSTAR:
3730 env->lstar = val;
3731 break;
3732 case MSR_CSTAR:
3733 env->cstar = val;
3734 break;
3735 case MSR_FMASK:
3736 env->fmask = val;
3737 break;
3738 case MSR_FSBASE:
3739 env->segs[R_FS].base = val;
3740 break;
3741 case MSR_GSBASE:
3742 env->segs[R_GS].base = val;
3743 break;
3744 case MSR_KERNELGSBASE:
3745 env->kernelgsbase = val;
3746 break;
3747#endif
3748 default:
3749#ifndef VBOX
3750 /* XXX: exception ? */
3751 break;
3752#else /* VBOX */
3753 {
3754 uint32_t ecx = (uint32_t)ECX;
3755 /* In X2APIC specification this range is reserved for APIC control. */
3756 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3757 cpu_apic_wrmsr(env, ecx, val);
3758 /** @todo else exception? */
3759 break;
3760 }
3761 case MSR_K8_TSC_AUX:
3762 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3763 break;
3764#endif /* VBOX */
3765 }
3766}
3767
3768void helper_rdmsr(void)
3769{
3770 uint64_t val;
3771
3772 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3773
3774 switch((uint32_t)ECX) {
3775 case MSR_IA32_SYSENTER_CS:
3776 val = env->sysenter_cs;
3777 break;
3778 case MSR_IA32_SYSENTER_ESP:
3779 val = env->sysenter_esp;
3780 break;
3781 case MSR_IA32_SYSENTER_EIP:
3782 val = env->sysenter_eip;
3783 break;
3784 case MSR_IA32_APICBASE:
3785 val = cpu_get_apic_base(env);
3786 break;
3787 case MSR_EFER:
3788 val = env->efer;
3789 break;
3790 case MSR_STAR:
3791 val = env->star;
3792 break;
3793 case MSR_PAT:
3794 val = env->pat;
3795 break;
3796 case MSR_VM_HSAVE_PA:
3797 val = env->vm_hsave;
3798 break;
3799 case MSR_IA32_PERF_STATUS:
3800 /* tsc_increment_by_tick */
3801 val = 1000ULL;
3802 /* CPU multiplier */
3803 val |= (((uint64_t)4ULL) << 40);
3804 break;
3805#ifdef TARGET_X86_64
3806 case MSR_LSTAR:
3807 val = env->lstar;
3808 break;
3809 case MSR_CSTAR:
3810 val = env->cstar;
3811 break;
3812 case MSR_FMASK:
3813 val = env->fmask;
3814 break;
3815 case MSR_FSBASE:
3816 val = env->segs[R_FS].base;
3817 break;
3818 case MSR_GSBASE:
3819 val = env->segs[R_GS].base;
3820 break;
3821 case MSR_KERNELGSBASE:
3822 val = env->kernelgsbase;
3823 break;
3824#endif
3825#ifdef USE_KQEMU
3826 case MSR_QPI_COMMBASE:
3827 if (env->kqemu_enabled) {
3828 val = kqemu_comm_base;
3829 } else {
3830 val = 0;
3831 }
3832 break;
3833#endif
3834 default:
3835#ifndef VBOX
3836 /* XXX: exception ? */
3837 val = 0;
3838 break;
3839#else /* VBOX */
3840 {
3841 uint32_t ecx = (uint32_t)ECX;
3842 /* In X2APIC specification this range is reserved for APIC control. */
3843 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3844 val = cpu_apic_rdmsr(env, ecx);
3845 else
3846 val = 0; /** @todo else exception? */
3847 break;
3848 }
3849 case MSR_K8_TSC_AUX:
3850 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3851 break;
3852#endif /* VBOX */
3853 }
3854 EAX = (uint32_t)(val);
3855 EDX = (uint32_t)(val >> 32);
3856}
3857#endif
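/* Illustrative note (not in the original source): WRMSR/RDMSR move 64-bit
 * values through EDX:EAX with the MSR index in ECX. Writing, say,
 * IA32_SYSENTER_EIP = 0xffffffff81000000 therefore means loading
 * ECX=0x176, EDX=0xffffffff, EAX=0x81000000 before the instruction,
 * matching the packing and unpacking done in the two helpers above.
 */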
3858
3859target_ulong helper_lsl(target_ulong selector1)
3860{
3861 unsigned int limit;
3862 uint32_t e1, e2, eflags, selector;
3863 int rpl, dpl, cpl, type;
3864
3865 selector = selector1 & 0xffff;
3866 eflags = cc_table[CC_OP].compute_all();
3867 if (load_segment(&e1, &e2, selector) != 0)
3868 goto fail;
3869 rpl = selector & 3;
3870 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3871 cpl = env->hflags & HF_CPL_MASK;
3872 if (e2 & DESC_S_MASK) {
3873 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3874 /* conforming */
3875 } else {
3876 if (dpl < cpl || dpl < rpl)
3877 goto fail;
3878 }
3879 } else {
3880 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3881 switch(type) {
3882 case 1:
3883 case 2:
3884 case 3:
3885 case 9:
3886 case 11:
3887 break;
3888 default:
3889 goto fail;
3890 }
3891 if (dpl < cpl || dpl < rpl) {
3892 fail:
3893 CC_SRC = eflags & ~CC_Z;
3894 return 0;
3895 }
3896 }
3897 limit = get_seg_limit(e1, e2);
3898 CC_SRC = eflags | CC_Z;
3899 return limit;
3900}
3901
3902target_ulong helper_lar(target_ulong selector1)
3903{
3904 uint32_t e1, e2, eflags, selector;
3905 int rpl, dpl, cpl, type;
3906
3907 selector = selector1 & 0xffff;
3908 eflags = cc_table[CC_OP].compute_all();
3909 if ((selector & 0xfffc) == 0)
3910 goto fail;
3911 if (load_segment(&e1, &e2, selector) != 0)
3912 goto fail;
3913 rpl = selector & 3;
3914 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3915 cpl = env->hflags & HF_CPL_MASK;
3916 if (e2 & DESC_S_MASK) {
3917 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3918 /* conforming */
3919 } else {
3920 if (dpl < cpl || dpl < rpl)
3921 goto fail;
3922 }
3923 } else {
3924 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3925 switch(type) {
3926 case 1:
3927 case 2:
3928 case 3:
3929 case 4:
3930 case 5:
3931 case 9:
3932 case 11:
3933 case 12:
3934 break;
3935 default:
3936 goto fail;
3937 }
3938 if (dpl < cpl || dpl < rpl) {
3939 fail:
3940 CC_SRC = eflags & ~CC_Z;
3941 return 0;
3942 }
3943 }
3944 CC_SRC = eflags | CC_Z;
3945 return e2 & 0x00f0ff00;
3946}
3947
3948void helper_verr(target_ulong selector1)
3949{
3950 uint32_t e1, e2, eflags, selector;
3951 int rpl, dpl, cpl;
3952
3953 selector = selector1 & 0xffff;
3954 eflags = cc_table[CC_OP].compute_all();
3955 if ((selector & 0xfffc) == 0)
3956 goto fail;
3957 if (load_segment(&e1, &e2, selector) != 0)
3958 goto fail;
3959 if (!(e2 & DESC_S_MASK))
3960 goto fail;
3961 rpl = selector & 3;
3962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3963 cpl = env->hflags & HF_CPL_MASK;
3964 if (e2 & DESC_CS_MASK) {
3965 if (!(e2 & DESC_R_MASK))
3966 goto fail;
3967 if (!(e2 & DESC_C_MASK)) {
3968 if (dpl < cpl || dpl < rpl)
3969 goto fail;
3970 }
3971 } else {
3972 if (dpl < cpl || dpl < rpl) {
3973 fail:
3974 CC_SRC = eflags & ~CC_Z;
3975 return;
3976 }
3977 }
3978 CC_SRC = eflags | CC_Z;
3979}
3980
3981void helper_verw(target_ulong selector1)
3982{
3983 uint32_t e1, e2, eflags, selector;
3984 int rpl, dpl, cpl;
3985
3986 selector = selector1 & 0xffff;
3987 eflags = cc_table[CC_OP].compute_all();
3988 if ((selector & 0xfffc) == 0)
3989 goto fail;
3990 if (load_segment(&e1, &e2, selector) != 0)
3991 goto fail;
3992 if (!(e2 & DESC_S_MASK))
3993 goto fail;
3994 rpl = selector & 3;
3995 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3996 cpl = env->hflags & HF_CPL_MASK;
3997 if (e2 & DESC_CS_MASK) {
3998 goto fail;
3999 } else {
4000 if (dpl < cpl || dpl < rpl)
4001 goto fail;
4002 if (!(e2 & DESC_W_MASK)) {
4003 fail:
4004 CC_SRC = eflags & ~CC_Z;
4005 return;
4006 }
4007 }
4008 CC_SRC = eflags | CC_Z;
4009}
4010
4011/* x87 FPU helpers */
4012
4013static void fpu_set_exception(int mask)
4014{
4015 env->fpus |= mask;
4016 if (env->fpus & (~env->fpuc & FPUC_EM))
4017 env->fpus |= FPUS_SE | FPUS_B;
4018}
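/*
 * Note that fpu_set_exception() always records the exception bit in FSW, but
 * only sets FPUS_SE (error summary) and FPUS_B (busy) when that exception is
 * unmasked in the control word. A pending summary bit is what later makes
 * helper_fwait() deliver #MF (or assert FERR) via fpu_raise_exception().
 */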
4019
4020#ifndef VBOX
4021static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4022#else /* VBOX */
4023DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4024#endif /* VBOX */
4025{
4026 if (b == 0.0)
4027 fpu_set_exception(FPUS_ZE);
4028 return a / b;
4029}
4030
4031void fpu_raise_exception(void)
4032{
4033 if (env->cr[0] & CR0_NE_MASK) {
4034 raise_exception(EXCP10_COPR);
4035 }
4036#if !defined(CONFIG_USER_ONLY)
4037 else {
4038 cpu_set_ferr(env);
4039 }
4040#endif
4041}
4042
4043void helper_flds_FT0(uint32_t val)
4044{
4045 union {
4046 float32 f;
4047 uint32_t i;
4048 } u;
4049 u.i = val;
4050 FT0 = float32_to_floatx(u.f, &env->fp_status);
4051}
4052
4053void helper_fldl_FT0(uint64_t val)
4054{
4055 union {
4056 float64 f;
4057 uint64_t i;
4058 } u;
4059 u.i = val;
4060 FT0 = float64_to_floatx(u.f, &env->fp_status);
4061}
4062
4063void helper_fildl_FT0(int32_t val)
4064{
4065 FT0 = int32_to_floatx(val, &env->fp_status);
4066}
4067
4068void helper_flds_ST0(uint32_t val)
4069{
4070 int new_fpstt;
4071 union {
4072 float32 f;
4073 uint32_t i;
4074 } u;
4075 new_fpstt = (env->fpstt - 1) & 7;
4076 u.i = val;
4077 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4078 env->fpstt = new_fpstt;
4079 env->fptags[new_fpstt] = 0; /* validate stack entry */
4080}
4081
4082void helper_fldl_ST0(uint64_t val)
4083{
4084 int new_fpstt;
4085 union {
4086 float64 f;
4087 uint64_t i;
4088 } u;
4089 new_fpstt = (env->fpstt - 1) & 7;
4090 u.i = val;
4091 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4092 env->fpstt = new_fpstt;
4093 env->fptags[new_fpstt] = 0; /* validate stack entry */
4094}
4095
4096void helper_fildl_ST0(int32_t val)
4097{
4098 int new_fpstt;
4099 new_fpstt = (env->fpstt - 1) & 7;
4100 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4101 env->fpstt = new_fpstt;
4102 env->fptags[new_fpstt] = 0; /* validate stack entry */
4103}
4104
4105void helper_fildll_ST0(int64_t val)
4106{
4107 int new_fpstt;
4108 new_fpstt = (env->fpstt - 1) & 7;
4109 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4110 env->fpstt = new_fpstt;
4111 env->fptags[new_fpstt] = 0; /* validate stack entry */
4112}
4113
4114#ifndef VBOX
4115uint32_t helper_fsts_ST0(void)
4116#else
4117RTCCUINTREG helper_fsts_ST0(void)
4118#endif
4119{
4120 union {
4121 float32 f;
4122 uint32_t i;
4123 } u;
4124 u.f = floatx_to_float32(ST0, &env->fp_status);
4125 return u.i;
4126}
4127
4128uint64_t helper_fstl_ST0(void)
4129{
4130 union {
4131 float64 f;
4132 uint64_t i;
4133 } u;
4134 u.f = floatx_to_float64(ST0, &env->fp_status);
4135 return u.i;
4136}
4137#ifndef VBOX
4138int32_t helper_fist_ST0(void)
4139#else
4140RTCCINTREG helper_fist_ST0(void)
4141#endif
4142{
4143 int32_t val;
4144 val = floatx_to_int32(ST0, &env->fp_status);
4145 if (val != (int16_t)val)
4146 val = -32768;
4147 return val;
4148}
4149
4150#ifndef VBOX
4151int32_t helper_fistl_ST0(void)
4152#else
4153RTCCINTREG helper_fistl_ST0(void)
4154#endif
4155{
4156 int32_t val;
4157 val = floatx_to_int32(ST0, &env->fp_status);
4158 return val;
4159}
4160
4161int64_t helper_fistll_ST0(void)
4162{
4163 int64_t val;
4164 val = floatx_to_int64(ST0, &env->fp_status);
4165 return val;
4166}
4167
4168#ifndef VBOX
4169int32_t helper_fistt_ST0(void)
4170#else
4171RTCCINTREG helper_fistt_ST0(void)
4172#endif
4173{
4174 int32_t val;
4175 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4176 if (val != (int16_t)val)
4177 val = -32768;
4178 return val;
4179}
4180
4181#ifndef VBOX
4182int32_t helper_fisttl_ST0(void)
4183#else
4184RTCCINTREG helper_fisttl_ST0(void)
4185#endif
4186{
4187 int32_t val;
4188 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4189 return val;
4190}
4191
4192int64_t helper_fisttll_ST0(void)
4193{
4194 int64_t val;
4195 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4196 return val;
4197}
4198
4199void helper_fldt_ST0(target_ulong ptr)
4200{
4201 int new_fpstt;
4202 new_fpstt = (env->fpstt - 1) & 7;
4203 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4204 env->fpstt = new_fpstt;
4205 env->fptags[new_fpstt] = 0; /* validate stack entry */
4206}
4207
4208void helper_fstt_ST0(target_ulong ptr)
4209{
4210 helper_fstt(ST0, ptr);
4211}
4212
4213void helper_fpush(void)
4214{
4215 fpush();
4216}
4217
4218void helper_fpop(void)
4219{
4220 fpop();
4221}
4222
4223void helper_fdecstp(void)
4224{
4225 env->fpstt = (env->fpstt - 1) & 7;
4226 env->fpus &= (~0x4700);
4227}
4228
4229void helper_fincstp(void)
4230{
4231 env->fpstt = (env->fpstt + 1) & 7;
4232 env->fpus &= (~0x4700);
4233}
4234
4235/* FPU move */
4236
4237void helper_ffree_STN(int st_index)
4238{
4239 env->fptags[(env->fpstt + st_index) & 7] = 1;
4240}
4241
4242void helper_fmov_ST0_FT0(void)
4243{
4244 ST0 = FT0;
4245}
4246
4247void helper_fmov_FT0_STN(int st_index)
4248{
4249 FT0 = ST(st_index);
4250}
4251
4252void helper_fmov_ST0_STN(int st_index)
4253{
4254 ST0 = ST(st_index);
4255}
4256
4257void helper_fmov_STN_ST0(int st_index)
4258{
4259 ST(st_index) = ST0;
4260}
4261
4262void helper_fxchg_ST0_STN(int st_index)
4263{
4264 CPU86_LDouble tmp;
4265 tmp = ST(st_index);
4266 ST(st_index) = ST0;
4267 ST0 = tmp;
4268}
4269
4270/* FPU operations */
4271
4272static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
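/*
 * floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2 (unordered),
 * so indexing with ret + 1 maps the result onto the x87 condition codes:
 *   less      -> C0          (0x0100)
 *   equal     -> C3          (0x4000)
 *   greater   -> none        (0x0000)
 *   unordered -> C3|C2|C0    (0x4500)
 * fcomi_ccval below plays the same role for the FCOMI variants, which set
 * CF/ZF/PF in EFLAGS instead.
 */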
4273
4274void helper_fcom_ST0_FT0(void)
4275{
4276 int ret;
4277
4278 ret = floatx_compare(ST0, FT0, &env->fp_status);
4279 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4280 FORCE_RET();
4281}
4282
4283void helper_fucom_ST0_FT0(void)
4284{
4285 int ret;
4286
4287 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
 4288 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4289 FORCE_RET();
4290}
4291
4292static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4293
4294void helper_fcomi_ST0_FT0(void)
4295{
4296 int eflags;
4297 int ret;
4298
4299 ret = floatx_compare(ST0, FT0, &env->fp_status);
4300 eflags = cc_table[CC_OP].compute_all();
4301 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4302 CC_SRC = eflags;
4303 FORCE_RET();
4304}
4305
4306void helper_fucomi_ST0_FT0(void)
4307{
4308 int eflags;
4309 int ret;
4310
4311 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4312 eflags = cc_table[CC_OP].compute_all();
4313 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4314 CC_SRC = eflags;
4315 FORCE_RET();
4316}
4317
4318void helper_fadd_ST0_FT0(void)
4319{
4320 ST0 += FT0;
4321}
4322
4323void helper_fmul_ST0_FT0(void)
4324{
4325 ST0 *= FT0;
4326}
4327
4328void helper_fsub_ST0_FT0(void)
4329{
4330 ST0 -= FT0;
4331}
4332
4333void helper_fsubr_ST0_FT0(void)
4334{
4335 ST0 = FT0 - ST0;
4336}
4337
4338void helper_fdiv_ST0_FT0(void)
4339{
4340 ST0 = helper_fdiv(ST0, FT0);
4341}
4342
4343void helper_fdivr_ST0_FT0(void)
4344{
4345 ST0 = helper_fdiv(FT0, ST0);
4346}
4347
4348/* fp operations between STN and ST0 */
4349
4350void helper_fadd_STN_ST0(int st_index)
4351{
4352 ST(st_index) += ST0;
4353}
4354
4355void helper_fmul_STN_ST0(int st_index)
4356{
4357 ST(st_index) *= ST0;
4358}
4359
4360void helper_fsub_STN_ST0(int st_index)
4361{
4362 ST(st_index) -= ST0;
4363}
4364
4365void helper_fsubr_STN_ST0(int st_index)
4366{
4367 CPU86_LDouble *p;
4368 p = &ST(st_index);
4369 *p = ST0 - *p;
4370}
4371
4372void helper_fdiv_STN_ST0(int st_index)
4373{
4374 CPU86_LDouble *p;
4375 p = &ST(st_index);
4376 *p = helper_fdiv(*p, ST0);
4377}
4378
4379void helper_fdivr_STN_ST0(int st_index)
4380{
4381 CPU86_LDouble *p;
4382 p = &ST(st_index);
4383 *p = helper_fdiv(ST0, *p);
4384}
4385
4386/* misc FPU operations */
4387void helper_fchs_ST0(void)
4388{
4389 ST0 = floatx_chs(ST0);
4390}
4391
4392void helper_fabs_ST0(void)
4393{
4394 ST0 = floatx_abs(ST0);
4395}
4396
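/*
 * The constant load helpers below index the f15rk[] table of x87 constants;
 * judging from their use here, the layout is:
 *   [0] +0.0, [1] +1.0, [2] pi, [3] log10(2), [4] ln(2), [5] log2(e), [6] log2(10).
 */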
4397void helper_fld1_ST0(void)
4398{
4399 ST0 = f15rk[1];
4400}
4401
4402void helper_fldl2t_ST0(void)
4403{
4404 ST0 = f15rk[6];
4405}
4406
4407void helper_fldl2e_ST0(void)
4408{
4409 ST0 = f15rk[5];
4410}
4411
4412void helper_fldpi_ST0(void)
4413{
4414 ST0 = f15rk[2];
4415}
4416
4417void helper_fldlg2_ST0(void)
4418{
4419 ST0 = f15rk[3];
4420}
4421
4422void helper_fldln2_ST0(void)
4423{
4424 ST0 = f15rk[4];
4425}
4426
4427void helper_fldz_ST0(void)
4428{
4429 ST0 = f15rk[0];
4430}
4431
4432void helper_fldz_FT0(void)
4433{
4434 FT0 = f15rk[0];
4435}
4436
4437#ifndef VBOX
4438uint32_t helper_fnstsw(void)
4439#else
4440RTCCUINTREG helper_fnstsw(void)
4441#endif
4442{
4443 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4444}
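/*
 * In the FPU status word the top-of-stack field lives in bits 11-13, which is
 * why the expression above masks out 0x3800 and merges env->fpstt at bit 11.
 */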
4445
4446#ifndef VBOX
4447uint32_t helper_fnstcw(void)
4448#else
4449RTCCUINTREG helper_fnstcw(void)
4450#endif
4451{
4452 return env->fpuc;
4453}
4454
4455static void update_fp_status(void)
4456{
4457 int rnd_type;
4458
4459 /* set rounding mode */
4460 switch(env->fpuc & RC_MASK) {
4461 default:
4462 case RC_NEAR:
4463 rnd_type = float_round_nearest_even;
4464 break;
4465 case RC_DOWN:
4466 rnd_type = float_round_down;
4467 break;
4468 case RC_UP:
4469 rnd_type = float_round_up;
4470 break;
4471 case RC_CHOP:
4472 rnd_type = float_round_to_zero;
4473 break;
4474 }
4475 set_float_rounding_mode(rnd_type, &env->fp_status);
4476#ifdef FLOATX80
4477 switch((env->fpuc >> 8) & 3) {
4478 case 0:
4479 rnd_type = 32;
4480 break;
4481 case 2:
4482 rnd_type = 64;
4483 break;
4484 case 3:
4485 default:
4486 rnd_type = 80;
4487 break;
4488 }
4489 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4490#endif
4491}
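/*
 * Control word fields decoded above: bits 10-11 (RC_MASK) select the rounding
 * mode, and bits 8-9 hold the precision control (hence "(env->fpuc >> 8) & 3"):
 * 0 = single (32-bit), 2 = double (64-bit), 3 = extended (80-bit), 1 reserved.
 */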
4492
4493void helper_fldcw(uint32_t val)
4494{
4495 env->fpuc = val;
4496 update_fp_status();
4497}
4498
4499void helper_fclex(void)
4500{
4501 env->fpus &= 0x7f00;
4502}
4503
4504void helper_fwait(void)
4505{
4506 if (env->fpus & FPUS_SE)
4507 fpu_raise_exception();
4508 FORCE_RET();
4509}
4510
4511void helper_fninit(void)
4512{
4513 env->fpus = 0;
4514 env->fpstt = 0;
4515 env->fpuc = 0x37f;
4516 env->fptags[0] = 1;
4517 env->fptags[1] = 1;
4518 env->fptags[2] = 1;
4519 env->fptags[3] = 1;
4520 env->fptags[4] = 1;
4521 env->fptags[5] = 1;
4522 env->fptags[6] = 1;
4523 env->fptags[7] = 1;
4524}
4525
4526/* BCD ops */
4527
4528void helper_fbld_ST0(target_ulong ptr)
4529{
4530 CPU86_LDouble tmp;
4531 uint64_t val;
4532 unsigned int v;
4533 int i;
4534
4535 val = 0;
4536 for(i = 8; i >= 0; i--) {
4537 v = ldub(ptr + i);
4538 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4539 }
4540 tmp = val;
4541 if (ldub(ptr + 9) & 0x80)
4542 tmp = -tmp;
4543 fpush();
4544 ST0 = tmp;
4545}
4546
4547void helper_fbst_ST0(target_ulong ptr)
4548{
4549 int v;
4550 target_ulong mem_ref, mem_end;
4551 int64_t val;
4552
4553 val = floatx_to_int64(ST0, &env->fp_status);
4554 mem_ref = ptr;
4555 mem_end = mem_ref + 9;
4556 if (val < 0) {
4557 stb(mem_end, 0x80);
4558 val = -val;
4559 } else {
4560 stb(mem_end, 0x00);
4561 }
4562 while (mem_ref < mem_end) {
4563 if (val == 0)
4564 break;
4565 v = val % 100;
4566 val = val / 100;
4567 v = ((v / 10) << 4) | (v % 10);
4568 stb(mem_ref++, v);
4569 }
4570 while (mem_ref < mem_end) {
4571 stb(mem_ref++, 0);
4572 }
4573}
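/*
 * Packed BCD layout used by fbld/fbst above: nine bytes of two BCD digits each
 * (least significant pair first) followed by a sign byte at offset 9, so e.g.
 * +1234 is stored as 34 12 00 00 00 00 00 00 00 00.
 */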
4574
4575void helper_f2xm1(void)
4576{
4577 ST0 = pow(2.0,ST0) - 1.0;
4578}
4579
4580void helper_fyl2x(void)
4581{
4582 CPU86_LDouble fptemp;
4583
4584 fptemp = ST0;
4585 if (fptemp>0.0){
4586 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4587 ST1 *= fptemp;
4588 fpop();
4589 } else {
4590 env->fpus &= (~0x4700);
4591 env->fpus |= 0x400;
4592 }
4593}
4594
4595void helper_fptan(void)
4596{
4597 CPU86_LDouble fptemp;
4598
4599 fptemp = ST0;
4600 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4601 env->fpus |= 0x400;
4602 } else {
4603 ST0 = tan(fptemp);
4604 fpush();
4605 ST0 = 1.0;
4606 env->fpus &= (~0x400); /* C2 <-- 0 */
4607 /* the above code is for |arg| < 2**52 only */
4608 }
4609}
4610
4611void helper_fpatan(void)
4612{
4613 CPU86_LDouble fptemp, fpsrcop;
4614
4615 fpsrcop = ST1;
4616 fptemp = ST0;
4617 ST1 = atan2(fpsrcop,fptemp);
4618 fpop();
4619}
4620
4621void helper_fxtract(void)
4622{
4623 CPU86_LDoubleU temp;
4624 unsigned int expdif;
4625
4626 temp.d = ST0;
4627 expdif = EXPD(temp) - EXPBIAS;
4628 /*DP exponent bias*/
4629 ST0 = expdif;
4630 fpush();
4631 BIASEXPONENT(temp);
4632 ST0 = temp.d;
4633}
4634
4635#ifdef VBOX
4636#ifdef _MSC_VER
4637/* MSC cannot divide by zero */
4638extern double _Nan;
4639#define NaN _Nan
4640#else
4641#define NaN (0.0 / 0.0)
4642#endif
4643#endif /* VBOX */
4644
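/*
 * In the VBOX variants of fprem1/fprem below, "ST0 != ST0" is used as a
 * portable substitute for isnan(ST0), since a NaN is the only value that
 * compares unequal to itself; the isinf(ST0) test of the non-VBOX path is
 * dropped there.
 */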
4645void helper_fprem1(void)
4646{
4647 CPU86_LDouble dblq, fpsrcop, fptemp;
4648 CPU86_LDoubleU fpsrcop1, fptemp1;
4649 int expdif;
4650 signed long long int q;
4651
4652#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4653 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4654#else
4655 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4656#endif
4657 ST0 = 0.0 / 0.0; /* NaN */
4658 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4659 return;
4660 }
4661
4662 fpsrcop = ST0;
4663 fptemp = ST1;
4664 fpsrcop1.d = fpsrcop;
4665 fptemp1.d = fptemp;
4666 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4667
4668 if (expdif < 0) {
4669 /* optimisation? taken from the AMD docs */
4670 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4671 /* ST0 is unchanged */
4672 return;
4673 }
4674
4675 if (expdif < 53) {
4676 dblq = fpsrcop / fptemp;
4677 /* round dblq towards nearest integer */
4678 dblq = rint(dblq);
4679 ST0 = fpsrcop - fptemp * dblq;
4680
4681 /* convert dblq to q by truncating towards zero */
4682 if (dblq < 0.0)
4683 q = (signed long long int)(-dblq);
4684 else
4685 q = (signed long long int)dblq;
4686
4687 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4688 /* (C0,C3,C1) <-- (q2,q1,q0) */
4689 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4690 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4691 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4692 } else {
4693 env->fpus |= 0x400; /* C2 <-- 1 */
4694 fptemp = pow(2.0, expdif - 50);
4695 fpsrcop = (ST0 / ST1) / fptemp;
4696 /* fpsrcop = integer obtained by chopping */
4697 fpsrcop = (fpsrcop < 0.0) ?
4698 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4699 ST0 -= (ST1 * fpsrcop * fptemp);
4700 }
4701}
4702
4703void helper_fprem(void)
4704{
4705 CPU86_LDouble dblq, fpsrcop, fptemp;
4706 CPU86_LDoubleU fpsrcop1, fptemp1;
4707 int expdif;
4708 signed long long int q;
4709
4710#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4711 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4712#else
4713 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4714#endif
4715 ST0 = 0.0 / 0.0; /* NaN */
4716 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4717 return;
4718 }
4719
4720 fpsrcop = (CPU86_LDouble)ST0;
4721 fptemp = (CPU86_LDouble)ST1;
4722 fpsrcop1.d = fpsrcop;
4723 fptemp1.d = fptemp;
4724 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4725
4726 if (expdif < 0) {
4727 /* optimisation? taken from the AMD docs */
4728 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4729 /* ST0 is unchanged */
4730 return;
4731 }
4732
4733 if ( expdif < 53 ) {
4734 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4735 /* round dblq towards zero */
4736 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4737 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4738
4739 /* convert dblq to q by truncating towards zero */
4740 if (dblq < 0.0)
4741 q = (signed long long int)(-dblq);
4742 else
4743 q = (signed long long int)dblq;
4744
4745 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4746 /* (C0,C3,C1) <-- (q2,q1,q0) */
4747 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4748 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4749 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4750 } else {
4751 int N = 32 + (expdif % 32); /* as per AMD docs */
4752 env->fpus |= 0x400; /* C2 <-- 1 */
4753 fptemp = pow(2.0, (double)(expdif - N));
4754 fpsrcop = (ST0 / ST1) / fptemp;
4755 /* fpsrcop = integer obtained by chopping */
4756 fpsrcop = (fpsrcop < 0.0) ?
4757 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4758 ST0 -= (ST1 * fpsrcop * fptemp);
4759 }
4760}
4761
4762void helper_fyl2xp1(void)
4763{
4764 CPU86_LDouble fptemp;
4765
4766 fptemp = ST0;
4767 if ((fptemp+1.0)>0.0) {
4768 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4769 ST1 *= fptemp;
4770 fpop();
4771 } else {
4772 env->fpus &= (~0x4700);
4773 env->fpus |= 0x400;
4774 }
4775}
4776
4777void helper_fsqrt(void)
4778{
4779 CPU86_LDouble fptemp;
4780
4781 fptemp = ST0;
4782 if (fptemp<0.0) {
4783 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4784 env->fpus |= 0x400;
4785 }
4786 ST0 = sqrt(fptemp);
4787}
4788
4789void helper_fsincos(void)
4790{
4791 CPU86_LDouble fptemp;
4792
4793 fptemp = ST0;
4794 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4795 env->fpus |= 0x400;
4796 } else {
4797 ST0 = sin(fptemp);
4798 fpush();
4799 ST0 = cos(fptemp);
4800 env->fpus &= (~0x400); /* C2 <-- 0 */
4801 /* the above code is for |arg| < 2**63 only */
4802 }
4803}
4804
4805void helper_frndint(void)
4806{
4807 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4808}
4809
4810void helper_fscale(void)
4811{
4812 ST0 = ldexp (ST0, (int)(ST1));
4813}
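/*
 * fscale scales ST0 by 2^trunc(ST1); the (int) cast truncates towards zero,
 * so e.g. ST0 = 3.0 with ST1 = 4.7 yields 3.0 * 2^4 = 48.0.
 */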
4814
4815void helper_fsin(void)
4816{
4817 CPU86_LDouble fptemp;
4818
4819 fptemp = ST0;
4820 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4821 env->fpus |= 0x400;
4822 } else {
4823 ST0 = sin(fptemp);
4824 env->fpus &= (~0x400); /* C2 <-- 0 */
4825 /* the above code is for |arg| < 2**53 only */
4826 }
4827}
4828
4829void helper_fcos(void)
4830{
4831 CPU86_LDouble fptemp;
4832
4833 fptemp = ST0;
4834 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4835 env->fpus |= 0x400;
4836 } else {
4837 ST0 = cos(fptemp);
4838 env->fpus &= (~0x400); /* C2 <-- 0 */
 4839 /* the above code is for |arg| < 2**63 only */
4840 }
4841}
4842
4843void helper_fxam_ST0(void)
4844{
4845 CPU86_LDoubleU temp;
4846 int expdif;
4847
4848 temp.d = ST0;
4849
4850 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4851 if (SIGND(temp))
4852 env->fpus |= 0x200; /* C1 <-- 1 */
4853
4854 /* XXX: test fptags too */
4855 expdif = EXPD(temp);
4856 if (expdif == MAXEXPD) {
4857#ifdef USE_X86LDOUBLE
4858 if (MANTD(temp) == 0x8000000000000000ULL)
4859#else
4860 if (MANTD(temp) == 0)
4861#endif
4862 env->fpus |= 0x500 /*Infinity*/;
4863 else
4864 env->fpus |= 0x100 /*NaN*/;
4865 } else if (expdif == 0) {
4866 if (MANTD(temp) == 0)
4867 env->fpus |= 0x4000 /*Zero*/;
4868 else
4869 env->fpus |= 0x4400 /*Denormal*/;
4870 } else {
4871 env->fpus |= 0x400;
4872 }
4873}
4874
4875void helper_fstenv(target_ulong ptr, int data32)
4876{
4877 int fpus, fptag, exp, i;
4878 uint64_t mant;
4879 CPU86_LDoubleU tmp;
4880
4881 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4882 fptag = 0;
4883 for (i=7; i>=0; i--) {
4884 fptag <<= 2;
4885 if (env->fptags[i]) {
4886 fptag |= 3;
4887 } else {
4888 tmp.d = env->fpregs[i].d;
4889 exp = EXPD(tmp);
4890 mant = MANTD(tmp);
4891 if (exp == 0 && mant == 0) {
4892 /* zero */
4893 fptag |= 1;
4894 } else if (exp == 0 || exp == MAXEXPD
4895#ifdef USE_X86LDOUBLE
4896 || (mant & (1LL << 63)) == 0
4897#endif
4898 ) {
4899 /* NaNs, infinity, denormal */
4900 fptag |= 2;
4901 }
4902 }
4903 }
4904 if (data32) {
4905 /* 32 bit */
4906 stl(ptr, env->fpuc);
4907 stl(ptr + 4, fpus);
4908 stl(ptr + 8, fptag);
4909 stl(ptr + 12, 0); /* fpip */
4910 stl(ptr + 16, 0); /* fpcs */
4911 stl(ptr + 20, 0); /* fpoo */
4912 stl(ptr + 24, 0); /* fpos */
4913 } else {
4914 /* 16 bit */
4915 stw(ptr, env->fpuc);
4916 stw(ptr + 2, fpus);
4917 stw(ptr + 4, fptag);
4918 stw(ptr + 6, 0);
4919 stw(ptr + 8, 0);
4920 stw(ptr + 10, 0);
4921 stw(ptr + 12, 0);
4922 }
4923}
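/*
 * The environment image written above is 28 bytes in 32-bit mode and 14 bytes
 * in 16-bit mode; helper_fsave()/helper_frstor() below rely on that when they
 * skip the header with "ptr += (14 << data32)".
 */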
4924
4925void helper_fldenv(target_ulong ptr, int data32)
4926{
4927 int i, fpus, fptag;
4928
4929 if (data32) {
4930 env->fpuc = lduw(ptr);
4931 fpus = lduw(ptr + 4);
4932 fptag = lduw(ptr + 8);
4933 }
4934 else {
4935 env->fpuc = lduw(ptr);
4936 fpus = lduw(ptr + 2);
4937 fptag = lduw(ptr + 4);
4938 }
4939 env->fpstt = (fpus >> 11) & 7;
4940 env->fpus = fpus & ~0x3800;
4941 for(i = 0;i < 8; i++) {
4942 env->fptags[i] = ((fptag & 3) == 3);
4943 fptag >>= 2;
4944 }
4945}
4946
4947void helper_fsave(target_ulong ptr, int data32)
4948{
4949 CPU86_LDouble tmp;
4950 int i;
4951
4952 helper_fstenv(ptr, data32);
4953
4954 ptr += (14 << data32);
4955 for(i = 0;i < 8; i++) {
4956 tmp = ST(i);
4957 helper_fstt(tmp, ptr);
4958 ptr += 10;
4959 }
4960
4961 /* fninit */
4962 env->fpus = 0;
4963 env->fpstt = 0;
4964 env->fpuc = 0x37f;
4965 env->fptags[0] = 1;
4966 env->fptags[1] = 1;
4967 env->fptags[2] = 1;
4968 env->fptags[3] = 1;
4969 env->fptags[4] = 1;
4970 env->fptags[5] = 1;
4971 env->fptags[6] = 1;
4972 env->fptags[7] = 1;
4973}
4974
4975void helper_frstor(target_ulong ptr, int data32)
4976{
4977 CPU86_LDouble tmp;
4978 int i;
4979
4980 helper_fldenv(ptr, data32);
4981 ptr += (14 << data32);
4982
4983 for(i = 0;i < 8; i++) {
4984 tmp = helper_fldt(ptr);
4985 ST(i) = tmp;
4986 ptr += 10;
4987 }
4988}
4989
4990void helper_fxsave(target_ulong ptr, int data64)
4991{
4992 int fpus, fptag, i, nb_xmm_regs;
4993 CPU86_LDouble tmp;
4994 target_ulong addr;
4995
4996 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4997 fptag = 0;
4998 for(i = 0; i < 8; i++) {
4999 fptag |= (env->fptags[i] << i);
5000 }
5001 stw(ptr, env->fpuc);
5002 stw(ptr + 2, fpus);
5003 stw(ptr + 4, fptag ^ 0xff);
5004#ifdef TARGET_X86_64
5005 if (data64) {
5006 stq(ptr + 0x08, 0); /* rip */
5007 stq(ptr + 0x10, 0); /* rdp */
5008 } else
5009#endif
5010 {
5011 stl(ptr + 0x08, 0); /* eip */
5012 stl(ptr + 0x0c, 0); /* sel */
5013 stl(ptr + 0x10, 0); /* dp */
5014 stl(ptr + 0x14, 0); /* sel */
5015 }
5016
5017 addr = ptr + 0x20;
5018 for(i = 0;i < 8; i++) {
5019 tmp = ST(i);
5020 helper_fstt(tmp, addr);
5021 addr += 16;
5022 }
5023
5024 if (env->cr[4] & CR4_OSFXSR_MASK) {
5025 /* XXX: finish it */
5026 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5027 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5028 if (env->hflags & HF_CS64_MASK)
5029 nb_xmm_regs = 16;
5030 else
5031 nb_xmm_regs = 8;
5032 addr = ptr + 0xa0;
5033 for(i = 0; i < nb_xmm_regs; i++) {
5034 stq(addr, env->xmm_regs[i].XMM_Q(0));
5035 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5036 addr += 16;
5037 }
5038 }
5039}
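/*
 * FXSAVE image layout as written above: +0x00 FCW, +0x02 FSW, +0x04 abridged
 * tag word, +0x08..+0x17 instruction/operand pointers (zeroed here),
 * +0x18 MXCSR, +0x1c MXCSR_MASK, +0x20 ST0..ST7 (16 bytes each), +0xa0 the
 * XMM registers. The "fptag ^ 0xff" converts the internal "1 = empty" tag
 * convention into the abridged "1 = valid" form used by FXSAVE.
 */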
5040
5041void helper_fxrstor(target_ulong ptr, int data64)
5042{
5043 int i, fpus, fptag, nb_xmm_regs;
5044 CPU86_LDouble tmp;
5045 target_ulong addr;
5046
5047 env->fpuc = lduw(ptr);
5048 fpus = lduw(ptr + 2);
5049 fptag = lduw(ptr + 4);
5050 env->fpstt = (fpus >> 11) & 7;
5051 env->fpus = fpus & ~0x3800;
5052 fptag ^= 0xff;
5053 for(i = 0;i < 8; i++) {
5054 env->fptags[i] = ((fptag >> i) & 1);
5055 }
5056
5057 addr = ptr + 0x20;
5058 for(i = 0;i < 8; i++) {
5059 tmp = helper_fldt(addr);
5060 ST(i) = tmp;
5061 addr += 16;
5062 }
5063
5064 if (env->cr[4] & CR4_OSFXSR_MASK) {
5065 /* XXX: finish it */
5066 env->mxcsr = ldl(ptr + 0x18);
5067 //ldl(ptr + 0x1c);
5068 if (env->hflags & HF_CS64_MASK)
5069 nb_xmm_regs = 16;
5070 else
5071 nb_xmm_regs = 8;
5072 addr = ptr + 0xa0;
5073 for(i = 0; i < nb_xmm_regs; i++) {
5074#if !defined(VBOX) || __GNUC__ < 4
5075 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5076 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5077#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5078# if 1
5079 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5080 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5081 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5082 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5083# else
5084 /* this works fine on Mac OS X, gcc 4.0.1 */
5085 uint64_t u64 = ldq(addr);
 5086 env->xmm_regs[i].XMM_Q(0) = u64;
 5087 u64 = ldq(addr + 8);
5088 env->xmm_regs[i].XMM_Q(1) = u64;
5089# endif
5090#endif
5091 addr += 16;
5092 }
5093 }
5094}
5095
5096#ifndef USE_X86LDOUBLE
5097
5098void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5099{
5100 CPU86_LDoubleU temp;
5101 int e;
5102
5103 temp.d = f;
5104 /* mantissa */
5105 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5106 /* exponent + sign */
5107 e = EXPD(temp) - EXPBIAS + 16383;
5108 e |= SIGND(temp) >> 16;
5109 *pexp = e;
5110}
5111
5112CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5113{
5114 CPU86_LDoubleU temp;
5115 int e;
5116 uint64_t ll;
5117
5118 /* XXX: handle overflow ? */
5119 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5120 e |= (upper >> 4) & 0x800; /* sign */
5121 ll = (mant >> 11) & ((1LL << 52) - 1);
5122#ifdef __arm__
5123 temp.l.upper = (e << 20) | (ll >> 32);
5124 temp.l.lower = ll;
5125#else
5126 temp.ll = ll | ((uint64_t)e << 52);
5127#endif
5128 return temp.d;
5129}
5130
5131#else
5132
5133void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5134{
5135 CPU86_LDoubleU temp;
5136
5137 temp.d = f;
5138 *pmant = temp.l.lower;
5139 *pexp = temp.l.upper;
5140}
5141
5142CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5143{
5144 CPU86_LDoubleU temp;
5145
5146 temp.l.upper = upper;
5147 temp.l.lower = mant;
5148 return temp.d;
5149}
5150#endif
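/*
 * The non-USE_X86LDOUBLE variants of cpu_get_fp80()/cpu_set_fp80() above
 * convert between the 64-bit host double (52 fraction bits, bias 1023) and
 * the 80-bit x87 format (63 fraction bits plus an explicit integer bit,
 * bias 16383); hence the shift by 11, the forced (1LL << 63) integer bit and
 * the rebias by 16383 - 1023. A disabled, purely illustrative sketch
 * (fp80_example is not a real helper):
 */
#if 0
static void fp80_example(void)
{
    uint64_t mant;
    uint16_t exp;
    cpu_get_fp80(&mant, &exp, 1.0);
    /* mant == 0x8000000000000000ULL and exp == 0x3fff here;
       cpu_set_fp80(mant, exp) yields 1.0 again. */
    (void)mant; (void)exp;
}
#endif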
5151
5152#ifdef TARGET_X86_64
5153
5154//#define DEBUG_MULDIV
5155
5156static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5157{
5158 *plow += a;
5159 /* carry test */
5160 if (*plow < a)
5161 (*phigh)++;
5162 *phigh += b;
5163}
5164
5165static void neg128(uint64_t *plow, uint64_t *phigh)
5166{
5167 *plow = ~ *plow;
5168 *phigh = ~ *phigh;
5169 add128(plow, phigh, 1, 0);
5170}
5171
5172/* return TRUE if overflow */
5173static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5174{
5175 uint64_t q, r, a1, a0;
5176 int i, qb, ab;
5177
5178 a0 = *plow;
5179 a1 = *phigh;
5180 if (a1 == 0) {
5181 q = a0 / b;
5182 r = a0 % b;
5183 *plow = q;
5184 *phigh = r;
5185 } else {
5186 if (a1 >= b)
5187 return 1;
5188 /* XXX: use a better algorithm */
5189 for(i = 0; i < 64; i++) {
5190 ab = a1 >> 63;
5191 a1 = (a1 << 1) | (a0 >> 63);
5192 if (ab || a1 >= b) {
5193 a1 -= b;
5194 qb = 1;
5195 } else {
5196 qb = 0;
5197 }
5198 a0 = (a0 << 1) | qb;
5199 }
5200#if defined(DEBUG_MULDIV)
5201 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5202 *phigh, *plow, b, a0, a1);
5203#endif
5204 *plow = a0;
5205 *phigh = a1;
5206 }
5207 return 0;
5208}
5209
5210/* return TRUE if overflow */
5211static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5212{
5213 int sa, sb;
5214 sa = ((int64_t)*phigh < 0);
5215 if (sa)
5216 neg128(plow, phigh);
5217 sb = (b < 0);
5218 if (sb)
5219 b = -b;
5220 if (div64(plow, phigh, b) != 0)
5221 return 1;
5222 if (sa ^ sb) {
5223 if (*plow > (1ULL << 63))
5224 return 1;
5225 *plow = - *plow;
5226 } else {
5227 if (*plow >= (1ULL << 63))
5228 return 1;
5229 }
5230 if (sa)
5231 *phigh = - *phigh;
5232 return 0;
5233}
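/*
 * div64() is a simple restoring shift-and-subtract division of the 128-bit
 * value *phigh:*plow by b; idiv64() wraps it for signed operands by negating
 * through neg128() and fixing the signs up afterwards. Both return non-zero
 * when the quotient does not fit in 64 bits, which helper_divq_EAX()/
 * helper_idivq_EAX() below turn into #DE. A disabled, purely illustrative
 * sketch (div64_example is not a real helper):
 */
#if 0
static void div64_example(void)
{
    /* (2^64 + 6) / 3: expect quotient 0x5555555555555557, remainder 1. */
    uint64_t lo = 6, hi = 1;
    int overflow = div64(&lo, &hi, 3);
    /* overflow == 0, lo == 0x5555555555555557ULL and hi == 1 afterwards. */
    (void)overflow;
}
#endif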
5234
5235void helper_mulq_EAX_T0(target_ulong t0)
5236{
5237 uint64_t r0, r1;
5238
5239 mulu64(&r0, &r1, EAX, t0);
5240 EAX = r0;
5241 EDX = r1;
5242 CC_DST = r0;
5243 CC_SRC = r1;
5244}
5245
5246void helper_imulq_EAX_T0(target_ulong t0)
5247{
5248 uint64_t r0, r1;
5249
5250 muls64(&r0, &r1, EAX, t0);
5251 EAX = r0;
5252 EDX = r1;
5253 CC_DST = r0;
5254 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5255}
5256
5257target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5258{
5259 uint64_t r0, r1;
5260
5261 muls64(&r0, &r1, t0, t1);
5262 CC_DST = r0;
5263 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5264 return r0;
5265}
5266
5267void helper_divq_EAX(target_ulong t0)
5268{
5269 uint64_t r0, r1;
5270 if (t0 == 0) {
5271 raise_exception(EXCP00_DIVZ);
5272 }
5273 r0 = EAX;
5274 r1 = EDX;
5275 if (div64(&r0, &r1, t0))
5276 raise_exception(EXCP00_DIVZ);
5277 EAX = r0;
5278 EDX = r1;
5279}
5280
5281void helper_idivq_EAX(target_ulong t0)
5282{
5283 uint64_t r0, r1;
5284 if (t0 == 0) {
5285 raise_exception(EXCP00_DIVZ);
5286 }
5287 r0 = EAX;
5288 r1 = EDX;
5289 if (idiv64(&r0, &r1, t0))
5290 raise_exception(EXCP00_DIVZ);
5291 EAX = r0;
5292 EDX = r1;
5293}
5294#endif
5295
5296static void do_hlt(void)
5297{
5298 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5299 env->halted = 1;
5300 env->exception_index = EXCP_HLT;
5301 cpu_loop_exit();
5302}
5303
5304void helper_hlt(int next_eip_addend)
5305{
5306 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5307 EIP += next_eip_addend;
5308
5309 do_hlt();
5310}
5311
5312void helper_monitor(target_ulong ptr)
5313{
5314 if ((uint32_t)ECX != 0)
5315 raise_exception(EXCP0D_GPF);
5316 /* XXX: store address ? */
5317 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5318}
5319
5320void helper_mwait(int next_eip_addend)
5321{
5322 if ((uint32_t)ECX != 0)
5323 raise_exception(EXCP0D_GPF);
5324#ifdef VBOX
5325 helper_hlt(next_eip_addend);
5326#else
5327 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5328 EIP += next_eip_addend;
5329
5330 /* XXX: not complete but not completely erroneous */
5331 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5332 /* more than one CPU: do not sleep because another CPU may
5333 wake this one */
5334 } else {
5335 do_hlt();
5336 }
5337#endif
5338}
5339
5340void helper_debug(void)
5341{
5342 env->exception_index = EXCP_DEBUG;
5343 cpu_loop_exit();
5344}
5345
5346void helper_raise_interrupt(int intno, int next_eip_addend)
5347{
5348 raise_interrupt(intno, 1, 0, next_eip_addend);
5349}
5350
5351void helper_raise_exception(int exception_index)
5352{
5353 raise_exception(exception_index);
5354}
5355
5356void helper_cli(void)
5357{
5358 env->eflags &= ~IF_MASK;
5359}
5360
5361void helper_sti(void)
5362{
5363 env->eflags |= IF_MASK;
5364}
5365
5366#ifdef VBOX
5367void helper_cli_vme(void)
5368{
5369 env->eflags &= ~IF_MASK;
5370}
5371
5372void helper_sti_vme(void)
5373{
5374 /* First check, then change eflags according to the AMD manual */
5375 if (env->eflags & VIP_MASK) {
5376 raise_exception(EXCP0D_GPF);
5377 }
5378 env->eflags |= IF_MASK;
5379}
5380#endif
5381
5382#if 0
5383/* vm86plus instructions */
5384void helper_cli_vm(void)
5385{
5386 env->eflags &= ~VIF_MASK;
5387}
5388
5389void helper_sti_vm(void)
5390{
5391 env->eflags |= VIF_MASK;
5392 if (env->eflags & VIP_MASK) {
5393 raise_exception(EXCP0D_GPF);
5394 }
5395}
5396#endif
5397
5398void helper_set_inhibit_irq(void)
5399{
5400 env->hflags |= HF_INHIBIT_IRQ_MASK;
5401}
5402
5403void helper_reset_inhibit_irq(void)
5404{
5405 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5406}
5407
5408void helper_boundw(target_ulong a0, int v)
5409{
5410 int low, high;
5411 low = ldsw(a0);
5412 high = ldsw(a0 + 2);
5413 v = (int16_t)v;
5414 if (v < low || v > high) {
5415 raise_exception(EXCP05_BOUND);
5416 }
5417 FORCE_RET();
5418}
5419
5420void helper_boundl(target_ulong a0, int v)
5421{
5422 int low, high;
5423 low = ldl(a0);
5424 high = ldl(a0 + 4);
5425 if (v < low || v > high) {
5426 raise_exception(EXCP05_BOUND);
5427 }
5428 FORCE_RET();
5429}
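/*
 * BOUND semantics implemented above: the memory operand holds a signed
 * {lower, upper} pair (words for boundw, dwords for boundl) and #BR (EXCP05)
 * is raised when the index lies outside the inclusive range; e.g. with bounds
 * {0, 9} an index of 12 faults while 5 does not.
 */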
5430
5431static float approx_rsqrt(float a)
5432{
5433 return 1.0 / sqrt(a);
5434}
5435
5436static float approx_rcp(float a)
5437{
5438 return 1.0 / a;
5439}
5440
5441#if !defined(CONFIG_USER_ONLY)
5442
5443#define MMUSUFFIX _mmu
5444
5445#define SHIFT 0
5446#include "softmmu_template.h"
5447
5448#define SHIFT 1
5449#include "softmmu_template.h"
5450
5451#define SHIFT 2
5452#include "softmmu_template.h"
5453
5454#define SHIFT 3
5455#include "softmmu_template.h"
5456
5457#endif
5458
5459#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5460/* This code assumes that a real physical address always fits into a host CPU register,
 5461 which is wrong in general, but true for our current use cases. */
5462RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5463{
5464 return remR3PhysReadS8(addr);
5465}
5466RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5467{
5468 return remR3PhysReadU8(addr);
5469}
5470void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5471{
5472 remR3PhysWriteU8(addr, val);
5473}
5474RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5475{
5476 return remR3PhysReadS16(addr);
5477}
5478RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5479{
5480 return remR3PhysReadU16(addr);
5481}
5482void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5483{
5484 remR3PhysWriteU16(addr, val);
5485}
5486RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5487{
5488 return remR3PhysReadS32(addr);
5489}
5490RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5491{
5492 return remR3PhysReadU32(addr);
5493}
5494void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5495{
5496 remR3PhysWriteU32(addr, val);
5497}
5498uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5499{
5500 return remR3PhysReadU64(addr);
5501}
5502void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5503{
5504 remR3PhysWriteU64(addr, val);
5505}
5506#endif
5507
5508/* try to fill the TLB and return an exception if error. If retaddr is
5509 NULL, it means that the function was called in C code (i.e. not
5510 from generated code or from helper.c) */
5511/* XXX: fix it to restore all registers */
5512void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5513{
5514 TranslationBlock *tb;
5515 int ret;
5516 unsigned long pc;
5517 CPUX86State *saved_env;
5518
5519 /* XXX: hack to restore env in all cases, even if not called from
5520 generated code */
5521 saved_env = env;
5522 env = cpu_single_env;
5523
5524 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5525 if (ret) {
5526 if (retaddr) {
5527 /* now we have a real cpu fault */
5528 pc = (unsigned long)retaddr;
5529 tb = tb_find_pc(pc);
5530 if (tb) {
5531 /* the PC is inside the translated code. It means that we have
5532 a virtual CPU fault */
5533 cpu_restore_state(tb, env, pc, NULL);
5534 }
5535 }
5536 raise_exception_err(env->exception_index, env->error_code);
5537 }
5538 env = saved_env;
5539}
5540
5541#ifdef VBOX
5542
5543/**
5544 * Correctly computes the eflags.
5545 * @returns eflags.
5546 * @param env1 CPU environment.
5547 */
5548uint32_t raw_compute_eflags(CPUX86State *env1)
5549{
5550 CPUX86State *savedenv = env;
5551 uint32_t efl;
5552 env = env1;
5553 efl = compute_eflags();
5554 env = savedenv;
5555 return efl;
5556}
5557
5558/**
 5559 * Reads a byte from a virtual address in the guest memory area.
 5560 * XXX: does it work for all addresses? swapped-out pages?
 5561 * @returns the byte read.
 5562 * @param env1 CPU environment.
 5563 * @param addr GC virtual address.
5564 */
5565uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5566{
5567 CPUX86State *savedenv = env;
5568 uint8_t u8;
5569 env = env1;
5570 u8 = ldub_kernel(addr);
5571 env = savedenv;
5572 return u8;
5573}
5574
5575/**
 5576 * Reads a word from a virtual address in the guest memory area.
 5577 * XXX: does it work for all addresses? swapped-out pages?
 5578 * @returns the word read.
 5579 * @param env1 CPU environment.
 5580 * @param addr GC virtual address.
5581 */
5582uint16_t read_word(CPUX86State *env1, target_ulong addr)
5583{
5584 CPUX86State *savedenv = env;
5585 uint16_t u16;
5586 env = env1;
5587 u16 = lduw_kernel(addr);
5588 env = savedenv;
5589 return u16;
5590}
5591
5592/**
 5593 * Reads a dword from a virtual address in the guest memory area.
 5594 * XXX: does it work for all addresses? swapped-out pages?
 5595 * @returns the dword read.
 5596 * @param env1 CPU environment.
 5597 * @param addr GC virtual address.
5598 */
5599uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5600{
5601 CPUX86State *savedenv = env;
5602 uint32_t u32;
5603 env = env1;
5604 u32 = ldl_kernel(addr);
5605 env = savedenv;
5606 return u32;
5607}
5608
5609/**
 5610 * Writes a byte to a virtual address in the guest memory area.
 5611 * XXX: does it work for all addresses? swapped-out pages?
 5612 *
 5613 * @param env1 CPU environment.
 5614 * @param addr GC virtual address.
 5615 * @param val byte value
5616 */
5617void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5618{
5619 CPUX86State *savedenv = env;
5620 env = env1;
5621 stb(addr, val);
5622 env = savedenv;
5623}
5624
5625void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5626{
5627 CPUX86State *savedenv = env;
5628 env = env1;
5629 stw(addr, val);
5630 env = savedenv;
5631}
5632
5633void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5634{
5635 CPUX86State *savedenv = env;
5636 env = env1;
5637 stl(addr, val);
5638 env = savedenv;
5639}
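/*
 * All of the read_xxx()/write_xxx() accessors above share the same pattern:
 * save the global env pointer, switch it to the caller-provided env1 so that
 * the ld*_kernel/stb/stw/stl macros operate on the right CPU state, perform
 * the access, and restore env before returning.
 */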
5640
5641/**
 5642 * Correctly loads a selector into a segment register, updating the internal
 5643 * qemu data/caches.
5644 * @param env1 CPU environment.
5645 * @param seg_reg Segment register.
5646 * @param selector Selector to load.
5647 */
5648void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5649{
5650 CPUX86State *savedenv = env;
5651 jmp_buf old_buf;
5652
5653 env = env1;
5654
5655 if ( env->eflags & X86_EFL_VM
5656 || !(env->cr[0] & X86_CR0_PE))
5657 {
5658 load_seg_vm(seg_reg, selector);
5659
5660 env = savedenv;
5661
5662 /* Successful sync. */
5663 env1->segs[seg_reg].newselector = 0;
5664 }
5665 else
5666 {
 5667 /* For some reason, this works even without saving/restoring the jump buffer; as this code
 5668 is time critical, let's not do that. */
5669#if 0
5670 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5671#endif
5672 if (setjmp(env1->jmp_env) == 0)
5673 {
5674 if (seg_reg == R_CS)
5675 {
5676 uint32_t e1, e2;
5677 e1 = e2 = 0;
5678 load_segment(&e1, &e2, selector);
5679 cpu_x86_load_seg_cache(env, R_CS, selector,
5680 get_seg_base(e1, e2),
5681 get_seg_limit(e1, e2),
5682 e2);
5683 }
5684 else
5685 tss_load_seg(seg_reg, selector);
5686 env = savedenv;
5687
5688 /* Successful sync. */
5689 env1->segs[seg_reg].newselector = 0;
5690 }
5691 else
5692 {
5693 env = savedenv;
5694
5695 /* Postpone sync until the guest uses the selector. */
5696 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5697 env1->segs[seg_reg].newselector = selector;
5698 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5699 env1->exception_index = -1;
5700 env1->error_code = 0;
5701 env1->old_exception = -1;
5702 }
5703#if 0
5704 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5705#endif
5706 }
5707
5708}
5709
5710DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5711{
5712 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5713}
5714
5715
5716int emulate_single_instr(CPUX86State *env1)
5717{
5718 TranslationBlock *tb;
5719 TranslationBlock *current;
5720 int flags;
5721 uint8_t *tc_ptr;
5722 target_ulong old_eip;
5723
5724 /* ensures env is loaded! */
5725 CPUX86State *savedenv = env;
5726 env = env1;
5727
5728 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5729
5730 current = env->current_tb;
5731 env->current_tb = NULL;
5732 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5733
5734 /*
5735 * Translate only one instruction.
5736 */
5737 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5738 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5739 env->segs[R_CS].base, flags, 0);
5740
5741 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5742
5743
5744 /* tb_link_phys: */
5745 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5746 tb->jmp_next[0] = NULL;
5747 tb->jmp_next[1] = NULL;
5748 Assert(tb->jmp_next[0] == NULL);
5749 Assert(tb->jmp_next[1] == NULL);
5750 if (tb->tb_next_offset[0] != 0xffff)
5751 tb_reset_jump(tb, 0);
5752 if (tb->tb_next_offset[1] != 0xffff)
5753 tb_reset_jump(tb, 1);
5754
5755 /*
5756 * Execute it using emulation
5757 */
5758 old_eip = env->eip;
5759 env->current_tb = tb;
5760
5761 /*
5762 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code
5763 * perhaps not a very safe hack
5764 */
5765 while(old_eip == env->eip)
5766 {
5767 tc_ptr = tb->tc_ptr;
5768
5769#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5770 int fake_ret;
5771 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5772#else
5773 tcg_qemu_tb_exec(tc_ptr);
5774#endif
5775 /*
5776 * Exit once we detect an external interrupt and interrupts are enabled
5777 */
5778 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5779 ( (env->eflags & IF_MASK) &&
5780 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5781 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5782 {
5783 break;
5784 }
5785 }
5786 env->current_tb = current;
5787
5788 tb_phys_invalidate(tb, -1);
5789 tb_free(tb);
5790/*
5791 Assert(tb->tb_next_offset[0] == 0xffff);
5792 Assert(tb->tb_next_offset[1] == 0xffff);
5793 Assert(tb->tb_next[0] == 0xffff);
5794 Assert(tb->tb_next[1] == 0xffff);
5795 Assert(tb->jmp_next[0] == NULL);
5796 Assert(tb->jmp_next[1] == NULL);
5797 Assert(tb->jmp_first == NULL); */
5798
5799 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5800
5801 /*
5802 * Execute the next instruction when we encounter instruction fusing.
5803 */
5804 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5805 {
5806 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5807 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5808 emulate_single_instr(env);
5809 }
5810
5811 env = savedenv;
5812 return 0;
5813}
5814
5815/**
5816 * Correctly loads a new ldtr selector.
5817 *
5818 * @param env1 CPU environment.
5819 * @param selector Selector to load.
5820 */
5821void sync_ldtr(CPUX86State *env1, int selector)
5822{
5823 CPUX86State *saved_env = env;
5824 if (setjmp(env1->jmp_env) == 0)
5825 {
5826 env = env1;
5827 helper_lldt(selector);
5828 env = saved_env;
5829 }
5830 else
5831 {
5832 env = saved_env;
5833#ifdef VBOX_STRICT
5834 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5835#endif
5836 }
5837}
5838
5839/**
5840 * Correctly loads a new tr selector.
5841 *
5842 * @param env1 CPU environment.
5843 * @param selector Selector to load.
5844 */
5845int sync_tr(CPUX86State *env1, int selector)
5846{
5847 /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */
5848 SegmentCache *dt;
5849 uint32_t e1, e2;
5850 int index, type, entry_limit;
5851 target_ulong ptr;
5852 CPUX86State *saved_env = env;
5853 env = env1;
5854
5855 selector &= 0xffff;
5856 if ((selector & 0xfffc) == 0) {
5857 /* NULL selector case: invalid TR */
5858 env->tr.base = 0;
5859 env->tr.limit = 0;
5860 env->tr.flags = 0;
5861 } else {
5862 if (selector & 0x4)
5863 goto l_failure;
5864 dt = &env->gdt;
5865 index = selector & ~7;
5866#ifdef TARGET_X86_64
5867 if (env->hflags & HF_LMA_MASK)
5868 entry_limit = 15;
5869 else
5870#endif
5871 entry_limit = 7;
5872 if ((index + entry_limit) > dt->limit)
5873 goto l_failure;
5874 ptr = dt->base + index;
5875 e1 = ldl_kernel(ptr);
5876 e2 = ldl_kernel(ptr + 4);
5877 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5878 if ((e2 & DESC_S_MASK) /*||
5879 (type != 1 && type != 9)*/)
5880 goto l_failure;
5881 if (!(e2 & DESC_P_MASK))
5882 goto l_failure;
5883#ifdef TARGET_X86_64
5884 if (env->hflags & HF_LMA_MASK) {
5885 uint32_t e3;
5886 e3 = ldl_kernel(ptr + 8);
5887 load_seg_cache_raw_dt(&env->tr, e1, e2);
5888 env->tr.base |= (target_ulong)e3 << 32;
5889 } else
5890#endif
5891 {
5892 load_seg_cache_raw_dt(&env->tr, e1, e2);
5893 }
5894 e2 |= DESC_TSS_BUSY_MASK;
5895 stl_kernel(ptr + 4, e2);
5896 }
5897 env->tr.selector = selector;
5898
5899 env = saved_env;
5900 return 0;
5901l_failure:
 5902 AssertMsgFailed(("selector=%d\n", selector));
 env = saved_env; /* restore the global env on the failure path as well */
 5903 return -1;
5904}
5905
5906
5907int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5908 uint32_t *esp_ptr, int dpl)
5909{
5910 int type, index, shift;
5911
5912 CPUX86State *savedenv = env;
5913 env = env1;
5914
5915 if (!(env->tr.flags & DESC_P_MASK))
5916 cpu_abort(env, "invalid tss");
5917 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5918 if ((type & 7) != 1)
5919 cpu_abort(env, "invalid tss type %d", type);
5920 shift = type >> 3;
5921 index = (dpl * 4 + 2) << shift;
5922 if (index + (4 << shift) - 1 > env->tr.limit)
5923 {
5924 env = savedenv;
5925 return 0;
5926 }
5927 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5928
5929 if (shift == 0) {
5930 *esp_ptr = lduw_kernel(env->tr.base + index);
5931 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5932 } else {
5933 *esp_ptr = ldl_kernel(env->tr.base + index);
5934 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5935 }
5936
5937 env = savedenv;
5938 return 1;
5939}
5940
5941//*****************************************************************************
5942// Needs to be at the bottom of the file (overriding macros)
5943
5944#ifndef VBOX
5945static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5946#else /* VBOX */
5947DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5948#endif /* VBOX */
5949{
5950 return *(CPU86_LDouble *)ptr;
5951}
5952
5953#ifndef VBOX
5954static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5955#else /* VBOX */
5956DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5957#endif /* VBOX */
5958{
5959 *(CPU86_LDouble *)ptr = f;
5960}
5961
5962#undef stw
5963#undef stl
5964#undef stq
5965#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5966#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5967#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5968#define data64 0
5969
5970//*****************************************************************************
5971void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5972{
5973 int fpus, fptag, i, nb_xmm_regs;
5974 CPU86_LDouble tmp;
5975 uint8_t *addr;
5976
5977 if (env->cpuid_features & CPUID_FXSR)
5978 {
5979 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5980 fptag = 0;
5981 for(i = 0; i < 8; i++) {
5982 fptag |= (env->fptags[i] << i);
5983 }
5984 stw(ptr, env->fpuc);
5985 stw(ptr + 2, fpus);
5986 stw(ptr + 4, fptag ^ 0xff);
5987
5988 addr = ptr + 0x20;
5989 for(i = 0;i < 8; i++) {
5990 tmp = ST(i);
5991 helper_fstt_raw(tmp, addr);
5992 addr += 16;
5993 }
5994
5995 if (env->cr[4] & CR4_OSFXSR_MASK) {
5996 /* XXX: finish it */
5997 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5998 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5999 nb_xmm_regs = 8 << data64;
6000 addr = ptr + 0xa0;
6001 for(i = 0; i < nb_xmm_regs; i++) {
6002#if __GNUC__ < 4
6003 stq(addr, env->xmm_regs[i].XMM_Q(0));
6004 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6005#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6006 stl(addr, env->xmm_regs[i].XMM_L(0));
6007 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6008 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6009 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6010#endif
6011 addr += 16;
6012 }
6013 }
6014 }
6015 else
6016 {
6017 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6018 int fptag;
6019
6020 fp->FCW = env->fpuc;
6021 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6022 fptag = 0;
6023 for (i=7; i>=0; i--) {
6024 fptag <<= 2;
6025 if (env->fptags[i]) {
6026 fptag |= 3;
6027 } else {
6028 /* the FPU automatically computes it */
6029 }
6030 }
6031 fp->FTW = fptag;
6032
6033 for(i = 0;i < 8; i++) {
6034 tmp = ST(i);
6035 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6036 }
6037 }
6038}
6039
6040//*****************************************************************************
6041#undef lduw
6042#undef ldl
6043#undef ldq
6044#define lduw(a) *(uint16_t *)(a)
6045#define ldl(a) *(uint32_t *)(a)
6046#define ldq(a) *(uint64_t *)(a)
6047//*****************************************************************************
6048void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6049{
6050 int i, fpus, fptag, nb_xmm_regs;
6051 CPU86_LDouble tmp;
6052 uint8_t *addr;
6053
6054 if (env->cpuid_features & CPUID_FXSR)
6055 {
6056 env->fpuc = lduw(ptr);
6057 fpus = lduw(ptr + 2);
6058 fptag = lduw(ptr + 4);
6059 env->fpstt = (fpus >> 11) & 7;
6060 env->fpus = fpus & ~0x3800;
6061 fptag ^= 0xff;
6062 for(i = 0;i < 8; i++) {
6063 env->fptags[i] = ((fptag >> i) & 1);
6064 }
6065
6066 addr = ptr + 0x20;
6067 for(i = 0;i < 8; i++) {
6068 tmp = helper_fldt_raw(addr);
6069 ST(i) = tmp;
6070 addr += 16;
6071 }
6072
6073 if (env->cr[4] & CR4_OSFXSR_MASK) {
6074 /* XXX: finish it, endianness */
6075 env->mxcsr = ldl(ptr + 0x18);
6076 //ldl(ptr + 0x1c);
6077 nb_xmm_regs = 8 << data64;
6078 addr = ptr + 0xa0;
6079 for(i = 0; i < nb_xmm_regs; i++) {
6080#if HC_ARCH_BITS == 32
6081 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6082 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6083 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6084 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6085 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6086#else
6087 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6088 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6089#endif
6090 addr += 16;
6091 }
6092 }
6093 }
6094 else
6095 {
6096 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6097 int fptag, j;
6098
6099 env->fpuc = fp->FCW;
6100 env->fpstt = (fp->FSW >> 11) & 7;
6101 env->fpus = fp->FSW & ~0x3800;
6102 fptag = fp->FTW;
6103 for(i = 0;i < 8; i++) {
6104 env->fptags[i] = ((fptag & 3) == 3);
6105 fptag >>= 2;
6106 }
6107 j = env->fpstt;
6108 for(i = 0;i < 8; i++) {
6109 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6110 ST(i) = tmp;
6111 }
6112 }
6113}
6114//*****************************************************************************
6115//*****************************************************************************
6116
6117#endif /* VBOX */
6118
6119/* Secure Virtual Machine helpers */
6120
6121#if defined(CONFIG_USER_ONLY)
6122
6123void helper_vmrun(int aflag, int next_eip_addend)
6124{
6125}
6126void helper_vmmcall(void)
6127{
6128}
6129void helper_vmload(int aflag)
6130{
6131}
6132void helper_vmsave(int aflag)
6133{
6134}
6135void helper_stgi(void)
6136{
6137}
6138void helper_clgi(void)
6139{
6140}
6141void helper_skinit(void)
6142{
6143}
6144void helper_invlpga(int aflag)
6145{
6146}
6147void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6148{
6149}
6150void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6151{
6152}
6153
6154void helper_svm_check_io(uint32_t port, uint32_t param,
6155 uint32_t next_eip_addend)
6156{
6157}
6158#else
6159
6160#ifndef VBOX
6161static inline void svm_save_seg(target_phys_addr_t addr,
6162#else /* VBOX */
6163DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6164#endif /* VBOX */
6165 const SegmentCache *sc)
6166{
6167 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6168 sc->selector);
6169 stq_phys(addr + offsetof(struct vmcb_seg, base),
6170 sc->base);
6171 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6172 sc->limit);
6173 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6174 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6175}
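/*
 * The VMCB segment "attrib" field is a packed 12-bit form of the cached
 * descriptor flags: flag bits 8-15 (type/S/DPL/P) end up in attrib bits 0-7
 * and flag bits 20-23 (AVL/L/DB/G) in attrib bits 8-11; svm_load_seg() below
 * performs the inverse unpacking.
 */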
6176
6177#ifndef VBOX
6178static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6179#else /* VBOX */
6180DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6181#endif /* VBOX */
6182{
6183 unsigned int flags;
6184
6185 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6186 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6187 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6188 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6189 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6190}
6191
6192#ifndef VBOX
6193static inline void svm_load_seg_cache(target_phys_addr_t addr,
6194#else /* VBOX */
6195DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6196#endif /* VBOX */
6197 CPUState *env, int seg_reg)
6198{
6199 SegmentCache sc1, *sc = &sc1;
6200 svm_load_seg(addr, sc);
6201 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6202 sc->base, sc->limit, sc->flags);
6203}
6204
6205void helper_vmrun(int aflag, int next_eip_addend)
6206{
6207 target_ulong addr;
6208 uint32_t event_inj;
6209 uint32_t int_ctl;
6210
6211 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6212
6213 if (aflag == 2)
6214 addr = EAX;
6215 else
6216 addr = (uint32_t)EAX;
6217
6218 if (loglevel & CPU_LOG_TB_IN_ASM)
6219 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6220
6221 env->vm_vmcb = addr;
6222
6223 /* save the current CPU state in the hsave page */
6224 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6225 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6226
6227 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6228 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6229
6230 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6231 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6232 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6233 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6234 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6235 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6236
6237 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6238 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6239
6240 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6241 &env->segs[R_ES]);
6242 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6243 &env->segs[R_CS]);
6244 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6245 &env->segs[R_SS]);
6246 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6247 &env->segs[R_DS]);
6248
6249 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6250 EIP + next_eip_addend);
6251 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6252 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6253
6254 /* load the interception bitmaps so we do not need to access the
6255 vmcb in svm mode */
6256 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6257 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6258 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6259 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6260 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6261 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6262
6263 /* enable intercepts */
6264 env->hflags |= HF_SVMI_MASK;
6265
6266 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6267
6268 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6269 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6270
6271 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6272 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6273
6274 /* clear exit_info_2 so we behave like the real hardware */
6275 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6276
6277 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6278 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6279 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6280 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6281 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6282 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6283 if (int_ctl & V_INTR_MASKING_MASK) {
6284 env->v_tpr = int_ctl & V_TPR_MASK;
6285 env->hflags2 |= HF2_VINTR_MASK;
6286 if (env->eflags & IF_MASK)
6287 env->hflags2 |= HF2_HIF_MASK;
6288 }
6289
6290 cpu_load_efer(env,
6291 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6292 env->eflags = 0;
6293 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6294 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6295 CC_OP = CC_OP_EFLAGS;
6296
6297 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6298 env, R_ES);
6299 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6300 env, R_CS);
6301 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6302 env, R_SS);
6303 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6304 env, R_DS);
6305
6306 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6307 env->eip = EIP;
6308 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6309 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6310 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6311 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6312 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6313
6314 /* FIXME: guest state consistency checks */
6315
6316 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6317 case TLB_CONTROL_DO_NOTHING:
6318 break;
6319 case TLB_CONTROL_FLUSH_ALL_ASID:
6320 /* FIXME: this is not 100% correct but should work for now */
6321 tlb_flush(env, 1);
6322 break;
6323 }
6324
6325 env->hflags2 |= HF2_GIF_MASK;
6326
6327 if (int_ctl & V_IRQ_MASK) {
6328 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6329 }
6330
6331 /* maybe we need to inject an event */
6332 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6333 if (event_inj & SVM_EVTINJ_VALID) {
6334 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6335 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6336 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6337 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6338
6339 if (loglevel & CPU_LOG_TB_IN_ASM)
6340 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6341 /* FIXME: need to implement valid_err */
6342 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6343 case SVM_EVTINJ_TYPE_INTR:
6344 env->exception_index = vector;
6345 env->error_code = event_inj_err;
6346 env->exception_is_int = 0;
6347 env->exception_next_eip = -1;
6348 if (loglevel & CPU_LOG_TB_IN_ASM)
6349 fprintf(logfile, "INTR");
6350 /* XXX: is it always correct? */
6351 do_interrupt(vector, 0, 0, 0, 1);
6352 break;
6353 case SVM_EVTINJ_TYPE_NMI:
6354 env->exception_index = EXCP02_NMI;
6355 env->error_code = event_inj_err;
6356 env->exception_is_int = 0;
6357 env->exception_next_eip = EIP;
6358 if (loglevel & CPU_LOG_TB_IN_ASM)
6359 fprintf(logfile, "NMI");
6360 cpu_loop_exit();
6361 break;
6362 case SVM_EVTINJ_TYPE_EXEPT:
6363 env->exception_index = vector;
6364 env->error_code = event_inj_err;
6365 env->exception_is_int = 0;
6366 env->exception_next_eip = -1;
6367 if (loglevel & CPU_LOG_TB_IN_ASM)
6368 fprintf(logfile, "EXEPT");
6369 cpu_loop_exit();
6370 break;
6371 case SVM_EVTINJ_TYPE_SOFT:
6372 env->exception_index = vector;
6373 env->error_code = event_inj_err;
6374 env->exception_is_int = 1;
6375 env->exception_next_eip = EIP;
6376 if (loglevel & CPU_LOG_TB_IN_ASM)
6377 fprintf(logfile, "SOFT");
6378 cpu_loop_exit();
6379 break;
6380 }
6381 if (loglevel & CPU_LOG_TB_IN_ASM)
6382 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6383 }
6384}
6385
6386void helper_vmmcall(void)
6387{
6388 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6389 raise_exception(EXCP06_ILLOP);
6390}
6391
6392void helper_vmload(int aflag)
6393{
6394 target_ulong addr;
6395 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6396
6397 if (aflag == 2)
6398 addr = EAX;
6399 else
6400 addr = (uint32_t)EAX;
6401
6402 if (loglevel & CPU_LOG_TB_IN_ASM)
6403 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6404 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6405 env->segs[R_FS].base);
6406
6407 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6408 env, R_FS);
6409 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6410 env, R_GS);
6411 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6412 &env->tr);
6413 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6414 &env->ldt);
6415
6416#ifdef TARGET_X86_64
6417 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6418 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6419 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6420 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6421#endif
6422 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6423 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6424 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6425 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6426}
6427
6428void helper_vmsave(int aflag)
6429{
6430 target_ulong addr;
6431 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6432
6433 if (aflag == 2)
6434 addr = EAX;
6435 else
6436 addr = (uint32_t)EAX;
6437
6438 if (loglevel & CPU_LOG_TB_IN_ASM)
6439 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6440 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6441 env->segs[R_FS].base);
6442
6443 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6444 &env->segs[R_FS]);
6445 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6446 &env->segs[R_GS]);
6447 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6448 &env->tr);
6449 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6450 &env->ldt);
6451
6452#ifdef TARGET_X86_64
6453 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6454 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6455 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6456 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6457#endif
6458 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6459 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6460 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6461 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6462}
6463
6464void helper_stgi(void)
6465{
6466 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6467 env->hflags2 |= HF2_GIF_MASK;
6468}
6469
6470void helper_clgi(void)
6471{
6472 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6473 env->hflags2 &= ~HF2_GIF_MASK;
6474}
6475
6476void helper_skinit(void)
6477{
6478 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6479 /* XXX: not implemented */
6480 raise_exception(EXCP06_ILLOP);
6481}
6482
6483void helper_invlpga(int aflag)
6484{
6485 target_ulong addr;
6486 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6487
6488 if (aflag == 2)
6489 addr = EAX;
6490 else
6491 addr = (uint32_t)EAX;
6492
6493 /* XXX: could use the ASID to decide whether the
6494 flush is needed at all */
6495 tlb_flush_page(env, addr);
6496}
6497
6498void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6499{
6500 if (likely(!(env->hflags & HF_SVMI_MASK)))
6501 return;
6502#ifndef VBOX
6503 switch(type) {
6504#ifndef VBOX
6505 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6506#else
6507 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6508 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6509 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6510#endif
6511 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6512 helper_vmexit(type, param);
6513 }
6514 break;
6515#ifndef VBOX
6516 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6517#else
6518 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6519 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6520 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6521#endif
6522 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6523 helper_vmexit(type, param);
6524 }
6525 break;
6526 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6527 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6528 helper_vmexit(type, param);
6529 }
6530 break;
6531 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6532 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6533 helper_vmexit(type, param);
6534 }
6535 break;
6536 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6537 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6538 helper_vmexit(type, param);
6539 }
6540 break;
6541 case SVM_EXIT_MSR:
6542 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6543 /* FIXME: this should be read in at vmrun (faster this way?) */
6544 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6545 uint32_t t0, t1;
6546 switch((uint32_t)ECX) {
6547 case 0 ... 0x1fff:
6548 t0 = (ECX * 2) % 8;
6549 t1 = ECX / 8;
6550 break;
6551 case 0xc0000000 ... 0xc0001fff:
6552 t0 = (8192 + ECX - 0xc0000000) * 2;
6553 t1 = (t0 / 8);
6554 t0 %= 8;
6555 break;
6556 case 0xc0010000 ... 0xc0011fff:
6557 t0 = (16384 + ECX - 0xc0010000) * 2;
6558 t1 = (t0 / 8);
6559 t0 %= 8;
6560 break;
6561 default:
6562 helper_vmexit(type, param);
6563 t0 = 0;
6564 t1 = 0;
6565 break;
6566 }
6567 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6568 helper_vmexit(type, param);
6569 }
6570 break;
6571 default:
6572 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6573 helper_vmexit(type, param);
6574 }
6575 break;
6576 }
6577#else
6578 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6579#endif
6580}
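/* Editor's note (illustrative sketch, not part of the original file): the MSR
   permission-bitmap lookup in the SVM_EXIT_MSR case above can be restated as a
   small standalone helper.  The name msr_bitmap_hit() and the msrpm[] argument
   are hypothetical; 'write' stands in for 'param', which presumably is 0 for a
   RDMSR check and 1 for a WRMSR check, since '(1 << param) << t0' selects one
   of the two permission bits kept per MSR.  The index arithmetic mirrors the
   switch above (note that the first range divides the raw MSR number by 8 for
   the byte index, while the other two ranges divide the doubled offset by 8).

   static int msr_bitmap_hit(const uint8_t *msrpm, uint32_t msr, int write)
   {
       uint32_t bit, byte;
       if (msr <= 0x1fff) {
           bit  = (msr * 2) % 8;
           byte = msr / 8;
       } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
           bit  = ((8192 + msr - 0xc0000000) * 2) % 8;
           byte = ((8192 + msr - 0xc0000000) * 2) / 8;
       } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
           bit  = ((16384 + msr - 0xc0010000) * 2) % 8;
           byte = ((16384 + msr - 0xc0010000) * 2) / 8;
       } else {
           return 1;   // any MSR outside the three mapped ranges always exits
       }
       return (msrpm[byte] >> (bit + write)) & 1;
   }

   For example, MSR 0xc0000080 (EFER) lands at byte 2080 of the bitmap, bit 0
   for the read intercept and bit 1 for the write intercept. */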
6581
6582void helper_svm_check_io(uint32_t port, uint32_t param,
6583 uint32_t next_eip_addend)
6584{
6585 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6586 /* FIXME: this should be read in at vmrun (faster this way?) */
6587 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6588 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6589 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6590 /* next EIP */
6591 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6592 env->eip + next_eip_addend);
6593 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6594 }
6595 }
6596}
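/* Editor's note (illustrative only, not part of the original file): a worked
   example of the IOPM test above, assuming the usual SVM IOIO exit-info layout
   in which bits 4-6 of 'param' encode the access size (1, 2 or 4 bytes) and
   the bitmap keeps one permission bit per port.  For a 32-bit access to port
   0x1f0:

       (param >> 4) & 7   == 4                  // SZ32
       mask               == (1 << 4) - 1 == 0xf
       port / 8           == 62                 // 16-bit word read at iopm + 62
       port & 7           == 0                  // shift within that word

   so the lduw_phys() result is tested against bits 0-3, i.e. the permission
   bits for ports 0x1f0-0x1f3, and a #VMEXIT is raised if any byte of the
   access is intercepted. */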
6597
6598/* Note: currently only 32 bits of exit_code are used */
6599void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6600{
6601 uint32_t int_ctl;
6602
6603 if (loglevel & CPU_LOG_TB_IN_ASM)
6604 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6605 exit_code, exit_info_1,
6606 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6607 EIP);
6608
6609 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6610 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6611 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6612 } else {
6613 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6614 }
6615
6616 /* Save the VM state in the vmcb */
6617 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6618 &env->segs[R_ES]);
6619 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6620 &env->segs[R_CS]);
6621 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6622 &env->segs[R_SS]);
6623 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6624 &env->segs[R_DS]);
6625
6626 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6627 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6628
6629 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6630 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6631
6632 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6633 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6634 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6635 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6636 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6637
6638 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6639 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6640 int_ctl |= env->v_tpr & V_TPR_MASK;
6641 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6642 int_ctl |= V_IRQ_MASK;
6643 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6644
6645 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6646 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6647 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6649 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6650 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6651 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6652
6653 /* Reload the host state from vm_hsave */
6654 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6655 env->hflags &= ~HF_SVMI_MASK;
6656 env->intercept = 0;
6657 env->intercept_exceptions = 0;
6658 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6659 env->tsc_offset = 0;
6660
6661 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6662 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6663
6664 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6665 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6666
6667 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6668 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6669 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6670 /* we need to set the efer after the crs so the hidden flags get
6671 set properly */
6672 cpu_load_efer(env,
6673 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6674 env->eflags = 0;
6675 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6676 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6677 CC_OP = CC_OP_EFLAGS;
6678
6679 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6680 env, R_ES);
6681 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6682 env, R_CS);
6683 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6684 env, R_SS);
6685 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6686 env, R_DS);
6687
6688 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6689 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6690 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6691
6692 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6693 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6694
6695 /* other setups */
6696 cpu_x86_set_cpl(env, 0);
6697 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6698 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6699
6700 env->hflags2 &= ~HF2_GIF_MASK;
6701 /* FIXME: Resets the current ASID register to zero (host ASID). */
6702
6703 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6704
6705 /* Clears the TSC_OFFSET inside the processor. */
6706
6707 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6708 from the page table indicated by the host's CR3. If the PDPEs contain
6709 illegal state, the processor causes a shutdown. */
6710
6711 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6712 env->cr[0] |= CR0_PE_MASK;
6713 env->eflags &= ~VM_MASK;
6714
6715 /* Disables all breakpoints in the host DR7 register. */
6716
6717 /* Checks the reloaded host state for consistency. */
6718
6719 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6720 host's code segment or non-canonical (in the case of long mode), a
6721 #GP fault is delivered inside the host. */
6722
6723 /* remove any pending exception */
6724 env->exception_index = -1;
6725 env->error_code = 0;
6726 env->old_exception = -1;
6727
6728 cpu_loop_exit();
6729}
6730
6731#endif
6732
6733/* MMX/SSE */
6734/* XXX: optimize by storing fptt and fptags in the static cpu state */
6735void helper_enter_mmx(void)
6736{
6737 env->fpstt = 0;
6738 *(uint32_t *)(env->fptags) = 0;
6739 *(uint32_t *)(env->fptags + 4) = 0;
6740}
6741
6742void helper_emms(void)
6743{
6744 /* set to empty state */
6745 *(uint32_t *)(env->fptags) = 0x01010101;
6746 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6747}
6748
6749/* XXX: suppress */
6750void helper_movq(uint64_t *d, uint64_t *s)
6751{
6752 *d = *s;
6753}
6754
6755#define SHIFT 0
6756#include "ops_sse.h"
6757
6758#define SHIFT 1
6759#include "ops_sse.h"
6760
6761#define SHIFT 0
6762#include "helper_template.h"
6763#undef SHIFT
6764
6765#define SHIFT 1
6766#include "helper_template.h"
6767#undef SHIFT
6768
6769#define SHIFT 2
6770#include "helper_template.h"
6771#undef SHIFT
6772
6773#ifdef TARGET_X86_64
6774
6775#define SHIFT 3
6776#include "helper_template.h"
6777#undef SHIFT
6778
6779#endif
6780
6781/* bit operations */
6782target_ulong helper_bsf(target_ulong t0)
6783{
6784 int count;
6785 target_ulong res;
6786
6787 res = t0;
6788 count = 0;
6789 while ((res & 1) == 0) {
6790 count++;
6791 res >>= 1;
6792 }
6793 return count;
6794}
6795
6796target_ulong helper_bsr(target_ulong t0)
6797{
6798 int count;
6799 target_ulong res, mask;
6800
6801 res = t0;
6802 count = TARGET_LONG_BITS - 1;
6803 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6804 while ((res & mask) == 0) {
6805 count--;
6806 res <<= 1;
6807 }
6808 return count;
6809}
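/* Editor's note (illustrative only, not part of the original file): a minimal
   usage sketch for the two bit-scan helpers above, shown for a 32-bit
   target_ulong:

       helper_bsf(0x00000010) == 4      // index of the lowest set bit
       helper_bsr(0x00000010) == 4      // index of the highest set bit
       helper_bsf(0x80000001) == 0
       helper_bsr(0x80000001) == 31

   Neither loop terminates when t0 == 0, so callers must pass a non-zero
   operand; this matches BSF/BSR leaving the destination undefined for a zero
   source operand. */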
6810
6811
6812static int compute_all_eflags(void)
6813{
6814 return CC_SRC;
6815}
6816
6817static int compute_c_eflags(void)
6818{
6819 return CC_SRC & CC_C;
6820}
6821
6822#ifndef VBOX
6823CCTable cc_table[CC_OP_NB] = {
6824 [CC_OP_DYNAMIC] = { /* should never happen */ },
6825
6826 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6827
6828 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6829 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6830 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6831
6832 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6833 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6834 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6835
6836 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6837 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6838 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6839
6840 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6841 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6842 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6843
6844 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6845 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6846 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6847
6848 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6849 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6850 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6851
6852 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6853 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6854 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6855
6856 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6857 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6858 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6859
6860 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6861 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6862 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6863
6864 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6865 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6866 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6867
6868#ifdef TARGET_X86_64
6869 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6870
6871 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6872
6873 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6874
6875 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6876
6877 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6878
6879 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6880
6881 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6882
6883 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6884
6885 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6886
6887 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6888#endif
6889};
6890#else /* VBOX */
6891/* Sync carefully with cpu.h */
6892CCTable cc_table[CC_OP_NB] = {
6893 /* CC_OP_DYNAMIC */ { 0, 0 },
6894
6895 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6896
6897 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6898 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6899 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6900#ifdef TARGET_X86_64
6901 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6902#else
6903 /* CC_OP_MULQ */ { 0, 0 },
6904#endif
6905
6906 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6907 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6908 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6909#ifdef TARGET_X86_64
6910 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6911#else
6912 /* CC_OP_ADDQ */ { 0, 0 },
6913#endif
6914
6915 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6916 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6917 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6918#ifdef TARGET_X86_64
6919 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6920#else
6921 /* CC_OP_ADCQ */ { 0, 0 },
6922#endif
6923
6924 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6925 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6926 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6927#ifdef TARGET_X86_64
6928 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6929#else
6930 /* CC_OP_SUBQ */ { 0, 0 },
6931#endif
6932
6933 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6934 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6935 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6936#ifdef TARGET_X86_64
6937 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6938#else
6939 /* CC_OP_SBBQ */ { 0, 0 },
6940#endif
6941
6942 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6943 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6944 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6945#ifdef TARGET_X86_64
6946 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6947#else
6948 /* CC_OP_LOGICQ */ { 0, 0 },
6949#endif
6950
6951 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6952 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6953 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6954#ifdef TARGET_X86_64
6955 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6956#else
6957 /* CC_OP_INCQ */ { 0, 0 },
6958#endif
6959
6960 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6961 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6962 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6963#ifdef TARGET_X86_64
6964 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6965#else
6966 /* CC_OP_DECQ */ { 0, 0 },
6967#endif
6968
6969 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6970 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6971 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6972#ifdef TARGET_X86_64
6973 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6974#else
6975 /* CC_OP_SHLQ */ { 0, 0 },
6976#endif
6977
6978 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6979 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6980 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6981#ifdef TARGET_X86_64
6982 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6983#else
6984 /* CC_OP_SARQ */ { 0, 0 },
6985#endif
6986};
6987#endif /* VBOX */
6988