VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c@ 15173

Last change on this file since 15173 was 15173, checked in by vboxsync, 16 years ago

an attempt to fix MacOS alignment issues

File size: 194.3 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
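
/* Illustrative sketch, not part of the original file: the parity table above
   simply holds CC_P for every byte value with an even number of set bits
   (the x86 PF definition) and 0 otherwise, so it could equally be generated
   at startup like this. */
#if 0
static void gen_parity_table(uint8_t *tbl)
{
    int i, j, bits;
    for (i = 0; i < 256; i++) {
        bits = 0;
        for (j = 0; j < 8; j++)
            bits += (i >> j) & 1;
        tbl[i] = (bits & 1) ? 0 : CC_P;   /* even number of 1 bits -> PF set */
    }
}
#endif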
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
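
/* Illustrative sketch, not from the original source: RCL/RCR on 16-bit and
   8-bit operands rotate through CF, so the effective rotate count is the
   masked 5-bit count modulo 17 resp. 9.  The two tables above are just
   i % 17 and i % 9 for i = 0..31 and could be generated like this. */
#if 0
static void gen_rcl_tables(uint8_t *rclw, uint8_t *rclb)
{
    int i;
    for (i = 0; i < 32; i++) {
        rclw[i] = i % 17;   /* 16 data bits + CF */
        rclb[i] = i % 9;    /*  8 data bits + CF */
    }
}
#endif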
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
148 /* if TF will be set -> #GP */
149 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
150 || (new_eflags & TF_MASK)) {
151 raise_exception(EXCP0D_GPF);
152 } else {
153 load_eflags(new_eflags, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
154
155 if (new_eflags & IF_MASK) {
156 env->eflags |= VIF_MASK;
157 } else {
158 env->eflags &= ~VIF_MASK;
159 }
160 }
161}
162
163target_ulong helper_read_eflags_vme(void)
164{
165 uint32_t eflags;
166 eflags = cc_table[CC_OP].compute_all();
167 eflags |= (DF & DF_MASK);
168 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
169 if (env->eflags & VIF_MASK)
170 eflags |= IF_MASK;
171 return eflags;
172}
173
174void helper_dump_state()
175{
176 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
177 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
178 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
179 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
180 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
181 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
182 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
183}
184#endif
185
186/* return non-zero on error */
187#ifndef VBOX
188static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
189#else /* VBOX */
190DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
191#endif /* VBOX */
192 int selector)
193{
194 SegmentCache *dt;
195 int index;
196 target_ulong ptr;
197
198#ifdef VBOX
199 /* Trying to load a selector with RPL=1? */
200 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
201 {
202 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
203 selector = selector & 0xfffc;
204 }
205#endif
206
207 if (selector & 0x4)
208 dt = &env->ldt;
209 else
210 dt = &env->gdt;
211 index = selector & ~7;
212 if ((index + 7) > dt->limit)
213 return -1;
214 ptr = dt->base + index;
215 *e1_ptr = ldl_kernel(ptr);
216 *e2_ptr = ldl_kernel(ptr + 4);
217 return 0;
218}
219
220#ifndef VBOX
221static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
222#else /* VBOX */
223DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
224#endif /* VBOX */
225{
226 unsigned int limit;
227 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
228 if (e2 & DESC_G_MASK)
229 limit = (limit << 12) | 0xfff;
230 return limit;
231}
232
233#ifndef VBOX
234static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
235#else /* VBOX */
236DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
237#endif /* VBOX */
238{
239 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
240}
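
/* Illustrative example, not part of the original file: how the two helpers
   above decode a descriptor.  For a typical flat 4 GiB code segment the
   descriptor words are e1 = 0x0000ffff and e2 = 0x00cf9a00; the base pieces
   come from e1[31:16], e2[7:0] and e2[31:24], the limit from e1[15:0] and
   e2[19:16], scaled by 4 KiB when the granularity bit is set. */
#if 0
static void descriptor_decode_example(void)
{
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;
    uint32_t base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;    /* 4 KiB granularity */
    /* here base == 0x00000000 and limit == 0xffffffff */
}
#endif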
241
242#ifndef VBOX
243static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
244#else /* VBOX */
245DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
246#endif /* VBOX */
247{
248 sc->base = get_seg_base(e1, e2);
249 sc->limit = get_seg_limit(e1, e2);
250 sc->flags = e2;
251}
252
253/* init the segment cache in vm86 mode. */
254#ifndef VBOX
255static inline void load_seg_vm(int seg, int selector)
256#else /* VBOX */
257DECLINLINE(void) load_seg_vm(int seg, int selector)
258#endif /* VBOX */
259{
260 selector &= 0xffff;
261#ifdef VBOX
262 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
263
264 if (seg == R_CS)
265 flags |= DESC_CS_MASK;
266
267 cpu_x86_load_seg_cache(env, seg, selector,
268 (selector << 4), 0xffff, flags);
269#else
270 cpu_x86_load_seg_cache(env, seg, selector,
271 (selector << 4), 0xffff, 0);
272#endif
273}
274
275#ifndef VBOX
276static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
277#else /* VBOX */
278DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
279#endif /* VBOX */
280 uint32_t *esp_ptr, int dpl)
281{
282#ifndef VBOX
283 int type, index, shift;
284#else
285 unsigned int type, index, shift;
286#endif
287
288#if 0
289 {
290 int i;
291 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
292 for(i=0;i<env->tr.limit;i++) {
293 printf("%02x ", env->tr.base[i]);
294 if ((i & 7) == 7) printf("\n");
295 }
296 printf("\n");
297 }
298#endif
299
300 if (!(env->tr.flags & DESC_P_MASK))
301 cpu_abort(env, "invalid tss");
302 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
303 if ((type & 7) != 1)
304 cpu_abort(env, "invalid tss type");
305 shift = type >> 3;
306 index = (dpl * 4 + 2) << shift;
307 if (index + (4 << shift) - 1 > env->tr.limit)
308 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
309 if (shift == 0) {
310 *esp_ptr = lduw_kernel(env->tr.base + index);
311 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
312 } else {
313 *esp_ptr = ldl_kernel(env->tr.base + index);
314 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
315 }
316}
317
318/* XXX: merge with load_seg() */
319static void tss_load_seg(int seg_reg, int selector)
320{
321 uint32_t e1, e2;
322 int rpl, dpl, cpl;
323
324#ifdef VBOX
325 e1 = e2 = 0;
326 cpl = env->hflags & HF_CPL_MASK;
327 /* Trying to load a selector with RPL=1? */
328 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
329 {
330 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
331 selector = selector & 0xfffc;
332 }
333#endif
334
335 if ((selector & 0xfffc) != 0) {
336 if (load_segment(&e1, &e2, selector) != 0)
337 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
338 if (!(e2 & DESC_S_MASK))
339 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
340 rpl = selector & 3;
341 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
342 cpl = env->hflags & HF_CPL_MASK;
343 if (seg_reg == R_CS) {
344 if (!(e2 & DESC_CS_MASK))
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 /* XXX: is it correct ? */
347 if (dpl != rpl)
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 if ((e2 & DESC_C_MASK) && dpl > rpl)
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 } else if (seg_reg == R_SS) {
352 /* SS must be writable data */
353 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if (dpl != cpl || dpl != rpl)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 } else {
358 /* not readable code */
359 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 /* if data or non-conforming code, check the access rights */
362 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
363 if (dpl < cpl || dpl < rpl)
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 }
366 }
367 if (!(e2 & DESC_P_MASK))
368 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
369 cpu_x86_load_seg_cache(env, seg_reg, selector,
370 get_seg_base(e1, e2),
371 get_seg_limit(e1, e2),
372 e2);
373 } else {
374 if (seg_reg == R_SS || seg_reg == R_CS)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 }
377}
378
379#define SWITCH_TSS_JMP 0
380#define SWITCH_TSS_IRET 1
381#define SWITCH_TSS_CALL 2
382
383/* XXX: restore CPU state in registers (PowerPC case) */
384static void switch_tss(int tss_selector,
385 uint32_t e1, uint32_t e2, int source,
386 uint32_t next_eip)
387{
388 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
389 target_ulong tss_base;
390 uint32_t new_regs[8], new_segs[6];
391 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
392 uint32_t old_eflags, eflags_mask;
393 SegmentCache *dt;
394#ifndef VBOX
395 int index;
396#else
397 unsigned int index;
398#endif
399 target_ulong ptr;
400
401 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
402#ifdef DEBUG_PCALL
403 if (loglevel & CPU_LOG_PCALL)
404 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
405#endif
406
407#if defined(VBOX) && defined(DEBUG)
408 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
409#endif
410
411 /* if task gate, we read the TSS segment and we load it */
412 if (type == 5) {
413 if (!(e2 & DESC_P_MASK))
414 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
415 tss_selector = e1 >> 16;
416 if (tss_selector & 4)
417 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
418 if (load_segment(&e1, &e2, tss_selector) != 0)
419 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
420 if (e2 & DESC_S_MASK)
421 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
422 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
423 if ((type & 7) != 1)
424 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
425 }
426
427 if (!(e2 & DESC_P_MASK))
428 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
429
430 if (type & 8)
431 tss_limit_max = 103;
432 else
433 tss_limit_max = 43;
434 tss_limit = get_seg_limit(e1, e2);
435 tss_base = get_seg_base(e1, e2);
436 if ((tss_selector & 4) != 0 ||
437 tss_limit < tss_limit_max)
438 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
439 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
440 if (old_type & 8)
441 old_tss_limit_max = 103;
442 else
443 old_tss_limit_max = 43;
444
445 /* read all the registers from the new TSS */
446 if (type & 8) {
447 /* 32 bit */
448 new_cr3 = ldl_kernel(tss_base + 0x1c);
449 new_eip = ldl_kernel(tss_base + 0x20);
450 new_eflags = ldl_kernel(tss_base + 0x24);
451 for(i = 0; i < 8; i++)
452 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
453 for(i = 0; i < 6; i++)
454 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
455 new_ldt = lduw_kernel(tss_base + 0x60);
456 new_trap = ldl_kernel(tss_base + 0x64);
457 } else {
458 /* 16 bit */
459 new_cr3 = 0;
460 new_eip = lduw_kernel(tss_base + 0x0e);
461 new_eflags = lduw_kernel(tss_base + 0x10);
462 for(i = 0; i < 8; i++)
463 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
464 for(i = 0; i < 4; i++)
465 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
466 new_ldt = lduw_kernel(tss_base + 0x2a);
467 new_segs[R_FS] = 0;
468 new_segs[R_GS] = 0;
469 new_trap = 0;
470 }
471
472 /* NOTE: we must avoid memory exceptions during the task switch,
473 so we make dummy accesses beforehand */
474 /* XXX: it can still fail in some cases, so a bigger hack is
475 necessary to validate the TLB after having done the accesses */
476
477 v1 = ldub_kernel(env->tr.base);
478 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
479 stb_kernel(env->tr.base, v1);
480 stb_kernel(env->tr.base + old_tss_limit_max, v2);
481
482 /* clear busy bit (it is restartable) */
483 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
484 target_ulong ptr;
485 uint32_t e2;
486 ptr = env->gdt.base + (env->tr.selector & ~7);
487 e2 = ldl_kernel(ptr + 4);
488 e2 &= ~DESC_TSS_BUSY_MASK;
489 stl_kernel(ptr + 4, e2);
490 }
491 old_eflags = compute_eflags();
492 if (source == SWITCH_TSS_IRET)
493 old_eflags &= ~NT_MASK;
494
495 /* save the current state in the old TSS */
496 if (type & 8) {
497 /* 32 bit */
498 stl_kernel(env->tr.base + 0x20, next_eip);
499 stl_kernel(env->tr.base + 0x24, old_eflags);
500 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
501 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
502 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
503 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
504 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
505 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
506 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
507 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
508 for(i = 0; i < 6; i++)
509 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
510#if defined(VBOX) && defined(DEBUG)
511 printf("TSS 32 bits switch\n");
512 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
513#endif
514 } else {
515 /* 16 bit */
516 stw_kernel(env->tr.base + 0x0e, next_eip);
517 stw_kernel(env->tr.base + 0x10, old_eflags);
518 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
519 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
520 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
521 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
522 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
523 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
524 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
525 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
526 for(i = 0; i < 4; i++)
527 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
528 }
529
530 /* now if an exception occurs, it will occur in the next task
531 context */
532
533 if (source == SWITCH_TSS_CALL) {
534 stw_kernel(tss_base, env->tr.selector);
535 new_eflags |= NT_MASK;
536 }
537
538 /* set busy bit */
539 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
540 target_ulong ptr;
541 uint32_t e2;
542 ptr = env->gdt.base + (tss_selector & ~7);
543 e2 = ldl_kernel(ptr + 4);
544 e2 |= DESC_TSS_BUSY_MASK;
545 stl_kernel(ptr + 4, e2);
546 }
547
548 /* set the new CPU state */
549 /* from this point, any exception which occurs can give problems */
550 env->cr[0] |= CR0_TS_MASK;
551 env->hflags |= HF_TS_MASK;
552 env->tr.selector = tss_selector;
553 env->tr.base = tss_base;
554 env->tr.limit = tss_limit;
555 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
556
557 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
558 cpu_x86_update_cr3(env, new_cr3);
559 }
560
561 /* load all registers that cannot fault first, then reload those that
562 may raise exceptions */
563 env->eip = new_eip;
564 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
565 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
566 if (!(type & 8))
567 eflags_mask &= 0xffff;
568 load_eflags(new_eflags, eflags_mask);
569 /* XXX: what to do in 16 bit case ? */
570 EAX = new_regs[0];
571 ECX = new_regs[1];
572 EDX = new_regs[2];
573 EBX = new_regs[3];
574 ESP = new_regs[4];
575 EBP = new_regs[5];
576 ESI = new_regs[6];
577 EDI = new_regs[7];
578 if (new_eflags & VM_MASK) {
579 for(i = 0; i < 6; i++)
580 load_seg_vm(i, new_segs[i]);
581 /* in vm86, CPL is always 3 */
582 cpu_x86_set_cpl(env, 3);
583 } else {
584 /* CPL is set to the RPL of CS */
585 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
586 /* first just selectors as the rest may trigger exceptions */
587 for(i = 0; i < 6; i++)
588 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
589 }
590
591 env->ldt.selector = new_ldt & ~4;
592 env->ldt.base = 0;
593 env->ldt.limit = 0;
594 env->ldt.flags = 0;
595
596 /* load the LDT */
597 if (new_ldt & 4)
598 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
599
600 if ((new_ldt & 0xfffc) != 0) {
601 dt = &env->gdt;
602 index = new_ldt & ~7;
603 if ((index + 7) > dt->limit)
604 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
605 ptr = dt->base + index;
606 e1 = ldl_kernel(ptr);
607 e2 = ldl_kernel(ptr + 4);
608 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
609 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
610 if (!(e2 & DESC_P_MASK))
611 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
612 load_seg_cache_raw_dt(&env->ldt, e1, e2);
613 }
614
615 /* load the segments */
616 if (!(new_eflags & VM_MASK)) {
617 tss_load_seg(R_CS, new_segs[R_CS]);
618 tss_load_seg(R_SS, new_segs[R_SS]);
619 tss_load_seg(R_ES, new_segs[R_ES]);
620 tss_load_seg(R_DS, new_segs[R_DS]);
621 tss_load_seg(R_FS, new_segs[R_FS]);
622 tss_load_seg(R_GS, new_segs[R_GS]);
623 }
624
625 /* check that EIP is in the CS segment limits */
626 if (new_eip > env->segs[R_CS].limit) {
627 /* XXX: different exception if CALL ? */
628 raise_exception_err(EXCP0D_GPF, 0);
629 }
630}
631
632/* check if Port I/O is allowed in TSS */
633#ifndef VBOX
634static inline void check_io(int addr, int size)
635{
636 int io_offset, val, mask;
637
638#else /* VBOX */
639DECLINLINE(void) check_io(int addr, int size)
640{
641 int val, mask;
642 unsigned int io_offset;
643#endif /* VBOX */
644 /* TSS must be a valid 32 bit one */
645 if (!(env->tr.flags & DESC_P_MASK) ||
646 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
647 env->tr.limit < 103)
648 goto fail;
649 io_offset = lduw_kernel(env->tr.base + 0x66);
650 io_offset += (addr >> 3);
651 /* Note: the check needs two bytes */
652 if ((io_offset + 1) > env->tr.limit)
653 goto fail;
654 val = lduw_kernel(env->tr.base + io_offset);
655 val >>= (addr & 7);
656 mask = (1 << size) - 1;
657 /* all bits must be zero to allow the I/O */
658 if ((val & mask) != 0) {
659 fail:
660 raise_exception_err(EXCP0D_GPF, 0);
661 }
662}
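
/* Illustrative sketch (assumption, not from the original source): the same
   I/O permission bitmap test as check_io() above, but on a plain byte array
   instead of guest memory.  An access of 'size' bytes at port 'addr' is
   allowed only if all corresponding bits are clear; reading 16 bits at once
   covers accesses that straddle a byte boundary. */
#if 0
static int io_allowed_example(const uint8_t *bitmap, int addr, int size)
{
    int val = bitmap[addr >> 3] | (bitmap[(addr >> 3) + 1] << 8);
    val >>= (addr & 7);
    return (val & ((1 << size) - 1)) == 0;
}
#endif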
663
664#ifdef VBOX
665/* Keep in sync with gen_check_external_event() */
666void helper_check_external_event()
667{
668 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
669 | CPU_INTERRUPT_EXTERNAL_TIMER
670 | CPU_INTERRUPT_EXTERNAL_DMA))
671 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
672 && (env->eflags & IF_MASK)
673 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
674 {
675 helper_external_event();
676 }
677
678}
679
680void helper_sync_seg(uint32_t reg)
681{
682 assert(env->segs[reg].newselector != 0);
683 sync_seg(env, reg, env->segs[reg].newselector);
684}
685#endif
686
687void helper_check_iob(uint32_t t0)
688{
689 check_io(t0, 1);
690}
691
692void helper_check_iow(uint32_t t0)
693{
694 check_io(t0, 2);
695}
696
697void helper_check_iol(uint32_t t0)
698{
699 check_io(t0, 4);
700}
701
702void helper_outb(uint32_t port, uint32_t data)
703{
704 cpu_outb(env, port, data & 0xff);
705}
706
707target_ulong helper_inb(uint32_t port)
708{
709 return cpu_inb(env, port);
710}
711
712void helper_outw(uint32_t port, uint32_t data)
713{
714 cpu_outw(env, port, data & 0xffff);
715}
716
717target_ulong helper_inw(uint32_t port)
718{
719 return cpu_inw(env, port);
720}
721
722void helper_outl(uint32_t port, uint32_t data)
723{
724 cpu_outl(env, port, data);
725}
726
727target_ulong helper_inl(uint32_t port)
728{
729 return cpu_inl(env, port);
730}
731
732#ifndef VBOX
733static inline unsigned int get_sp_mask(unsigned int e2)
734#else /* VBOX */
735DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
736#endif /* VBOX */
737{
738 if (e2 & DESC_B_MASK)
739 return 0xffffffff;
740 else
741 return 0xffff;
742}
743
744#ifdef TARGET_X86_64
745#define SET_ESP(val, sp_mask)\
746do {\
747 if ((sp_mask) == 0xffff)\
748 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
749 else if ((sp_mask) == 0xffffffffLL)\
750 ESP = (uint32_t)(val);\
751 else\
752 ESP = (val);\
753} while (0)
754#else
755#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
756#endif
757
758/* On 64-bit machines this can overflow, so this segment addition macro
759 * can be used to trim the value to 32 bits whenever needed */
760#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
761
762/* XXX: add an is_user flag to have proper security support */
763#define PUSHW(ssp, sp, sp_mask, val)\
764{\
765 sp -= 2;\
766 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
767}
768
769#define PUSHL(ssp, sp, sp_mask, val)\
770{\
771 sp -= 4;\
772 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
773}
774
775#define POPW(ssp, sp, sp_mask, val)\
776{\
777 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
778 sp += 2;\
779}
780
781#define POPL(ssp, sp, sp_mask, val)\
782{\
783 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
784 sp += 4;\
785}
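
/* Illustrative example, not part of the original file: with a 16-bit stack
   (sp_mask == 0xffff) the macros above wrap the stack pointer inside the
   64 KiB segment while the variable itself keeps counting down; SET_ESP
   later writes back only the masked low bits, leaving the upper bits of
   ESP untouched. */
#if 0
static void push_word_example(void)
{
    target_ulong ssp = 0x10000;     /* stack segment base */
    target_ulong sp = 0;            /* SP == 0 wraps to 0xfffe on push */
    uint32_t sp_mask = 0xffff;
    sp -= 2;
    stw_kernel(ssp + (sp & sp_mask), 0x1234);   /* store at 0x1fffe */
}
#endif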
786
787/* protected mode interrupt */
788static void do_interrupt_protected(int intno, int is_int, int error_code,
789 unsigned int next_eip, int is_hw)
790{
791 SegmentCache *dt;
792 target_ulong ptr, ssp;
793 int type, dpl, selector, ss_dpl, cpl;
794 int has_error_code, new_stack, shift;
795 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
796 uint32_t old_eip, sp_mask;
797
798#ifdef VBOX
799 ss = ss_e1 = ss_e2 = 0;
800# ifdef VBOX_WITH_VMI
801 if ( intno == 6
802 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
803 {
804 env->exception_index = EXCP_PARAV_CALL;
805 cpu_loop_exit();
806 }
807# endif
808 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
809 cpu_loop_exit();
810#endif
811
812 has_error_code = 0;
813 if (!is_int && !is_hw) {
814 switch(intno) {
815 case 8:
816 case 10:
817 case 11:
818 case 12:
819 case 13:
820 case 14:
821 case 17:
822 has_error_code = 1;
823 break;
824 }
825 }
826 if (is_int)
827 old_eip = next_eip;
828 else
829 old_eip = env->eip;
830
831 dt = &env->idt;
832#ifndef VBOX
833 if (intno * 8 + 7 > dt->limit)
834#else
835 if ((unsigned)intno * 8 + 7 > dt->limit)
836#endif
837 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
838 ptr = dt->base + intno * 8;
839 e1 = ldl_kernel(ptr);
840 e2 = ldl_kernel(ptr + 4);
841 /* check gate type */
842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
843 switch(type) {
844 case 5: /* task gate */
845 /* must do that check here to return the correct error code */
846 if (!(e2 & DESC_P_MASK))
847 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
848 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
849 if (has_error_code) {
850 int type;
851 uint32_t mask;
852 /* push the error code */
853 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
854 shift = type >> 3;
855 if (env->segs[R_SS].flags & DESC_B_MASK)
856 mask = 0xffffffff;
857 else
858 mask = 0xffff;
859 esp = (ESP - (2 << shift)) & mask;
860 ssp = env->segs[R_SS].base + esp;
861 if (shift)
862 stl_kernel(ssp, error_code);
863 else
864 stw_kernel(ssp, error_code);
865 SET_ESP(esp, mask);
866 }
867 return;
868 case 6: /* 286 interrupt gate */
869 case 7: /* 286 trap gate */
870 case 14: /* 386 interrupt gate */
871 case 15: /* 386 trap gate */
872 break;
873 default:
874 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
875 break;
876 }
877 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
878 cpl = env->hflags & HF_CPL_MASK;
879 /* check privilege if software int */
880 if (is_int && dpl < cpl)
881 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
882 /* check valid bit */
883 if (!(e2 & DESC_P_MASK))
884 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
885 selector = e1 >> 16;
886 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
887 if ((selector & 0xfffc) == 0)
888 raise_exception_err(EXCP0D_GPF, 0);
889
890 if (load_segment(&e1, &e2, selector) != 0)
891 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
892 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
893 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 if (dpl > cpl)
896 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
899 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
900 /* to inner privilege */
901 get_ss_esp_from_tss(&ss, &esp, dpl);
902 if ((ss & 0xfffc) == 0)
903 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
904 if ((ss & 3) != dpl)
905 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
906 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
907 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
908 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
909 if (ss_dpl != dpl)
910 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
911 if (!(ss_e2 & DESC_S_MASK) ||
912 (ss_e2 & DESC_CS_MASK) ||
913 !(ss_e2 & DESC_W_MASK))
914 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
915 if (!(ss_e2 & DESC_P_MASK))
916#ifdef VBOX /* See page 3-477 of 253666.pdf */
917 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
918#else
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920#endif
921 new_stack = 1;
922 sp_mask = get_sp_mask(ss_e2);
923 ssp = get_seg_base(ss_e1, ss_e2);
924#if defined(VBOX) && defined(DEBUG)
925 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
926#endif
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
929 if (env->eflags & VM_MASK)
930 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
931 new_stack = 0;
932 sp_mask = get_sp_mask(env->segs[R_SS].flags);
933 ssp = env->segs[R_SS].base;
934 esp = ESP;
935 dpl = cpl;
936 } else {
937 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938 new_stack = 0; /* avoid warning */
939 sp_mask = 0; /* avoid warning */
940 ssp = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
942 }
943
944 shift = type >> 3;
945
946#if 0
947 /* XXX: check that enough room is available */
948 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
949 if (env->eflags & VM_MASK)
950 push_size += 8;
951 push_size <<= shift;
952#endif
953 if (shift == 1) {
954 if (new_stack) {
955 if (env->eflags & VM_MASK) {
956 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
957 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
958 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
959 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
960 }
961 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
962 PUSHL(ssp, esp, sp_mask, ESP);
963 }
964 PUSHL(ssp, esp, sp_mask, compute_eflags());
965 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
966 PUSHL(ssp, esp, sp_mask, old_eip);
967 if (has_error_code) {
968 PUSHL(ssp, esp, sp_mask, error_code);
969 }
970 } else {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHW(ssp, esp, sp_mask, ESP);
980 }
981 PUSHW(ssp, esp, sp_mask, compute_eflags());
982 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHW(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHW(ssp, esp, sp_mask, error_code);
986 }
987 }
988
989 if (new_stack) {
990 if (env->eflags & VM_MASK) {
991 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
992 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
993 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
994 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
995 }
996 ss = (ss & ~3) | dpl;
997 cpu_x86_load_seg_cache(env, R_SS, ss,
998 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
999 }
1000 SET_ESP(esp, sp_mask);
1001
1002 selector = (selector & ~3) | dpl;
1003 cpu_x86_load_seg_cache(env, R_CS, selector,
1004 get_seg_base(e1, e2),
1005 get_seg_limit(e1, e2),
1006 e2);
1007 cpu_x86_set_cpl(env, dpl);
1008 env->eip = offset;
1009
1010 /* interrupt gates clear the IF flag */
1011 if ((type & 1) == 0) {
1012 env->eflags &= ~IF_MASK;
1013 }
1014 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1015}
1016#ifdef VBOX
1017
1018/* check if VME interrupt redirection is enabled in TSS */
1019DECLINLINE(bool) is_vme_irq_redirected(int intno)
1020{
1021 unsigned int io_offset, intredir_offset;
1022 unsigned char val, mask;
1023
1024 /* TSS must be a valid 32 bit one */
1025 if (!(env->tr.flags & DESC_P_MASK) ||
1026 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1027 env->tr.limit < 103)
1028 goto fail;
1029 io_offset = lduw_kernel(env->tr.base + 0x66);
1030 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1031 intredir_offset = io_offset - 0x20;
1032
1033 intredir_offset += (intno >> 3);
1034 if ((intredir_offset) > env->tr.limit)
1035 goto fail;
1036
1037 val = ldub_kernel(env->tr.base + intredir_offset);
1038 mask = 1 << (unsigned char)(intno & 7);
1039
1040 /* bit set means no redirection. */
1041 if ((val & mask) != 0) {
1042 return false;
1043 }
1044 return true;
1045
1046fail:
1047 raise_exception_err(EXCP0D_GPF, 0);
1048 return true;
1049}
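
/* Illustrative sketch (assumption, not from the original source): the VME
   interrupt redirection bitmap is the 32 bytes (256 bits, one per vector)
   immediately below the I/O bitmap in the TSS, which is why the code above
   subtracts 0x20 from the I/O bitmap offset.  On a plain copy of those 32
   bytes the test would look like this. */
#if 0
static bool vme_redirected_example(const uint8_t *redir_bitmap, int intno)
{
    /* clear bit -> redirect to the vm86 handler, set bit -> protected mode */
    return (redir_bitmap[intno >> 3] & (1 << (intno & 7))) == 0;
}
#endif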
1050
1051/* V86 mode software interrupt with CR4.VME=1 */
1052static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1053{
1054 target_ulong ptr, ssp;
1055 int selector;
1056 uint32_t offset, esp;
1057 uint32_t old_cs, old_eflags;
1058 uint32_t iopl;
1059
1060 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1061
1062 if (!is_vme_irq_redirected(intno))
1063 {
1064 if (iopl == 3)
1065 {
1066 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1067 return;
1068 }
1069 else
1070 raise_exception_err(EXCP0D_GPF, 0);
1071 }
1072
1073 /* virtual mode idt is at linear address 0 */
1074 ptr = 0 + intno * 4;
1075 offset = lduw_kernel(ptr);
1076 selector = lduw_kernel(ptr + 2);
1077 esp = ESP;
1078 ssp = env->segs[R_SS].base;
1079 old_cs = env->segs[R_CS].selector;
1080
1081 old_eflags = compute_eflags();
1082 if (iopl < 3)
1083 {
1084 /* copy VIF into IF and set IOPL to 3 */
1085 if (env->eflags & VIF_MASK)
1086 old_eflags |= IF_MASK;
1087 else
1088 old_eflags &= ~IF_MASK;
1089
1090 old_eflags |= (3 << IOPL_SHIFT);
1091 }
1092
1093 /* XXX: use SS segment size ? */
1094 PUSHW(ssp, esp, 0xffff, old_eflags);
1095 PUSHW(ssp, esp, 0xffff, old_cs);
1096 PUSHW(ssp, esp, 0xffff, next_eip);
1097
1098 /* update processor state */
1099 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1100 env->eip = offset;
1101 env->segs[R_CS].selector = selector;
1102 env->segs[R_CS].base = (selector << 4);
1103 env->eflags &= ~(TF_MASK | RF_MASK);
1104
1105 if (iopl < 3)
1106 env->eflags &= ~VIF_MASK;
1107 else
1108 env->eflags &= ~IF_MASK;
1109}
1110#endif /* VBOX */
1111
1112#ifdef TARGET_X86_64
1113
1114#define PUSHQ(sp, val)\
1115{\
1116 sp -= 8;\
1117 stq_kernel(sp, (val));\
1118}
1119
1120#define POPQ(sp, val)\
1121{\
1122 val = ldq_kernel(sp);\
1123 sp += 8;\
1124}
1125
1126#ifndef VBOX
1127static inline target_ulong get_rsp_from_tss(int level)
1128#else /* VBOX */
1129DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1130#endif /* VBOX */
1131{
1132 int index;
1133
1134#if 0
1135 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1136 env->tr.base, env->tr.limit);
1137#endif
1138
1139 if (!(env->tr.flags & DESC_P_MASK))
1140 cpu_abort(env, "invalid tss");
1141 index = 8 * level + 4;
1142 if ((index + 7) > env->tr.limit)
1143 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1144 return ldq_kernel(env->tr.base + index);
1145}
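
/* Illustrative note, not part of the original file: in the 64-bit TSS the
   stack pointers sit at fixed offsets, which is what the index computation
   above encodes: RSPn at offset 4 + 8*n (n = 0..2), and ISTn via the same
   formula with level = n + 3, e.g. IST1 at offset 36.  The callers below
   therefore pass either dpl or ist + 3. */
#if 0
static int tss64_rsp_offset_example(int level)
{
    return 8 * level + 4;   /* RSP0 -> 4, RSP2 -> 20, IST1 (level 4) -> 36 */
}
#endif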
1146
1147/* 64 bit interrupt */
1148static void do_interrupt64(int intno, int is_int, int error_code,
1149 target_ulong next_eip, int is_hw)
1150{
1151 SegmentCache *dt;
1152 target_ulong ptr;
1153 int type, dpl, selector, cpl, ist;
1154 int has_error_code, new_stack;
1155 uint32_t e1, e2, e3, ss;
1156 target_ulong old_eip, esp, offset;
1157
1158#ifdef VBOX
1159 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1160 cpu_loop_exit();
1161#endif
1162
1163 has_error_code = 0;
1164 if (!is_int && !is_hw) {
1165 switch(intno) {
1166 case 8:
1167 case 10:
1168 case 11:
1169 case 12:
1170 case 13:
1171 case 14:
1172 case 17:
1173 has_error_code = 1;
1174 break;
1175 }
1176 }
1177 if (is_int)
1178 old_eip = next_eip;
1179 else
1180 old_eip = env->eip;
1181
1182 dt = &env->idt;
1183 if (intno * 16 + 15 > dt->limit)
1184 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1185 ptr = dt->base + intno * 16;
1186 e1 = ldl_kernel(ptr);
1187 e2 = ldl_kernel(ptr + 4);
1188 e3 = ldl_kernel(ptr + 8);
1189 /* check gate type */
1190 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1191 switch(type) {
1192 case 14: /* 386 interrupt gate */
1193 case 15: /* 386 trap gate */
1194 break;
1195 default:
1196 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1197 break;
1198 }
1199 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1200 cpl = env->hflags & HF_CPL_MASK;
1201 /* check privilege if software int */
1202 if (is_int && dpl < cpl)
1203 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1204 /* check valid bit */
1205 if (!(e2 & DESC_P_MASK))
1206 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1207 selector = e1 >> 16;
1208 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1209 ist = e2 & 7;
1210 if ((selector & 0xfffc) == 0)
1211 raise_exception_err(EXCP0D_GPF, 0);
1212
1213 if (load_segment(&e1, &e2, selector) != 0)
1214 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1215 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1216 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1217 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1218 if (dpl > cpl)
1219 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1220 if (!(e2 & DESC_P_MASK))
1221 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1222 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1223 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1224 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1225 /* to inner privilege */
1226 if (ist != 0)
1227 esp = get_rsp_from_tss(ist + 3);
1228 else
1229 esp = get_rsp_from_tss(dpl);
1230 esp &= ~0xfLL; /* align stack */
1231 ss = 0;
1232 new_stack = 1;
1233 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1234 /* to same privilege */
1235 if (env->eflags & VM_MASK)
1236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1237 new_stack = 0;
1238 if (ist != 0)
1239 esp = get_rsp_from_tss(ist + 3);
1240 else
1241 esp = ESP;
1242 esp &= ~0xfLL; /* align stack */
1243 dpl = cpl;
1244 } else {
1245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1246 new_stack = 0; /* avoid warning */
1247 esp = 0; /* avoid warning */
1248 }
1249
1250 PUSHQ(esp, env->segs[R_SS].selector);
1251 PUSHQ(esp, ESP);
1252 PUSHQ(esp, compute_eflags());
1253 PUSHQ(esp, env->segs[R_CS].selector);
1254 PUSHQ(esp, old_eip);
1255 if (has_error_code) {
1256 PUSHQ(esp, error_code);
1257 }
1258
1259 if (new_stack) {
1260 ss = 0 | dpl;
1261 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1262 }
1263 ESP = esp;
1264
1265 selector = (selector & ~3) | dpl;
1266 cpu_x86_load_seg_cache(env, R_CS, selector,
1267 get_seg_base(e1, e2),
1268 get_seg_limit(e1, e2),
1269 e2);
1270 cpu_x86_set_cpl(env, dpl);
1271 env->eip = offset;
1272
1273 /* interrupt gates clear the IF flag */
1274 if ((type & 1) == 0) {
1275 env->eflags &= ~IF_MASK;
1276 }
1277 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1278}
1279#endif
1280
1281#if defined(CONFIG_USER_ONLY)
1282void helper_syscall(int next_eip_addend)
1283{
1284 env->exception_index = EXCP_SYSCALL;
1285 env->exception_next_eip = env->eip + next_eip_addend;
1286 cpu_loop_exit();
1287}
1288#else
1289void helper_syscall(int next_eip_addend)
1290{
1291 int selector;
1292
1293 if (!(env->efer & MSR_EFER_SCE)) {
1294 raise_exception_err(EXCP06_ILLOP, 0);
1295 }
1296 selector = (env->star >> 32) & 0xffff;
1297#ifdef TARGET_X86_64
1298 if (env->hflags & HF_LMA_MASK) {
1299 int code64;
1300
1301 ECX = env->eip + next_eip_addend;
1302 env->regs[11] = compute_eflags();
1303
1304 code64 = env->hflags & HF_CS64_MASK;
1305
1306 cpu_x86_set_cpl(env, 0);
1307 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1308 0, 0xffffffff,
1309 DESC_G_MASK | DESC_P_MASK |
1310 DESC_S_MASK |
1311 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1312 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1313 0, 0xffffffff,
1314 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1315 DESC_S_MASK |
1316 DESC_W_MASK | DESC_A_MASK);
1317 env->eflags &= ~env->fmask;
1318 load_eflags(env->eflags, 0);
1319 if (code64)
1320 env->eip = env->lstar;
1321 else
1322 env->eip = env->cstar;
1323 } else
1324#endif
1325 {
1326 ECX = (uint32_t)(env->eip + next_eip_addend);
1327
1328 cpu_x86_set_cpl(env, 0);
1329 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1330 0, 0xffffffff,
1331 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1332 DESC_S_MASK |
1333 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1334 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1335 0, 0xffffffff,
1336 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1337 DESC_S_MASK |
1338 DESC_W_MASK | DESC_A_MASK);
1339 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1340 env->eip = (uint32_t)env->star;
1341 }
1342}
1343#endif
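
/* Illustrative sketch, not from the original source: how SYSCALL and SYSRET
   derive their selectors from the STAR MSR, matching the shifts used in
   helper_syscall() above and helper_sysret() below.  Bits 47:32 hold the
   SYSCALL CS (with SS = CS + 8); bits 63:48 hold the SYSRET base, giving
   SS = base + 8 and CS = base (legacy) or base + 16 (64-bit return). */
#if 0
static void star_selectors_example(uint64_t star)
{
    uint16_t syscall_cs = (star >> 32) & 0xffff;   /* ring 0 code selector */
    uint16_t sysret_base = (star >> 48) & 0xffff;  /* ring 3 selector base */
    (void)syscall_cs;
    (void)sysret_base;
}
#endif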
1344
1345void helper_sysret(int dflag)
1346{
1347 int cpl, selector;
1348
1349 if (!(env->efer & MSR_EFER_SCE)) {
1350 raise_exception_err(EXCP06_ILLOP, 0);
1351 }
1352 cpl = env->hflags & HF_CPL_MASK;
1353 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1354 raise_exception_err(EXCP0D_GPF, 0);
1355 }
1356 selector = (env->star >> 48) & 0xffff;
1357#ifdef TARGET_X86_64
1358 if (env->hflags & HF_LMA_MASK) {
1359 if (dflag == 2) {
1360 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1361 0, 0xffffffff,
1362 DESC_G_MASK | DESC_P_MASK |
1363 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1364 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1365 DESC_L_MASK);
1366 env->eip = ECX;
1367 } else {
1368 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1369 0, 0xffffffff,
1370 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1371 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1372 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1373 env->eip = (uint32_t)ECX;
1374 }
1375 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1376 0, 0xffffffff,
1377 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1378 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1379 DESC_W_MASK | DESC_A_MASK);
1380 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1381 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1382 cpu_x86_set_cpl(env, 3);
1383 } else
1384#endif
1385 {
1386 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1387 0, 0xffffffff,
1388 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1389 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1390 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1391 env->eip = (uint32_t)ECX;
1392 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1393 0, 0xffffffff,
1394 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1395 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1396 DESC_W_MASK | DESC_A_MASK);
1397 env->eflags |= IF_MASK;
1398 cpu_x86_set_cpl(env, 3);
1399 }
1400#ifdef USE_KQEMU
1401 if (kqemu_is_ok(env)) {
1402 if (env->hflags & HF_LMA_MASK)
1403 CC_OP = CC_OP_EFLAGS;
1404 env->exception_index = -1;
1405 cpu_loop_exit();
1406 }
1407#endif
1408}
1409
1410#ifdef VBOX
1411/**
1412 * Checks and processes external VMM events.
1413 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1414 */
1415void helper_external_event(void)
1416{
1417#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1418 uintptr_t uESP;
1419 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
1420 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
1421#endif
1422 /* Keep in sync with flags checked by gen_check_external_event() */
1423 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1424 {
1425 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1426 ~CPU_INTERRUPT_EXTERNAL_HARD);
1427 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1428 }
1429 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1430 {
1431 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1432 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1433 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1434 }
1435 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1436 {
1437 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1438 ~CPU_INTERRUPT_EXTERNAL_DMA);
1439 remR3DmaRun(env);
1440 }
1441 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1442 {
1443 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1444 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1445 remR3TimersRun(env);
1446 }
1447}
1448/* helper for recording call instruction addresses for later scanning */
1449void helper_record_call()
1450{
1451 if ( !(env->state & CPU_RAW_RING0)
1452 && (env->cr[0] & CR0_PG_MASK)
1453 && !(env->eflags & X86_EFL_IF))
1454 remR3RecordCall(env);
1455}
1456#endif /* VBOX */
1457
1458/* real mode interrupt */
1459static void do_interrupt_real(int intno, int is_int, int error_code,
1460 unsigned int next_eip)
1461{
1462 SegmentCache *dt;
1463 target_ulong ptr, ssp;
1464 int selector;
1465 uint32_t offset, esp;
1466 uint32_t old_cs, old_eip;
1467
1468 /* real mode (simpler !) */
1469 dt = &env->idt;
1470#ifndef VBOX
1471 if (intno * 4 + 3 > dt->limit)
1472#else
1473 if ((unsigned)intno * 4 + 3 > dt->limit)
1474#endif
1475 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1476 ptr = dt->base + intno * 4;
1477 offset = lduw_kernel(ptr);
1478 selector = lduw_kernel(ptr + 2);
1479 esp = ESP;
1480 ssp = env->segs[R_SS].base;
1481 if (is_int)
1482 old_eip = next_eip;
1483 else
1484 old_eip = env->eip;
1485 old_cs = env->segs[R_CS].selector;
1486 /* XXX: use SS segment size ? */
1487 PUSHW(ssp, esp, 0xffff, compute_eflags());
1488 PUSHW(ssp, esp, 0xffff, old_cs);
1489 PUSHW(ssp, esp, 0xffff, old_eip);
1490
1491 /* update processor state */
1492 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1493 env->eip = offset;
1494 env->segs[R_CS].selector = selector;
1495 env->segs[R_CS].base = (selector << 4);
1496 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1497}
1498
1499/* fake user mode interrupt */
1500void do_interrupt_user(int intno, int is_int, int error_code,
1501 target_ulong next_eip)
1502{
1503 SegmentCache *dt;
1504 target_ulong ptr;
1505 int dpl, cpl, shift;
1506 uint32_t e2;
1507
1508 dt = &env->idt;
1509 if (env->hflags & HF_LMA_MASK) {
1510 shift = 4;
1511 } else {
1512 shift = 3;
1513 }
1514 ptr = dt->base + (intno << shift);
1515 e2 = ldl_kernel(ptr + 4);
1516
1517 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1518 cpl = env->hflags & HF_CPL_MASK;
1519 /* check privilege if software int */
1520 if (is_int && dpl < cpl)
1521 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1522
1523 /* Since we emulate only user space, we cannot do more than
1524 exit the emulation with the appropriate exception and error
1525 code */
1526 if (is_int)
1527 EIP = next_eip;
1528}
1529
1530/*
1531 * Begin execution of an interrupt. is_int is TRUE if coming from
1532 * the int instruction. next_eip is the EIP value AFTER the interrupt
1533 * instruction. It is only relevant if is_int is TRUE.
1534 */
1535void do_interrupt(int intno, int is_int, int error_code,
1536 target_ulong next_eip, int is_hw)
1537{
1538 if (loglevel & CPU_LOG_INT) {
1539 if ((env->cr[0] & CR0_PE_MASK)) {
1540 static int count;
1541 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1542 count, intno, error_code, is_int,
1543 env->hflags & HF_CPL_MASK,
1544 env->segs[R_CS].selector, EIP,
1545 (int)env->segs[R_CS].base + EIP,
1546 env->segs[R_SS].selector, ESP);
1547 if (intno == 0x0e) {
1548 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1549 } else {
1550 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1551 }
1552 fprintf(logfile, "\n");
1553 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1554#if 0
1555 {
1556 int i;
1557 uint8_t *ptr;
1558 fprintf(logfile, " code=");
1559 ptr = env->segs[R_CS].base + env->eip;
1560 for(i = 0; i < 16; i++) {
1561 fprintf(logfile, " %02x", ldub(ptr + i));
1562 }
1563 fprintf(logfile, "\n");
1564 }
1565#endif
1566 count++;
1567 }
1568 }
1569 if (env->cr[0] & CR0_PE_MASK) {
1570#ifdef TARGET_X86_64
1571 if (env->hflags & HF_LMA_MASK) {
1572 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1573 } else
1574#endif
1575 {
1576#ifdef VBOX
1577 /* int xx *, v86 code and VME enabled? */
1578 if ( (env->eflags & VM_MASK)
1579 && (env->cr[4] & CR4_VME_MASK)
1580 && is_int
1581 && !is_hw
1582 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1583 )
1584 do_soft_interrupt_vme(intno, error_code, next_eip);
1585 else
1586#endif /* VBOX */
1587 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1588 }
1589 } else {
1590 do_interrupt_real(intno, is_int, error_code, next_eip);
1591 }
1592}
1593
1594/*
1595 * Check nested exceptions and change to double or triple fault if
1596 * needed. It should only be called if this is not an interrupt.
1597 * Returns the new exception number.
1598 */
1599static int check_exception(int intno, int *error_code)
1600{
1601 int first_contributory = env->old_exception == 0 ||
1602 (env->old_exception >= 10 &&
1603 env->old_exception <= 13);
1604 int second_contributory = intno == 0 ||
1605 (intno >= 10 && intno <= 13);
1606
1607 if (loglevel & CPU_LOG_INT)
1608 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1609 env->old_exception, intno);
1610
1611 if (env->old_exception == EXCP08_DBLE)
1612 cpu_abort(env, "triple fault");
1613
1614 if ((first_contributory && second_contributory)
1615 || (env->old_exception == EXCP0E_PAGE &&
1616 (second_contributory || (intno == EXCP0E_PAGE)))) {
1617 intno = EXCP08_DBLE;
1618 *error_code = 0;
1619 }
1620
1621 if (second_contributory || (intno == EXCP0E_PAGE) ||
1622 (intno == EXCP08_DBLE))
1623 env->old_exception = intno;
1624
1625 return intno;
1626}
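
/* Illustrative examples, not part of the original file, of the escalation
   rules encoded above: a contributory exception (#DE, #TS..#GP) raised while
   delivering another contributory exception becomes #DF, a #PF during a #PF
   becomes #DF, and anything on top of a #DF is a triple fault; a #PF raised
   while delivering e.g. #DE is still delivered as #PF. */
#if 0
static void check_exception_examples(void)
{
    int err = 0;
    env->old_exception = 11;                            /* #NP, contributory */
    assert(check_exception(13, &err) == EXCP08_DBLE);   /* #GP -> #DF */
    env->old_exception = EXCP0E_PAGE;
    assert(check_exception(EXCP0E_PAGE, &err) == EXCP08_DBLE); /* #PF on #PF */
    env->old_exception = 0;                             /* #DE */
    assert(check_exception(EXCP0E_PAGE, &err) == EXCP0E_PAGE); /* stays #PF */
}
#endif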
1627
1628/*
1629 * Signal an interrupt. It is executed in the main CPU loop.
1630 * is_int is TRUE if coming from the int instruction. next_eip is the
1631 * EIP value AFTER the interrupt instruction. It is only relevant if
1632 * is_int is TRUE.
1633 */
1634void raise_interrupt(int intno, int is_int, int error_code,
1635 int next_eip_addend)
1636{
1637#if defined(VBOX) && defined(DEBUG)
1638 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1639#endif
1640 if (!is_int) {
1641 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1642 intno = check_exception(intno, &error_code);
1643 } else {
1644 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1645 }
1646
1647 env->exception_index = intno;
1648 env->error_code = error_code;
1649 env->exception_is_int = is_int;
1650 env->exception_next_eip = env->eip + next_eip_addend;
1651 cpu_loop_exit();
1652}
1653
1654/* shortcuts to generate exceptions */
1655
1656void (raise_exception_err)(int exception_index, int error_code)
1657{
1658 raise_interrupt(exception_index, 0, error_code, 0);
1659}
1660
1661void raise_exception(int exception_index)
1662{
1663 raise_interrupt(exception_index, 0, 0, 0);
1664}
1665
1666/* SMM support */
1667
1668#if defined(CONFIG_USER_ONLY)
1669
1670void do_smm_enter(void)
1671{
1672}
1673
1674void helper_rsm(void)
1675{
1676}
1677
1678#else
1679
1680#ifdef TARGET_X86_64
1681#define SMM_REVISION_ID 0x00020064
1682#else
1683#define SMM_REVISION_ID 0x00020000
1684#endif
1685
1686void do_smm_enter(void)
1687{
1688 target_ulong sm_state;
1689 SegmentCache *dt;
1690 int i, offset;
1691
1692 if (loglevel & CPU_LOG_INT) {
1693 fprintf(logfile, "SMM: enter\n");
1694 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1695 }
1696
1697 env->hflags |= HF_SMM_MASK;
1698 cpu_smm_update(env);
1699
1700 sm_state = env->smbase + 0x8000;
1701
1702#ifdef TARGET_X86_64
1703 for(i = 0; i < 6; i++) {
1704 dt = &env->segs[i];
1705 offset = 0x7e00 + i * 16;
1706 stw_phys(sm_state + offset, dt->selector);
1707 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1708 stl_phys(sm_state + offset + 4, dt->limit);
1709 stq_phys(sm_state + offset + 8, dt->base);
1710 }
1711
1712 stq_phys(sm_state + 0x7e68, env->gdt.base);
1713 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1714
1715 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1716 stq_phys(sm_state + 0x7e78, env->ldt.base);
1717 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1718 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1719
1720 stq_phys(sm_state + 0x7e88, env->idt.base);
1721 stl_phys(sm_state + 0x7e84, env->idt.limit);
1722
1723 stw_phys(sm_state + 0x7e90, env->tr.selector);
1724 stq_phys(sm_state + 0x7e98, env->tr.base);
1725 stl_phys(sm_state + 0x7e94, env->tr.limit);
1726 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1727
1728 stq_phys(sm_state + 0x7ed0, env->efer);
1729
1730 stq_phys(sm_state + 0x7ff8, EAX);
1731 stq_phys(sm_state + 0x7ff0, ECX);
1732 stq_phys(sm_state + 0x7fe8, EDX);
1733 stq_phys(sm_state + 0x7fe0, EBX);
1734 stq_phys(sm_state + 0x7fd8, ESP);
1735 stq_phys(sm_state + 0x7fd0, EBP);
1736 stq_phys(sm_state + 0x7fc8, ESI);
1737 stq_phys(sm_state + 0x7fc0, EDI);
1738 for(i = 8; i < 16; i++)
1739 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1740 stq_phys(sm_state + 0x7f78, env->eip);
1741 stl_phys(sm_state + 0x7f70, compute_eflags());
1742 stl_phys(sm_state + 0x7f68, env->dr[6]);
1743 stl_phys(sm_state + 0x7f60, env->dr[7]);
1744
1745 stl_phys(sm_state + 0x7f48, env->cr[4]);
1746 stl_phys(sm_state + 0x7f50, env->cr[3]);
1747 stl_phys(sm_state + 0x7f58, env->cr[0]);
1748
1749 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1750 stl_phys(sm_state + 0x7f00, env->smbase);
1751#else
1752 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1753 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1754 stl_phys(sm_state + 0x7ff4, compute_eflags());
1755 stl_phys(sm_state + 0x7ff0, env->eip);
1756 stl_phys(sm_state + 0x7fec, EDI);
1757 stl_phys(sm_state + 0x7fe8, ESI);
1758 stl_phys(sm_state + 0x7fe4, EBP);
1759 stl_phys(sm_state + 0x7fe0, ESP);
1760 stl_phys(sm_state + 0x7fdc, EBX);
1761 stl_phys(sm_state + 0x7fd8, EDX);
1762 stl_phys(sm_state + 0x7fd4, ECX);
1763 stl_phys(sm_state + 0x7fd0, EAX);
1764 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1765 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1766
1767 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1768 stl_phys(sm_state + 0x7f64, env->tr.base);
1769 stl_phys(sm_state + 0x7f60, env->tr.limit);
1770 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1771
1772 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1773 stl_phys(sm_state + 0x7f80, env->ldt.base);
1774 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1775 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1776
1777 stl_phys(sm_state + 0x7f74, env->gdt.base);
1778 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1779
1780 stl_phys(sm_state + 0x7f58, env->idt.base);
1781 stl_phys(sm_state + 0x7f54, env->idt.limit);
1782
1783 for(i = 0; i < 6; i++) {
1784 dt = &env->segs[i];
1785 if (i < 3)
1786 offset = 0x7f84 + i * 12;
1787 else
1788 offset = 0x7f2c + (i - 3) * 12;
1789 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1790 stl_phys(sm_state + offset + 8, dt->base);
1791 stl_phys(sm_state + offset + 4, dt->limit);
1792 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1793 }
1794 stl_phys(sm_state + 0x7f14, env->cr[4]);
1795
1796 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1797 stl_phys(sm_state + 0x7ef8, env->smbase);
1798#endif
1799 /* init SMM cpu state */
1800
1801#ifdef TARGET_X86_64
1802 cpu_load_efer(env, 0);
1803#endif
1804 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1805 env->eip = 0x00008000;
1806 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1807 0xffffffff, 0);
1808 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1809 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1810 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1811 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1812 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1813
1814 cpu_x86_update_cr0(env,
1815 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1816 cpu_x86_update_cr4(env, 0);
1817 env->dr[7] = 0x00000400;
1818 CC_OP = CC_OP_EFLAGS;
1819}
1820
1821void helper_rsm(void)
1822{
1823#ifdef VBOX
1824 cpu_abort(env, "helper_rsm");
1825#else /* !VBOX */
1828 target_ulong sm_state;
1829 int i, offset;
1830 uint32_t val;
1831
1832 sm_state = env->smbase + 0x8000;
1833#ifdef TARGET_X86_64
1834 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1835
1836 for(i = 0; i < 6; i++) {
1837 offset = 0x7e00 + i * 16;
1838 cpu_x86_load_seg_cache(env, i,
1839 lduw_phys(sm_state + offset),
1840 ldq_phys(sm_state + offset + 8),
1841 ldl_phys(sm_state + offset + 4),
1842 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1843 }
1844
1845 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1846 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1847
1848 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1849 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1850 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1851 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1852
1853 env->idt.base = ldq_phys(sm_state + 0x7e88);
1854 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1855
1856 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1857 env->tr.base = ldq_phys(sm_state + 0x7e98);
1858 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1859 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1860
1861 EAX = ldq_phys(sm_state + 0x7ff8);
1862 ECX = ldq_phys(sm_state + 0x7ff0);
1863 EDX = ldq_phys(sm_state + 0x7fe8);
1864 EBX = ldq_phys(sm_state + 0x7fe0);
1865 ESP = ldq_phys(sm_state + 0x7fd8);
1866 EBP = ldq_phys(sm_state + 0x7fd0);
1867 ESI = ldq_phys(sm_state + 0x7fc8);
1868 EDI = ldq_phys(sm_state + 0x7fc0);
1869 for(i = 8; i < 16; i++)
1870 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1871 env->eip = ldq_phys(sm_state + 0x7f78);
1872 load_eflags(ldl_phys(sm_state + 0x7f70),
1873 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1874 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1875 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1876
1877 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1878 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1879 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1880
1881 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1882 if (val & 0x20000) {
1883 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1884 }
1885#else
1886 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1887 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1888 load_eflags(ldl_phys(sm_state + 0x7ff4),
1889 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1890 env->eip = ldl_phys(sm_state + 0x7ff0);
1891 EDI = ldl_phys(sm_state + 0x7fec);
1892 ESI = ldl_phys(sm_state + 0x7fe8);
1893 EBP = ldl_phys(sm_state + 0x7fe4);
1894 ESP = ldl_phys(sm_state + 0x7fe0);
1895 EBX = ldl_phys(sm_state + 0x7fdc);
1896 EDX = ldl_phys(sm_state + 0x7fd8);
1897 ECX = ldl_phys(sm_state + 0x7fd4);
1898 EAX = ldl_phys(sm_state + 0x7fd0);
1899 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1900 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1901
1902 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1903 env->tr.base = ldl_phys(sm_state + 0x7f64);
1904 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1905 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1906
1907 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1908 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1909 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1910 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1911
1912 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1913 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1914
1915 env->idt.base = ldl_phys(sm_state + 0x7f58);
1916 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1917
1918 for(i = 0; i < 6; i++) {
1919 if (i < 3)
1920 offset = 0x7f84 + i * 12;
1921 else
1922 offset = 0x7f2c + (i - 3) * 12;
1923 cpu_x86_load_seg_cache(env, i,
1924 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1925 ldl_phys(sm_state + offset + 8),
1926 ldl_phys(sm_state + offset + 4),
1927 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1928 }
1929 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1930
1931 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1932 if (val & 0x20000) {
1933 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1934 }
1935#endif
1936 CC_OP = CC_OP_EFLAGS;
1937 env->hflags &= ~HF_SMM_MASK;
1938 cpu_smm_update(env);
1939
1940 if (loglevel & CPU_LOG_INT) {
1941 fprintf(logfile, "SMM: after RSM\n");
1942 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1943 }
1944#endif /* !VBOX */
1945}
1946
1947#endif /* !CONFIG_USER_ONLY */
1948
1949
1950/* division, flags are undefined */
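/* Note: these helpers raise #DE (EXCP00_DIVZ) both for a zero divisor and
   when the quotient does not fit in the destination register. */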
1951
1952void helper_divb_AL(target_ulong t0)
1953{
1954 unsigned int num, den, q, r;
1955
1956 num = (EAX & 0xffff);
1957 den = (t0 & 0xff);
1958 if (den == 0) {
1959 raise_exception(EXCP00_DIVZ);
1960 }
1961 q = (num / den);
1962 if (q > 0xff)
1963 raise_exception(EXCP00_DIVZ);
1964 q &= 0xff;
1965 r = (num % den) & 0xff;
1966 EAX = (EAX & ~0xffff) | (r << 8) | q;
1967}
1968
1969void helper_idivb_AL(target_ulong t0)
1970{
1971 int num, den, q, r;
1972
1973 num = (int16_t)EAX;
1974 den = (int8_t)t0;
1975 if (den == 0) {
1976 raise_exception(EXCP00_DIVZ);
1977 }
1978 q = (num / den);
1979 if (q != (int8_t)q)
1980 raise_exception(EXCP00_DIVZ);
1981 q &= 0xff;
1982 r = (num % den) & 0xff;
1983 EAX = (EAX & ~0xffff) | (r << 8) | q;
1984}
1985
1986void helper_divw_AX(target_ulong t0)
1987{
1988 unsigned int num, den, q, r;
1989
1990 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1991 den = (t0 & 0xffff);
1992 if (den == 0) {
1993 raise_exception(EXCP00_DIVZ);
1994 }
1995 q = (num / den);
1996 if (q > 0xffff)
1997 raise_exception(EXCP00_DIVZ);
1998 q &= 0xffff;
1999 r = (num % den) & 0xffff;
2000 EAX = (EAX & ~0xffff) | q;
2001 EDX = (EDX & ~0xffff) | r;
2002}
2003
2004void helper_idivw_AX(target_ulong t0)
2005{
2006 int num, den, q, r;
2007
2008 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2009 den = (int16_t)t0;
2010 if (den == 0) {
2011 raise_exception(EXCP00_DIVZ);
2012 }
2013 q = (num / den);
2014 if (q != (int16_t)q)
2015 raise_exception(EXCP00_DIVZ);
2016 q &= 0xffff;
2017 r = (num % den) & 0xffff;
2018 EAX = (EAX & ~0xffff) | q;
2019 EDX = (EDX & ~0xffff) | r;
2020}
2021
2022void helper_divl_EAX(target_ulong t0)
2023{
2024 unsigned int den, r;
2025 uint64_t num, q;
2026
2027 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2028 den = t0;
2029 if (den == 0) {
2030 raise_exception(EXCP00_DIVZ);
2031 }
2032 q = (num / den);
2033 r = (num % den);
2034 if (q > 0xffffffff)
2035 raise_exception(EXCP00_DIVZ);
2036 EAX = (uint32_t)q;
2037 EDX = (uint32_t)r;
2038}
2039
2040void helper_idivl_EAX(target_ulong t0)
2041{
2042 int den, r;
2043 int64_t num, q;
2044
2045 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2046 den = t0;
2047 if (den == 0) {
2048 raise_exception(EXCP00_DIVZ);
2049 }
2050 q = (num / den);
2051 r = (num % den);
2052 if (q != (int32_t)q)
2053 raise_exception(EXCP00_DIVZ);
2054 EAX = (uint32_t)q;
2055 EDX = (uint32_t)r;
2056}
2057
2058/* bcd */
2059
2060/* XXX: exception */
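/* AAM divides AL by the immediate base, AAD folds AH*base back into AL.
   Real hardware raises #DE for AAM with a zero immediate; that case is not
   checked here (hence the XXX note above). */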
2061void helper_aam(int base)
2062{
2063 int al, ah;
2064 al = EAX & 0xff;
2065 ah = al / base;
2066 al = al % base;
2067 EAX = (EAX & ~0xffff) | al | (ah << 8);
2068 CC_DST = al;
2069}
2070
2071void helper_aad(int base)
2072{
2073 int al, ah;
2074 al = EAX & 0xff;
2075 ah = (EAX >> 8) & 0xff;
2076 al = ((ah * base) + al) & 0xff;
2077 EAX = (EAX & ~0xffff) | al;
2078 CC_DST = al;
2079}
2080
2081void helper_aaa(void)
2082{
2083 int icarry;
2084 int al, ah, af;
2085 int eflags;
2086
2087 eflags = cc_table[CC_OP].compute_all();
2088 af = eflags & CC_A;
2089 al = EAX & 0xff;
2090 ah = (EAX >> 8) & 0xff;
2091
2092 icarry = (al > 0xf9);
2093 if (((al & 0x0f) > 9 ) || af) {
2094 al = (al + 6) & 0x0f;
2095 ah = (ah + 1 + icarry) & 0xff;
2096 eflags |= CC_C | CC_A;
2097 } else {
2098 eflags &= ~(CC_C | CC_A);
2099 al &= 0x0f;
2100 }
2101 EAX = (EAX & ~0xffff) | al | (ah << 8);
2102 CC_SRC = eflags;
2103 FORCE_RET();
2104}
2105
2106void helper_aas(void)
2107{
2108 int icarry;
2109 int al, ah, af;
2110 int eflags;
2111
2112 eflags = cc_table[CC_OP].compute_all();
2113 af = eflags & CC_A;
2114 al = EAX & 0xff;
2115 ah = (EAX >> 8) & 0xff;
2116
2117 icarry = (al < 6);
2118 if (((al & 0x0f) > 9 ) || af) {
2119 al = (al - 6) & 0x0f;
2120 ah = (ah - 1 - icarry) & 0xff;
2121 eflags |= CC_C | CC_A;
2122 } else {
2123 eflags &= ~(CC_C | CC_A);
2124 al &= 0x0f;
2125 }
2126 EAX = (EAX & ~0xffff) | al | (ah << 8);
2127 CC_SRC = eflags;
2128 FORCE_RET();
2129}
2130
2131void helper_daa(void)
2132{
2133 int al, af, cf;
2134 int eflags;
2135
2136 eflags = cc_table[CC_OP].compute_all();
2137 cf = eflags & CC_C;
2138 af = eflags & CC_A;
2139 al = EAX & 0xff;
2140
2141 eflags = 0;
2142 if (((al & 0x0f) > 9 ) || af) {
2143 al = (al + 6) & 0xff;
2144 eflags |= CC_A;
2145 }
2146 if ((al > 0x9f) || cf) {
2147 al = (al + 0x60) & 0xff;
2148 eflags |= CC_C;
2149 }
2150 EAX = (EAX & ~0xff) | al;
2151 /* well, speed is not an issue here, so we compute the flags by hand */
2152 eflags |= (al == 0) << 6; /* zf */
2153 eflags |= parity_table[al]; /* pf */
2154 eflags |= (al & 0x80); /* sf */
2155 CC_SRC = eflags;
2156 FORCE_RET();
2157}
2158
2159void helper_das(void)
2160{
2161 int al, al1, af, cf;
2162 int eflags;
2163
2164 eflags = cc_table[CC_OP].compute_all();
2165 cf = eflags & CC_C;
2166 af = eflags & CC_A;
2167 al = EAX & 0xff;
2168
2169 eflags = 0;
2170 al1 = al;
2171 if (((al & 0x0f) > 9 ) || af) {
2172 eflags |= CC_A;
2173 if (al < 6 || cf)
2174 eflags |= CC_C;
2175 al = (al - 6) & 0xff;
2176 }
2177 if ((al1 > 0x99) || cf) {
2178 al = (al - 0x60) & 0xff;
2179 eflags |= CC_C;
2180 }
2181 EAX = (EAX & ~0xff) | al;
2182 /* well, speed is not an issue here, so we compute the flags by hand */
2183 eflags |= (al == 0) << 6; /* zf */
2184 eflags |= parity_table[al]; /* pf */
2185 eflags |= (al & 0x80); /* sf */
2186 CC_SRC = eflags;
2187 FORCE_RET();
2188}
2189
2190void helper_into(int next_eip_addend)
2191{
2192 int eflags;
2193 eflags = cc_table[CC_OP].compute_all();
2194 if (eflags & CC_O) {
2195 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2196 }
2197}
2198
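/* CMPXCHG8B: compare EDX:EAX with the quadword at a0; on match store ECX:EBX
   and set ZF, otherwise reload EDX:EAX from memory and clear ZF. The store is
   done in both cases so the memory write always happens. */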
2199void helper_cmpxchg8b(target_ulong a0)
2200{
2201 uint64_t d;
2202 int eflags;
2203
2204 eflags = cc_table[CC_OP].compute_all();
2205 d = ldq(a0);
2206 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2207 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2208 eflags |= CC_Z;
2209 } else {
2210 /* always do the store */
2211 stq(a0, d);
2212 EDX = (uint32_t)(d >> 32);
2213 EAX = (uint32_t)d;
2214 eflags &= ~CC_Z;
2215 }
2216 CC_SRC = eflags;
2217}
2218
2219#ifdef TARGET_X86_64
2220void helper_cmpxchg16b(target_ulong a0)
2221{
2222 uint64_t d0, d1;
2223 int eflags;
2224
2225 if ((a0 & 0xf) != 0)
2226 raise_exception(EXCP0D_GPF);
2227 eflags = cc_table[CC_OP].compute_all();
2228 d0 = ldq(a0);
2229 d1 = ldq(a0 + 8);
2230 if (d0 == EAX && d1 == EDX) {
2231 stq(a0, EBX);
2232 stq(a0 + 8, ECX);
2233 eflags |= CC_Z;
2234 } else {
2235 /* always do the store */
2236 stq(a0, d0);
2237 stq(a0 + 8, d1);
2238 EDX = d1;
2239 EAX = d0;
2240 eflags &= ~CC_Z;
2241 }
2242 CC_SRC = eflags;
2243}
2244#endif
2245
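/* Single step trap: set the BS bit (bit 14) in DR6 and raise #DB. */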
2246void helper_single_step(void)
2247{
2248 env->dr[6] |= 0x4000;
2249 raise_exception(EXCP01_SSTP);
2250}
2251
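/* CPUID: the VBox build delegates the whole query to remR3CpuId (see the
   #else branch at the end of this function), so the guest-visible leaves come
   from the VMM side; the QEMU implementation below is used otherwise. */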
2252void helper_cpuid(void)
2253{
2254#ifndef VBOX
2255 uint32_t index;
2256
2257 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2258
2259 index = (uint32_t)EAX;
2260 /* test if maximum index reached */
2261 if (index & 0x80000000) {
2262 if (index > env->cpuid_xlevel)
2263 index = env->cpuid_level;
2264 } else {
2265 if (index > env->cpuid_level)
2266 index = env->cpuid_level;
2267 }
2268
2269 switch(index) {
2270 case 0:
2271 EAX = env->cpuid_level;
2272 EBX = env->cpuid_vendor1;
2273 EDX = env->cpuid_vendor2;
2274 ECX = env->cpuid_vendor3;
2275 break;
2276 case 1:
2277 EAX = env->cpuid_version;
2278 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2279 ECX = env->cpuid_ext_features;
2280 EDX = env->cpuid_features;
2281 break;
2282 case 2:
2283 /* cache info: needed for Pentium Pro compatibility */
2284 EAX = 1;
2285 EBX = 0;
2286 ECX = 0;
2287 EDX = 0x2c307d;
2288 break;
2289 case 4:
2290 /* cache info: needed for Core compatibility */
2291 switch (ECX) {
2292 case 0: /* L1 dcache info */
2293 EAX = 0x0000121;
2294 EBX = 0x1c0003f;
2295 ECX = 0x000003f;
2296 EDX = 0x0000001;
2297 break;
2298 case 1: /* L1 icache info */
2299 EAX = 0x0000122;
2300 EBX = 0x1c0003f;
2301 ECX = 0x000003f;
2302 EDX = 0x0000001;
2303 break;
2304 case 2: /* L2 cache info */
2305 EAX = 0x0000143;
2306 EBX = 0x3c0003f;
2307 ECX = 0x0000fff;
2308 EDX = 0x0000001;
2309 break;
2310 default: /* end of info */
2311 EAX = 0;
2312 EBX = 0;
2313 ECX = 0;
2314 EDX = 0;
2315 break;
2316 }
2317
2318 break;
2319 case 5:
2320 /* mwait info: needed for Core compatibility */
2321 EAX = 0; /* Smallest monitor-line size in bytes */
2322 EBX = 0; /* Largest monitor-line size in bytes */
2323 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2324 EDX = 0;
2325 break;
2326 case 6:
2327 /* Thermal and Power Leaf */
2328 EAX = 0;
2329 EBX = 0;
2330 ECX = 0;
2331 EDX = 0;
2332 break;
2333 case 9:
2334 /* Direct Cache Access Information Leaf */
2335 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2336 EBX = 0;
2337 ECX = 0;
2338 EDX = 0;
2339 break;
2340 case 0xA:
2341 /* Architectural Performance Monitoring Leaf */
2342 EAX = 0;
2343 EBX = 0;
2344 ECX = 0;
2345 EDX = 0;
2346 break;
2347 case 0x80000000:
2348 EAX = env->cpuid_xlevel;
2349 EBX = env->cpuid_vendor1;
2350 EDX = env->cpuid_vendor2;
2351 ECX = env->cpuid_vendor3;
2352 break;
2353 case 0x80000001:
2354 EAX = env->cpuid_features;
2355 EBX = 0;
2356 ECX = env->cpuid_ext3_features;
2357 EDX = env->cpuid_ext2_features;
2358 break;
2359 case 0x80000002:
2360 case 0x80000003:
2361 case 0x80000004:
2362 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2363 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2364 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2365 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2366 break;
2367 case 0x80000005:
2368 /* cache info (L1 cache) */
2369 EAX = 0x01ff01ff;
2370 EBX = 0x01ff01ff;
2371 ECX = 0x40020140;
2372 EDX = 0x40020140;
2373 break;
2374 case 0x80000006:
2375 /* cache info (L2 cache) */
2376 EAX = 0;
2377 EBX = 0x42004200;
2378 ECX = 0x02008140;
2379 EDX = 0;
2380 break;
2381 case 0x80000008:
2382 /* virtual & phys address size in low 2 bytes. */
2383/* XXX: This value must match the one used in the MMU code. */
2384 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2385 /* 64 bit processor */
2386#if defined(USE_KQEMU)
2387 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2388#else
2389/* XXX: The physical address space is limited to 42 bits in exec.c. */
2390 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2391#endif
2392 } else {
2393#if defined(USE_KQEMU)
2394 EAX = 0x00000020; /* 32 bits physical */
2395#else
2396 if (env->cpuid_features & CPUID_PSE36)
2397 EAX = 0x00000024; /* 36 bits physical */
2398 else
2399 EAX = 0x00000020; /* 32 bits physical */
2400#endif
2401 }
2402 EBX = 0;
2403 ECX = 0;
2404 EDX = 0;
2405 break;
2406 case 0x8000000A:
2407 EAX = 0x00000001;
2408 EBX = 0;
2409 ECX = 0;
2410 EDX = 0;
2411 break;
2412 default:
2413 /* reserved values: zero */
2414 EAX = 0;
2415 EBX = 0;
2416 ECX = 0;
2417 EDX = 0;
2418 break;
2419 }
2420#else /* VBOX */
2421 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2422#endif /* VBOX */
2423}
2424
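/* ENTER with a non-zero nesting level: copy level-1 frame pointers from the
   old frame onto the new stack and finally push the new frame pointer t1;
   the two branches differ only in operand size (32 vs 16 bit). */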
2425void helper_enter_level(int level, int data32, target_ulong t1)
2426{
2427 target_ulong ssp;
2428 uint32_t esp_mask, esp, ebp;
2429
2430 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2431 ssp = env->segs[R_SS].base;
2432 ebp = EBP;
2433 esp = ESP;
2434 if (data32) {
2435 /* 32 bit */
2436 esp -= 4;
2437 while (--level) {
2438 esp -= 4;
2439 ebp -= 4;
2440 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2441 }
2442 esp -= 4;
2443 stl(ssp + (esp & esp_mask), t1);
2444 } else {
2445 /* 16 bit */
2446 esp -= 2;
2447 while (--level) {
2448 esp -= 2;
2449 ebp -= 2;
2450 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2451 }
2452 esp -= 2;
2453 stw(ssp + (esp & esp_mask), t1);
2454 }
2455}
2456
2457#ifdef TARGET_X86_64
2458void helper_enter64_level(int level, int data64, target_ulong t1)
2459{
2460 target_ulong esp, ebp;
2461 ebp = EBP;
2462 esp = ESP;
2463
2464 if (data64) {
2465 /* 64 bit */
2466 esp -= 8;
2467 while (--level) {
2468 esp -= 8;
2469 ebp -= 8;
2470 stq(esp, ldq(ebp));
2471 }
2472 esp -= 8;
2473 stq(esp, t1);
2474 } else {
2475 /* 16 bit */
2476 esp -= 2;
2477 while (--level) {
2478 esp -= 2;
2479 ebp -= 2;
2480 stw(esp, lduw(ebp));
2481 }
2482 esp -= 2;
2483 stw(esp, t1);
2484 }
2485}
2486#endif
2487
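/* LLDT: load LDTR from a GDT descriptor. A null selector leaves an empty LDT;
   in long mode the descriptor is 16 bytes and supplies the upper base bits. */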
2488void helper_lldt(int selector)
2489{
2490 SegmentCache *dt;
2491 uint32_t e1, e2;
2492#ifndef VBOX
2493 int index, entry_limit;
2494#else
2495 unsigned int index, entry_limit;
2496#endif
2497 target_ulong ptr;
2498
2499#ifdef VBOX
2500 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2501 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2502#endif
2503
2504 selector &= 0xffff;
2505 if ((selector & 0xfffc) == 0) {
2506 /* XXX: NULL selector case: invalid LDT */
2507 env->ldt.base = 0;
2508 env->ldt.limit = 0;
2509 } else {
2510 if (selector & 0x4)
2511 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2512 dt = &env->gdt;
2513 index = selector & ~7;
2514#ifdef TARGET_X86_64
2515 if (env->hflags & HF_LMA_MASK)
2516 entry_limit = 15;
2517 else
2518#endif
2519 entry_limit = 7;
2520 if ((index + entry_limit) > dt->limit)
2521 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2522 ptr = dt->base + index;
2523 e1 = ldl_kernel(ptr);
2524 e2 = ldl_kernel(ptr + 4);
2525 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2526 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2527 if (!(e2 & DESC_P_MASK))
2528 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2529#ifdef TARGET_X86_64
2530 if (env->hflags & HF_LMA_MASK) {
2531 uint32_t e3;
2532 e3 = ldl_kernel(ptr + 8);
2533 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2534 env->ldt.base |= (target_ulong)e3 << 32;
2535 } else
2536#endif
2537 {
2538 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2539 }
2540 }
2541 env->ldt.selector = selector;
2542#ifdef VBOX
2543 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2544 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2545#endif
2546}
2547
2548void helper_ltr(int selector)
2549{
2550 SegmentCache *dt;
2551 uint32_t e1, e2;
2552#ifndef VBOX
2553 int index, type, entry_limit;
2554#else
2555 unsigned int index;
2556 int type, entry_limit;
2557#endif
2558 target_ulong ptr;
2559
2560#ifdef VBOX
2561 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2562 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2563 env->tr.flags, (RTSEL)(selector & 0xffff)));
2564#endif
2565 selector &= 0xffff;
2566 if ((selector & 0xfffc) == 0) {
2567 /* NULL selector case: invalid TR */
2568 env->tr.base = 0;
2569 env->tr.limit = 0;
2570 env->tr.flags = 0;
2571 } else {
2572 if (selector & 0x4)
2573 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2574 dt = &env->gdt;
2575 index = selector & ~7;
2576#ifdef TARGET_X86_64
2577 if (env->hflags & HF_LMA_MASK)
2578 entry_limit = 15;
2579 else
2580#endif
2581 entry_limit = 7;
2582 if ((index + entry_limit) > dt->limit)
2583 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2584 ptr = dt->base + index;
2585 e1 = ldl_kernel(ptr);
2586 e2 = ldl_kernel(ptr + 4);
2587 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2588 if ((e2 & DESC_S_MASK) ||
2589 (type != 1 && type != 9))
2590 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2591 if (!(e2 & DESC_P_MASK))
2592 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2593#ifdef TARGET_X86_64
2594 if (env->hflags & HF_LMA_MASK) {
2595 uint32_t e3, e4;
2596 e3 = ldl_kernel(ptr + 8);
2597 e4 = ldl_kernel(ptr + 12);
2598 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2599 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2600 load_seg_cache_raw_dt(&env->tr, e1, e2);
2601 env->tr.base |= (target_ulong)e3 << 32;
2602 } else
2603#endif
2604 {
2605 load_seg_cache_raw_dt(&env->tr, e1, e2);
2606 }
2607 e2 |= DESC_TSS_BUSY_MASK;
2608 stl_kernel(ptr + 4, e2);
2609 }
2610 env->tr.selector = selector;
2611#ifdef VBOX
2612 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2613 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2614 env->tr.flags, (RTSEL)(selector & 0xffff)));
2615#endif
2616}
2617
2618/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2619void helper_load_seg(int seg_reg, int selector)
2620{
2621 uint32_t e1, e2;
2622 int cpl, dpl, rpl;
2623 SegmentCache *dt;
2624#ifndef VBOX
2625 int index;
2626#else
2627 unsigned int index;
2628#endif
2629 target_ulong ptr;
2630
2631 selector &= 0xffff;
2632 cpl = env->hflags & HF_CPL_MASK;
2633
2634#ifdef VBOX
2635 /* Trying to load a selector with CPL=1? */
2636 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2637 {
2638 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2639 selector = selector & 0xfffc;
2640 }
2641#endif
2642 if ((selector & 0xfffc) == 0) {
2643 /* null selector case */
2644 if (seg_reg == R_SS
2645#ifdef TARGET_X86_64
2646 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2647#endif
2648 )
2649 raise_exception_err(EXCP0D_GPF, 0);
2650 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2651 } else {
2652
2653 if (selector & 0x4)
2654 dt = &env->ldt;
2655 else
2656 dt = &env->gdt;
2657 index = selector & ~7;
2658 if ((index + 7) > dt->limit)
2659 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2660 ptr = dt->base + index;
2661 e1 = ldl_kernel(ptr);
2662 e2 = ldl_kernel(ptr + 4);
2663
2664 if (!(e2 & DESC_S_MASK))
2665 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2666 rpl = selector & 3;
2667 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2668 if (seg_reg == R_SS) {
2669 /* must be writable segment */
2670 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2671 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2672 if (rpl != cpl || dpl != cpl)
2673 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2674 } else {
2675 /* must be readable segment */
2676 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2677 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2678
2679 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2680 /* if not conforming code, test rights */
2681 if (dpl < cpl || dpl < rpl)
2682 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2683 }
2684 }
2685
2686 if (!(e2 & DESC_P_MASK)) {
2687 if (seg_reg == R_SS)
2688 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2689 else
2690 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2691 }
2692
2693 /* set the access bit if not already set */
2694 if (!(e2 & DESC_A_MASK)) {
2695 e2 |= DESC_A_MASK;
2696 stl_kernel(ptr + 4, e2);
2697 }
2698
2699 cpu_x86_load_seg_cache(env, seg_reg, selector,
2700 get_seg_base(e1, e2),
2701 get_seg_limit(e1, e2),
2702 e2);
2703#if 0
2704 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2705 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2706#endif
2707 }
2708}
2709
2710/* protected mode jump */
2711void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2712 int next_eip_addend)
2713{
2714 int gate_cs, type;
2715 uint32_t e1, e2, cpl, dpl, rpl, limit;
2716 target_ulong next_eip;
2717
2718#ifdef VBOX
2719 e1 = e2 = 0;
2720#endif
2721 if ((new_cs & 0xfffc) == 0)
2722 raise_exception_err(EXCP0D_GPF, 0);
2723 if (load_segment(&e1, &e2, new_cs) != 0)
2724 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2725 cpl = env->hflags & HF_CPL_MASK;
2726 if (e2 & DESC_S_MASK) {
2727 if (!(e2 & DESC_CS_MASK))
2728 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2729 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2730 if (e2 & DESC_C_MASK) {
2731 /* conforming code segment */
2732 if (dpl > cpl)
2733 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2734 } else {
2735 /* non conforming code segment */
2736 rpl = new_cs & 3;
2737 if (rpl > cpl)
2738 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2739 if (dpl != cpl)
2740 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2741 }
2742 if (!(e2 & DESC_P_MASK))
2743 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2744 limit = get_seg_limit(e1, e2);
2745 if (new_eip > limit &&
2746 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2747 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2749 get_seg_base(e1, e2), limit, e2);
2750 EIP = new_eip;
2751 } else {
2752 /* jump to call or task gate */
2753 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2754 rpl = new_cs & 3;
2755 cpl = env->hflags & HF_CPL_MASK;
2756 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2757 switch(type) {
2758 case 1: /* 286 TSS */
2759 case 9: /* 386 TSS */
2760 case 5: /* task gate */
2761 if (dpl < cpl || dpl < rpl)
2762 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2763 next_eip = env->eip + next_eip_addend;
2764 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2765 CC_OP = CC_OP_EFLAGS;
2766 break;
2767 case 4: /* 286 call gate */
2768 case 12: /* 386 call gate */
2769 if ((dpl < cpl) || (dpl < rpl))
2770 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2771 if (!(e2 & DESC_P_MASK))
2772 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2773 gate_cs = e1 >> 16;
2774 new_eip = (e1 & 0xffff);
2775 if (type == 12)
2776 new_eip |= (e2 & 0xffff0000);
2777 if (load_segment(&e1, &e2, gate_cs) != 0)
2778 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2779 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2780 /* must be code segment */
2781 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2782 (DESC_S_MASK | DESC_CS_MASK)))
2783 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2784 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2785 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2786 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2787 if (!(e2 & DESC_P_MASK))
2788#ifdef VBOX /* See page 3-514 of 253666.pdf */
2789 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2790#else
2791 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2792#endif
2793 limit = get_seg_limit(e1, e2);
2794 if (new_eip > limit)
2795 raise_exception_err(EXCP0D_GPF, 0);
2796 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2797 get_seg_base(e1, e2), limit, e2);
2798 EIP = new_eip;
2799 break;
2800 default:
2801 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2802 break;
2803 }
2804 }
2805}
2806
2807/* real mode call */
2808void helper_lcall_real(int new_cs, target_ulong new_eip1,
2809 int shift, int next_eip)
2810{
2811 int new_eip;
2812 uint32_t esp, esp_mask;
2813 target_ulong ssp;
2814
2815 new_eip = new_eip1;
2816 esp = ESP;
2817 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2818 ssp = env->segs[R_SS].base;
2819 if (shift) {
2820 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2821 PUSHL(ssp, esp, esp_mask, next_eip);
2822 } else {
2823 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2824 PUSHW(ssp, esp, esp_mask, next_eip);
2825 }
2826
2827 SET_ESP(esp, esp_mask);
2828 env->eip = new_eip;
2829 env->segs[R_CS].selector = new_cs;
2830 env->segs[R_CS].base = (new_cs << 4);
2831}
2832
2833/* protected mode call */
2834void helper_lcall_protected(int new_cs, target_ulong new_eip,
2835 int shift, int next_eip_addend)
2836{
2837 int new_stack, i;
2838 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2839 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2840 uint32_t val, limit, old_sp_mask;
2841 target_ulong ssp, old_ssp, next_eip;
2842
2843#ifdef VBOX
2844 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2845#endif
2846 next_eip = env->eip + next_eip_addend;
2847#ifdef DEBUG_PCALL
2848 if (loglevel & CPU_LOG_PCALL) {
2849 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2850 new_cs, (uint32_t)new_eip, shift);
2851 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2852 }
2853#endif
2854 if ((new_cs & 0xfffc) == 0)
2855 raise_exception_err(EXCP0D_GPF, 0);
2856 if (load_segment(&e1, &e2, new_cs) != 0)
2857 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2858 cpl = env->hflags & HF_CPL_MASK;
2859#ifdef DEBUG_PCALL
2860 if (loglevel & CPU_LOG_PCALL) {
2861 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2862 }
2863#endif
2864 if (e2 & DESC_S_MASK) {
2865 if (!(e2 & DESC_CS_MASK))
2866 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2867 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2868 if (e2 & DESC_C_MASK) {
2869 /* conforming code segment */
2870 if (dpl > cpl)
2871 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2872 } else {
2873 /* non conforming code segment */
2874 rpl = new_cs & 3;
2875 if (rpl > cpl)
2876 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2877 if (dpl != cpl)
2878 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2879 }
2880 if (!(e2 & DESC_P_MASK))
2881 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2882
2883#ifdef TARGET_X86_64
2884 /* XXX: check 16/32 bit cases in long mode */
2885 if (shift == 2) {
2886 target_ulong rsp;
2887 /* 64 bit case */
2888 rsp = ESP;
2889 PUSHQ(rsp, env->segs[R_CS].selector);
2890 PUSHQ(rsp, next_eip);
2891 /* from this point, not restartable */
2892 ESP = rsp;
2893 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2894 get_seg_base(e1, e2),
2895 get_seg_limit(e1, e2), e2);
2896 EIP = new_eip;
2897 } else
2898#endif
2899 {
2900 sp = ESP;
2901 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2902 ssp = env->segs[R_SS].base;
2903 if (shift) {
2904 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2905 PUSHL(ssp, sp, sp_mask, next_eip);
2906 } else {
2907 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2908 PUSHW(ssp, sp, sp_mask, next_eip);
2909 }
2910
2911 limit = get_seg_limit(e1, e2);
2912 if (new_eip > limit)
2913 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2914 /* from this point, not restartable */
2915 SET_ESP(sp, sp_mask);
2916 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2917 get_seg_base(e1, e2), limit, e2);
2918 EIP = new_eip;
2919 }
2920 } else {
2921 /* check gate type */
2922 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2923 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2924 rpl = new_cs & 3;
2925 switch(type) {
2926 case 1: /* available 286 TSS */
2927 case 9: /* available 386 TSS */
2928 case 5: /* task gate */
2929 if (dpl < cpl || dpl < rpl)
2930 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2931 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2932 CC_OP = CC_OP_EFLAGS;
2933 return;
2934 case 4: /* 286 call gate */
2935 case 12: /* 386 call gate */
2936 break;
2937 default:
2938 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2939 break;
2940 }
2941 shift = type >> 3;
2942
2943 if (dpl < cpl || dpl < rpl)
2944 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2945 /* check valid bit */
2946 if (!(e2 & DESC_P_MASK))
2947 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2948 selector = e1 >> 16;
2949 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2950 param_count = e2 & 0x1f;
2951 if ((selector & 0xfffc) == 0)
2952 raise_exception_err(EXCP0D_GPF, 0);
2953
2954 if (load_segment(&e1, &e2, selector) != 0)
2955 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2956 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2957 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2958 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2959 if (dpl > cpl)
2960 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2961 if (!(e2 & DESC_P_MASK))
2962 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2963
2964 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2965 /* to inner privilege */
2966 get_ss_esp_from_tss(&ss, &sp, dpl);
2967#ifdef DEBUG_PCALL
2968 if (loglevel & CPU_LOG_PCALL)
2969 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2970 ss, sp, param_count, ESP);
2971#endif
2972 if ((ss & 0xfffc) == 0)
2973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2974 if ((ss & 3) != dpl)
2975 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2976 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2977 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2978 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2979 if (ss_dpl != dpl)
2980 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2981 if (!(ss_e2 & DESC_S_MASK) ||
2982 (ss_e2 & DESC_CS_MASK) ||
2983 !(ss_e2 & DESC_W_MASK))
2984 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2985 if (!(ss_e2 & DESC_P_MASK))
2986#ifdef VBOX /* See page 3-99 of 253666.pdf */
2987 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2988#else
2989 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2990#endif
2991
2992 // push_size = ((param_count * 2) + 8) << shift;
2993
2994 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2995 old_ssp = env->segs[R_SS].base;
2996
2997 sp_mask = get_sp_mask(ss_e2);
2998 ssp = get_seg_base(ss_e1, ss_e2);
2999 if (shift) {
3000 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3001 PUSHL(ssp, sp, sp_mask, ESP);
3002 for(i = param_count - 1; i >= 0; i--) {
3003 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3004 PUSHL(ssp, sp, sp_mask, val);
3005 }
3006 } else {
3007 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3008 PUSHW(ssp, sp, sp_mask, ESP);
3009 for(i = param_count - 1; i >= 0; i--) {
3010 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3011 PUSHW(ssp, sp, sp_mask, val);
3012 }
3013 }
3014 new_stack = 1;
3015 } else {
3016 /* to same privilege */
3017 sp = ESP;
3018 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3019 ssp = env->segs[R_SS].base;
3020 // push_size = (4 << shift);
3021 new_stack = 0;
3022 }
3023
3024 if (shift) {
3025 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3026 PUSHL(ssp, sp, sp_mask, next_eip);
3027 } else {
3028 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3029 PUSHW(ssp, sp, sp_mask, next_eip);
3030 }
3031
3032 /* from this point, not restartable */
3033
3034 if (new_stack) {
3035 ss = (ss & ~3) | dpl;
3036 cpu_x86_load_seg_cache(env, R_SS, ss,
3037 ssp,
3038 get_seg_limit(ss_e1, ss_e2),
3039 ss_e2);
3040 }
3041
3042 selector = (selector & ~3) | dpl;
3043 cpu_x86_load_seg_cache(env, R_CS, selector,
3044 get_seg_base(e1, e2),
3045 get_seg_limit(e1, e2),
3046 e2);
3047 cpu_x86_set_cpl(env, dpl);
3048 SET_ESP(sp, sp_mask);
3049 EIP = offset;
3050 }
3051#ifdef USE_KQEMU
3052 if (kqemu_is_ok(env)) {
3053 env->exception_index = -1;
3054 cpu_loop_exit();
3055 }
3056#endif
3057}
3058
3059/* real and vm86 mode iret */
3060void helper_iret_real(int shift)
3061{
3062 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3063 target_ulong ssp;
3064 int eflags_mask;
3065#ifdef VBOX
3066 bool fVME = false;
3067
3068 remR3TrapClear(env->pVM);
3069#endif /* VBOX */
3070
3071 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3072 sp = ESP;
3073 ssp = env->segs[R_SS].base;
3074 if (shift == 1) {
3075 /* 32 bits */
3076 POPL(ssp, sp, sp_mask, new_eip);
3077 POPL(ssp, sp, sp_mask, new_cs);
3078 new_cs &= 0xffff;
3079 POPL(ssp, sp, sp_mask, new_eflags);
3080 } else {
3081 /* 16 bits */
3082 POPW(ssp, sp, sp_mask, new_eip);
3083 POPW(ssp, sp, sp_mask, new_cs);
3084 POPW(ssp, sp, sp_mask, new_eflags);
3085 }
3086#ifdef VBOX
3087 if ( (env->eflags & VM_MASK)
3088 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3089 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3090 {
3091 fVME = true;
3092 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3093 /* if TF will be set -> #GP */
3094 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3095 || (new_eflags & TF_MASK))
3096 raise_exception(EXCP0D_GPF);
3097 }
3098#endif /* VBOX */
3099 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3100 env->segs[R_CS].selector = new_cs;
3101 env->segs[R_CS].base = (new_cs << 4);
3102 env->eip = new_eip;
3103#ifdef VBOX
3104 if (fVME)
3105 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3106 else
3107#endif
3108 if (env->eflags & VM_MASK)
3109 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3110 else
3111 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3112 if (shift == 0)
3113 eflags_mask &= 0xffff;
3114 load_eflags(new_eflags, eflags_mask);
3115 env->hflags2 &= ~HF2_NMI_MASK;
3116#ifdef VBOX
3117 if (fVME)
3118 {
3119 if (new_eflags & IF_MASK)
3120 env->eflags |= VIF_MASK;
3121 else
3122 env->eflags &= ~VIF_MASK;
3123 }
3124#endif /* VBOX */
3125}
3126
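/* Used when returning to an outer privilege level: data and non-conforming
   code segment registers whose DPL is below the new CPL are nullified. */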
3127#ifndef VBOX
3128static inline void validate_seg(int seg_reg, int cpl)
3129#else /* VBOX */
3130DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3131#endif /* VBOX */
3132{
3133 int dpl;
3134 uint32_t e2;
3135
3136 /* XXX: on x86_64, we do not want to nullify FS and GS because
3137 they may still contain a valid base. I would be interested to
3138 know how a real x86_64 CPU behaves */
3139 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3140 (env->segs[seg_reg].selector & 0xfffc) == 0)
3141 return;
3142
3143 e2 = env->segs[seg_reg].flags;
3144 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3145 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3146 /* data or non conforming code segment */
3147 if (dpl < cpl) {
3148 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3149 }
3150 }
3151}
3152
3153/* protected mode iret */
3154#ifndef VBOX
3155static inline void helper_ret_protected(int shift, int is_iret, int addend)
3156#else /* VBOX */
3157DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3158#endif /* VBOX */
3159{
3160 uint32_t new_cs, new_eflags, new_ss;
3161 uint32_t new_es, new_ds, new_fs, new_gs;
3162 uint32_t e1, e2, ss_e1, ss_e2;
3163 int cpl, dpl, rpl, eflags_mask, iopl;
3164 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3165
3166#ifdef VBOX
3167 ss_e1 = ss_e2 = e1 = e2 = 0;
3168#endif
3169
3170#ifdef TARGET_X86_64
3171 if (shift == 2)
3172 sp_mask = -1;
3173 else
3174#endif
3175 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3176 sp = ESP;
3177 ssp = env->segs[R_SS].base;
3178 new_eflags = 0; /* avoid warning */
3179#ifdef TARGET_X86_64
3180 if (shift == 2) {
3181 POPQ(sp, new_eip);
3182 POPQ(sp, new_cs);
3183 new_cs &= 0xffff;
3184 if (is_iret) {
3185 POPQ(sp, new_eflags);
3186 }
3187 } else
3188#endif
3189 if (shift == 1) {
3190 /* 32 bits */
3191 POPL(ssp, sp, sp_mask, new_eip);
3192 POPL(ssp, sp, sp_mask, new_cs);
3193 new_cs &= 0xffff;
3194 if (is_iret) {
3195 POPL(ssp, sp, sp_mask, new_eflags);
3196#if defined(VBOX) && defined(DEBUG)
3197 printf("iret: new CS %04X\n", new_cs);
3198 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3199 printf("iret: new EFLAGS %08X\n", new_eflags);
3200 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3201#endif
3202 if (new_eflags & VM_MASK)
3203 goto return_to_vm86;
3204 }
3205#ifdef VBOX
3206 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3207 {
3208#ifdef DEBUG
3209 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3210#endif
3211 new_cs = new_cs & 0xfffc;
3212 }
3213#endif
3214 } else {
3215 /* 16 bits */
3216 POPW(ssp, sp, sp_mask, new_eip);
3217 POPW(ssp, sp, sp_mask, new_cs);
3218 if (is_iret)
3219 POPW(ssp, sp, sp_mask, new_eflags);
3220 }
3221#ifdef DEBUG_PCALL
3222 if (loglevel & CPU_LOG_PCALL) {
3223 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3224 new_cs, new_eip, shift, addend);
3225 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3226 }
3227#endif
3228 if ((new_cs & 0xfffc) == 0)
3229 {
3230#if defined(VBOX) && defined(DEBUG)
3231 printf("new_cs & 0xfffc) == 0\n");
3232#endif
3233 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3234 }
3235 if (load_segment(&e1, &e2, new_cs) != 0)
3236 {
3237#if defined(VBOX) && defined(DEBUG)
3238 printf("load_segment failed\n");
3239#endif
3240 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3241 }
3242 if (!(e2 & DESC_S_MASK) ||
3243 !(e2 & DESC_CS_MASK))
3244 {
3245#if defined(VBOX) && defined(DEBUG)
3246 printf("e2 mask %08x\n", e2);
3247#endif
3248 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3249 }
3250 cpl = env->hflags & HF_CPL_MASK;
3251 rpl = new_cs & 3;
3252 if (rpl < cpl)
3253 {
3254#if defined(VBOX) && defined(DEBUG)
3255 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3256#endif
3257 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3258 }
3259 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3260 if (e2 & DESC_C_MASK) {
3261 if (dpl > rpl)
3262 {
3263#if defined(VBOX) && defined(DEBUG)
3264 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3265#endif
3266 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3267 }
3268 } else {
3269 if (dpl != rpl)
3270 {
3271#if defined(VBOX) && defined(DEBUG)
3272 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3273#endif
3274 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3275 }
3276 }
3277 if (!(e2 & DESC_P_MASK))
3278 {
3279#if defined(VBOX) && defined(DEBUG)
3280 printf("DESC_P_MASK e2=%08x\n", e2);
3281#endif
3282 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3283 }
3284
3285 sp += addend;
3286 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3287 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3288 /* return to same privilege level */
3289 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3290 get_seg_base(e1, e2),
3291 get_seg_limit(e1, e2),
3292 e2);
3293 } else {
3294 /* return to different privilege level */
3295#ifdef TARGET_X86_64
3296 if (shift == 2) {
3297 POPQ(sp, new_esp);
3298 POPQ(sp, new_ss);
3299 new_ss &= 0xffff;
3300 } else
3301#endif
3302 if (shift == 1) {
3303 /* 32 bits */
3304 POPL(ssp, sp, sp_mask, new_esp);
3305 POPL(ssp, sp, sp_mask, new_ss);
3306 new_ss &= 0xffff;
3307 } else {
3308 /* 16 bits */
3309 POPW(ssp, sp, sp_mask, new_esp);
3310 POPW(ssp, sp, sp_mask, new_ss);
3311 }
3312#ifdef DEBUG_PCALL
3313 if (loglevel & CPU_LOG_PCALL) {
3314 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3315 new_ss, new_esp);
3316 }
3317#endif
3318 if ((new_ss & 0xfffc) == 0) {
3319#ifdef TARGET_X86_64
3320 /* NULL ss is allowed in long mode if cpl != 3*/
3321 /* XXX: test CS64 ? */
3322 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3323 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3324 0, 0xffffffff,
3325 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3326 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3327 DESC_W_MASK | DESC_A_MASK);
3328 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3329 } else
3330#endif
3331 {
3332 raise_exception_err(EXCP0D_GPF, 0);
3333 }
3334 } else {
3335 if ((new_ss & 3) != rpl)
3336 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3337 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3338 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3339 if (!(ss_e2 & DESC_S_MASK) ||
3340 (ss_e2 & DESC_CS_MASK) ||
3341 !(ss_e2 & DESC_W_MASK))
3342 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3343 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3344 if (dpl != rpl)
3345 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3346 if (!(ss_e2 & DESC_P_MASK))
3347 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3348 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3349 get_seg_base(ss_e1, ss_e2),
3350 get_seg_limit(ss_e1, ss_e2),
3351 ss_e2);
3352 }
3353
3354 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3355 get_seg_base(e1, e2),
3356 get_seg_limit(e1, e2),
3357 e2);
3358 cpu_x86_set_cpl(env, rpl);
3359 sp = new_esp;
3360#ifdef TARGET_X86_64
3361 if (env->hflags & HF_CS64_MASK)
3362 sp_mask = -1;
3363 else
3364#endif
3365 sp_mask = get_sp_mask(ss_e2);
3366
3367 /* validate data segments */
3368 validate_seg(R_ES, rpl);
3369 validate_seg(R_DS, rpl);
3370 validate_seg(R_FS, rpl);
3371 validate_seg(R_GS, rpl);
3372
3373 sp += addend;
3374 }
3375 SET_ESP(sp, sp_mask);
3376 env->eip = new_eip;
3377 if (is_iret) {
3378 /* NOTE: 'cpl' is the _old_ CPL */
3379 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3380 if (cpl == 0)
3381#ifdef VBOX
3382 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3383#else
3384 eflags_mask |= IOPL_MASK;
3385#endif
3386 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3387 if (cpl <= iopl)
3388 eflags_mask |= IF_MASK;
3389 if (shift == 0)
3390 eflags_mask &= 0xffff;
3391 load_eflags(new_eflags, eflags_mask);
3392 }
3393 return;
3394
3395 return_to_vm86:
3396 POPL(ssp, sp, sp_mask, new_esp);
3397 POPL(ssp, sp, sp_mask, new_ss);
3398 POPL(ssp, sp, sp_mask, new_es);
3399 POPL(ssp, sp, sp_mask, new_ds);
3400 POPL(ssp, sp, sp_mask, new_fs);
3401 POPL(ssp, sp, sp_mask, new_gs);
3402
3403 /* modify processor state */
3404 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3405 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3406 load_seg_vm(R_CS, new_cs & 0xffff);
3407 cpu_x86_set_cpl(env, 3);
3408 load_seg_vm(R_SS, new_ss & 0xffff);
3409 load_seg_vm(R_ES, new_es & 0xffff);
3410 load_seg_vm(R_DS, new_ds & 0xffff);
3411 load_seg_vm(R_FS, new_fs & 0xffff);
3412 load_seg_vm(R_GS, new_gs & 0xffff);
3413
3414 env->eip = new_eip & 0xffff;
3415 ESP = new_esp;
3416}
3417
3418void helper_iret_protected(int shift, int next_eip)
3419{
3420 int tss_selector, type;
3421 uint32_t e1, e2;
3422
3423#ifdef VBOX
3424 e1 = e2 = 0;
3425 remR3TrapClear(env->pVM);
3426#endif
3427
3428 /* specific case for TSS */
3429 if (env->eflags & NT_MASK) {
3430#ifdef TARGET_X86_64
3431 if (env->hflags & HF_LMA_MASK)
3432 raise_exception_err(EXCP0D_GPF, 0);
3433#endif
3434 tss_selector = lduw_kernel(env->tr.base + 0);
3435 if (tss_selector & 4)
3436 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3437 if (load_segment(&e1, &e2, tss_selector) != 0)
3438 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3439 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3440 /* NOTE: we check both segment and busy TSS */
3441 if (type != 3)
3442 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3443 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3444 } else {
3445 helper_ret_protected(shift, 1, 0);
3446 }
3447 env->hflags2 &= ~HF2_NMI_MASK;
3448#ifdef USE_KQEMU
3449 if (kqemu_is_ok(env)) {
3450 CC_OP = CC_OP_EFLAGS;
3451 env->exception_index = -1;
3452 cpu_loop_exit();
3453 }
3454#endif
3455}
3456
3457void helper_lret_protected(int shift, int addend)
3458{
3459 helper_ret_protected(shift, 0, addend);
3460#ifdef USE_KQEMU
3461 if (kqemu_is_ok(env)) {
3462 env->exception_index = -1;
3463 cpu_loop_exit();
3464 }
3465#endif
3466}
3467
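/* SYSENTER: #GP(0) if SYSENTER_CS is zero; otherwise clear VM/IF/RF, switch
   to CPL 0 with flat CS/SS derived from SYSENTER_CS, and continue at
   SYSENTER_EIP with ESP = SYSENTER_ESP. */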
3468void helper_sysenter(void)
3469{
3470 if (env->sysenter_cs == 0) {
3471 raise_exception_err(EXCP0D_GPF, 0);
3472 }
3473 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3474 cpu_x86_set_cpl(env, 0);
3475
3476#ifdef TARGET_X86_64
3477 if (env->hflags & HF_LMA_MASK) {
3478 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3479 0, 0xffffffff,
3480 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3481 DESC_S_MASK |
3482 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3483 } else
3484#endif
3485 {
3486 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3487 0, 0xffffffff,
3488 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3489 DESC_S_MASK |
3490 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3491 }
3492 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3493 0, 0xffffffff,
3494 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3495 DESC_S_MASK |
3496 DESC_W_MASK | DESC_A_MASK);
3497 ESP = env->sysenter_esp;
3498 EIP = env->sysenter_eip;
3499}
3500
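/* SYSEXIT: #GP(0) if SYSENTER_CS is zero or the caller is not at CPL 0;
   otherwise return to CPL 3 with CS/SS derived from SYSENTER_CS (+16/+24, or
   +32/+40 for the 64-bit variant), EIP = EDX and ESP = ECX. */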
3501void helper_sysexit(int dflag)
3502{
3503 int cpl;
3504
3505 cpl = env->hflags & HF_CPL_MASK;
3506 if (env->sysenter_cs == 0 || cpl != 0) {
3507 raise_exception_err(EXCP0D_GPF, 0);
3508 }
3509 cpu_x86_set_cpl(env, 3);
3510#ifdef TARGET_X86_64
3511 if (dflag == 2) {
3512 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3513 0, 0xffffffff,
3514 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3515 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3516 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3517 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3518 0, 0xffffffff,
3519 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3520 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3521 DESC_W_MASK | DESC_A_MASK);
3522 } else
3523#endif
3524 {
3525 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3526 0, 0xffffffff,
3527 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3528 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3529 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3530 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3531 0, 0xffffffff,
3532 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3533 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3534 DESC_W_MASK | DESC_A_MASK);
3535 }
3536 ESP = ECX;
3537 EIP = EDX;
3538#ifdef USE_KQEMU
3539 if (kqemu_is_ok(env)) {
3540 env->exception_index = -1;
3541 cpu_loop_exit();
3542 }
3543#endif
3544}
3545
3546#if defined(CONFIG_USER_ONLY)
3547target_ulong helper_read_crN(int reg)
3548{
3549 return 0;
3550}
3551
3552void helper_write_crN(int reg, target_ulong t0)
3553{
3554}
3555#else
3556target_ulong helper_read_crN(int reg)
3557{
3558 target_ulong val;
3559
3560 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3561 switch(reg) {
3562 default:
3563 val = env->cr[reg];
3564 break;
3565 case 8:
3566 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3567 val = cpu_get_apic_tpr(env);
3568 } else {
3569 val = env->v_tpr;
3570 }
3571 break;
3572 }
3573 return val;
3574}
3575
3576void helper_write_crN(int reg, target_ulong t0)
3577{
3578 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3579 switch(reg) {
3580 case 0:
3581 cpu_x86_update_cr0(env, t0);
3582 break;
3583 case 3:
3584 cpu_x86_update_cr3(env, t0);
3585 break;
3586 case 4:
3587 cpu_x86_update_cr4(env, t0);
3588 break;
3589 case 8:
3590 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3591 cpu_set_apic_tpr(env, t0);
3592 }
3593 env->v_tpr = t0 & 0x0f;
3594 break;
3595 default:
3596 env->cr[reg] = t0;
3597 break;
3598 }
3599}
3600#endif
3601
3602void helper_lmsw(target_ulong t0)
3603{
3604 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3605 if already set to one. */
3606 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3607 helper_write_crN(0, t0);
3608}
3609
3610void helper_clts(void)
3611{
3612 env->cr[0] &= ~CR0_TS_MASK;
3613 env->hflags &= ~HF_TS_MASK;
3614}
3615
3616/* XXX: do more */
3617void helper_movl_drN_T0(int reg, target_ulong t0)
3618{
3619 env->dr[reg] = t0;
3620}
3621
3622void helper_invlpg(target_ulong addr)
3623{
3624 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3625 tlb_flush_page(env, addr);
3626}
3627
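/* RDTSC: #GP if CR4.TSD is set and CPL != 0; otherwise return the (offset
   adjusted) time stamp counter in EDX:EAX. */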
3628void helper_rdtsc(void)
3629{
3630 uint64_t val;
3631
3632 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3633 raise_exception(EXCP0D_GPF);
3634 }
3635 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3636
3637 val = cpu_get_tsc(env) + env->tsc_offset;
3638 EAX = (uint32_t)(val);
3639 EDX = (uint32_t)(val >> 32);
3640}
3641
3642#ifdef VBOX
3643void helper_rdtscp(void)
3644{
3645 uint64_t val;
3646 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3647 raise_exception(EXCP0D_GPF);
3648 }
3649
3650 val = cpu_get_tsc(env);
3651 EAX = (uint32_t)(val);
3652 EDX = (uint32_t)(val >> 32);
3653 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3654}
3655#endif
3656
3657void helper_rdpmc(void)
3658{
3659 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3660 raise_exception(EXCP0D_GPF);
3661 }
3662 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3663
3664 /* currently unimplemented */
3665 raise_exception_err(EXCP06_ILLOP, 0);
3666}
3667
3668#if defined(CONFIG_USER_ONLY)
3669void helper_wrmsr(void)
3670{
3671}
3672
3673void helper_rdmsr(void)
3674{
3675}
3676#else
3677void helper_wrmsr(void)
3678{
3679 uint64_t val;
3680
3681 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3682
3683 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3684
3685 switch((uint32_t)ECX) {
3686 case MSR_IA32_SYSENTER_CS:
3687 env->sysenter_cs = val & 0xffff;
3688 break;
3689 case MSR_IA32_SYSENTER_ESP:
3690 env->sysenter_esp = val;
3691 break;
3692 case MSR_IA32_SYSENTER_EIP:
3693 env->sysenter_eip = val;
3694 break;
3695 case MSR_IA32_APICBASE:
3696 cpu_set_apic_base(env, val);
3697 break;
3698 case MSR_EFER:
3699 {
3700 uint64_t update_mask;
3701 update_mask = 0;
3702 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3703 update_mask |= MSR_EFER_SCE;
3704 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3705 update_mask |= MSR_EFER_LME;
3706 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3707 update_mask |= MSR_EFER_FFXSR;
3708 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3709 update_mask |= MSR_EFER_NXE;
3710 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3711 update_mask |= MSR_EFER_SVME;
3712 cpu_load_efer(env, (env->efer & ~update_mask) |
3713 (val & update_mask));
3714 }
3715 break;
3716 case MSR_STAR:
3717 env->star = val;
3718 break;
3719 case MSR_PAT:
3720 env->pat = val;
3721 break;
3722 case MSR_VM_HSAVE_PA:
3723 env->vm_hsave = val;
3724 break;
3725#ifdef TARGET_X86_64
3726 case MSR_LSTAR:
3727 env->lstar = val;
3728 break;
3729 case MSR_CSTAR:
3730 env->cstar = val;
3731 break;
3732 case MSR_FMASK:
3733 env->fmask = val;
3734 break;
3735 case MSR_FSBASE:
3736 env->segs[R_FS].base = val;
3737 break;
3738 case MSR_GSBASE:
3739 env->segs[R_GS].base = val;
3740 break;
3741 case MSR_KERNELGSBASE:
3742 env->kernelgsbase = val;
3743 break;
3744#endif
3745 default:
3746#ifndef VBOX
3747 /* XXX: exception ? */
3748 break;
3749#else /* VBOX */
3750 {
3751 uint32_t ecx = (uint32_t)ECX;
3752 /* In X2APIC specification this range is reserved for APIC control. */
3753 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3754 cpu_apic_wrmsr(env, ecx, val);
3755 /** @todo else exception? */
3756 break;
3757 }
3758 case MSR_K8_TSC_AUX:
3759 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3760 break;
3761#endif /* VBOX */
3762 }
3763}
3764
3765void helper_rdmsr(void)
3766{
3767 uint64_t val;
3768
3769 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3770
3771 switch((uint32_t)ECX) {
3772 case MSR_IA32_SYSENTER_CS:
3773 val = env->sysenter_cs;
3774 break;
3775 case MSR_IA32_SYSENTER_ESP:
3776 val = env->sysenter_esp;
3777 break;
3778 case MSR_IA32_SYSENTER_EIP:
3779 val = env->sysenter_eip;
3780 break;
3781 case MSR_IA32_APICBASE:
3782 val = cpu_get_apic_base(env);
3783 break;
3784 case MSR_EFER:
3785 val = env->efer;
3786 break;
3787 case MSR_STAR:
3788 val = env->star;
3789 break;
3790 case MSR_PAT:
3791 val = env->pat;
3792 break;
3793 case MSR_VM_HSAVE_PA:
3794 val = env->vm_hsave;
3795 break;
3796 case MSR_IA32_PERF_STATUS:
3797 /* tsc_increment_by_tick */
3798 val = 1000ULL;
3799 /* CPU multiplier */
3800 val |= (((uint64_t)4ULL) << 40);
3801 break;
3802#ifdef TARGET_X86_64
3803 case MSR_LSTAR:
3804 val = env->lstar;
3805 break;
3806 case MSR_CSTAR:
3807 val = env->cstar;
3808 break;
3809 case MSR_FMASK:
3810 val = env->fmask;
3811 break;
3812 case MSR_FSBASE:
3813 val = env->segs[R_FS].base;
3814 break;
3815 case MSR_GSBASE:
3816 val = env->segs[R_GS].base;
3817 break;
3818 case MSR_KERNELGSBASE:
3819 val = env->kernelgsbase;
3820 break;
3821#endif
3822#ifdef USE_KQEMU
3823 case MSR_QPI_COMMBASE:
3824 if (env->kqemu_enabled) {
3825 val = kqemu_comm_base;
3826 } else {
3827 val = 0;
3828 }
3829 break;
3830#endif
3831 default:
3832#ifndef VBOX
3833 /* XXX: exception ? */
3834 val = 0;
3835 break;
3836#else /* VBOX */
3837 {
3838 uint32_t ecx = (uint32_t)ECX;
3839 /* In X2APIC specification this range is reserved for APIC control. */
3840 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3841 val = cpu_apic_rdmsr(env, ecx);
3842 else
3843 val = 0; /** @todo else exception? */
3844 break;
3845 }
3846 case MSR_K8_TSC_AUX:
3847 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3848 break;
3849#endif /* VBOX */
3850 }
3851 EAX = (uint32_t)(val);
3852 EDX = (uint32_t)(val >> 32);
3853}
3854#endif
3855
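/* LSL: if the selector passes the type and privilege checks, return the
   segment limit and set ZF; otherwise clear ZF and return 0. */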
3856target_ulong helper_lsl(target_ulong selector1)
3857{
3858 unsigned int limit;
3859 uint32_t e1, e2, eflags, selector;
3860 int rpl, dpl, cpl, type;
3861
3862 selector = selector1 & 0xffff;
3863 eflags = cc_table[CC_OP].compute_all();
3864 if (load_segment(&e1, &e2, selector) != 0)
3865 goto fail;
3866 rpl = selector & 3;
3867 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3868 cpl = env->hflags & HF_CPL_MASK;
3869 if (e2 & DESC_S_MASK) {
3870 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3871 /* conforming */
3872 } else {
3873 if (dpl < cpl || dpl < rpl)
3874 goto fail;
3875 }
3876 } else {
3877 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3878 switch(type) {
3879 case 1:
3880 case 2:
3881 case 3:
3882 case 9:
3883 case 11:
3884 break;
3885 default:
3886 goto fail;
3887 }
3888 if (dpl < cpl || dpl < rpl) {
3889 fail:
3890 CC_SRC = eflags & ~CC_Z;
3891 return 0;
3892 }
3893 }
3894 limit = get_seg_limit(e1, e2);
3895 CC_SRC = eflags | CC_Z;
3896 return limit;
3897}
3898
3899target_ulong helper_lar(target_ulong selector1)
3900{
3901 uint32_t e1, e2, eflags, selector;
3902 int rpl, dpl, cpl, type;
3903
3904 selector = selector1 & 0xffff;
3905 eflags = cc_table[CC_OP].compute_all();
3906 if ((selector & 0xfffc) == 0)
3907 goto fail;
3908 if (load_segment(&e1, &e2, selector) != 0)
3909 goto fail;
3910 rpl = selector & 3;
3911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3912 cpl = env->hflags & HF_CPL_MASK;
3913 if (e2 & DESC_S_MASK) {
3914 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3915 /* conforming */
3916 } else {
3917 if (dpl < cpl || dpl < rpl)
3918 goto fail;
3919 }
3920 } else {
3921 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3922 switch(type) {
3923 case 1:
3924 case 2:
3925 case 3:
3926 case 4:
3927 case 5:
3928 case 9:
3929 case 11:
3930 case 12:
3931 break;
3932 default:
3933 goto fail;
3934 }
3935 if (dpl < cpl || dpl < rpl) {
3936 fail:
3937 CC_SRC = eflags & ~CC_Z;
3938 return 0;
3939 }
3940 }
3941 CC_SRC = eflags | CC_Z;
3942 return e2 & 0x00f0ff00;
3943}
3944
3945void helper_verr(target_ulong selector1)
3946{
3947 uint32_t e1, e2, eflags, selector;
3948 int rpl, dpl, cpl;
3949
3950 selector = selector1 & 0xffff;
3951 eflags = cc_table[CC_OP].compute_all();
3952 if ((selector & 0xfffc) == 0)
3953 goto fail;
3954 if (load_segment(&e1, &e2, selector) != 0)
3955 goto fail;
3956 if (!(e2 & DESC_S_MASK))
3957 goto fail;
3958 rpl = selector & 3;
3959 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3960 cpl = env->hflags & HF_CPL_MASK;
3961 if (e2 & DESC_CS_MASK) {
3962 if (!(e2 & DESC_R_MASK))
3963 goto fail;
3964 if (!(e2 & DESC_C_MASK)) {
3965 if (dpl < cpl || dpl < rpl)
3966 goto fail;
3967 }
3968 } else {
3969 if (dpl < cpl || dpl < rpl) {
3970 fail:
3971 CC_SRC = eflags & ~CC_Z;
3972 return;
3973 }
3974 }
3975 CC_SRC = eflags | CC_Z;
3976}
3977
3978void helper_verw(target_ulong selector1)
3979{
3980 uint32_t e1, e2, eflags, selector;
3981 int rpl, dpl, cpl;
3982
3983 selector = selector1 & 0xffff;
3984 eflags = cc_table[CC_OP].compute_all();
3985 if ((selector & 0xfffc) == 0)
3986 goto fail;
3987 if (load_segment(&e1, &e2, selector) != 0)
3988 goto fail;
3989 if (!(e2 & DESC_S_MASK))
3990 goto fail;
3991 rpl = selector & 3;
3992 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3993 cpl = env->hflags & HF_CPL_MASK;
3994 if (e2 & DESC_CS_MASK) {
3995 goto fail;
3996 } else {
3997 if (dpl < cpl || dpl < rpl)
3998 goto fail;
3999 if (!(e2 & DESC_W_MASK)) {
4000 fail:
4001 CC_SRC = eflags & ~CC_Z;
4002 return;
4003 }
4004 }
4005 CC_SRC = eflags | CC_Z;
4006}
4007
4008/* x87 FPU helpers */
4009
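/* Record an x87 exception in the status word; if any set exception is unmasked in the
   control word, also set the error-summary and busy bits so fpu_raise_exception() will
   fire on the next FWAIT. */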
4010static void fpu_set_exception(int mask)
4011{
4012 env->fpus |= mask;
4013 if (env->fpus & (~env->fpuc & FPUC_EM))
4014 env->fpus |= FPUS_SE | FPUS_B;
4015}
4016
4017#ifndef VBOX
4018static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4019#else /* VBOX */
4020DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4021#endif /* VBOX */
4022{
4023 if (b == 0.0)
4024 fpu_set_exception(FPUS_ZE);
4025 return a / b;
4026}
4027
4028void fpu_raise_exception(void)
4029{
4030 if (env->cr[0] & CR0_NE_MASK) {
4031 raise_exception(EXCP10_COPR);
4032 }
4033#if !defined(CONFIG_USER_ONLY)
4034 else {
4035 cpu_set_ferr(env);
4036 }
4037#endif
4038}
4039
4040void helper_flds_FT0(uint32_t val)
4041{
4042 union {
4043 float32 f;
4044 uint32_t i;
4045 } u;
4046 u.i = val;
4047 FT0 = float32_to_floatx(u.f, &env->fp_status);
4048}
4049
4050void helper_fldl_FT0(uint64_t val)
4051{
4052 union {
4053 float64 f;
4054 uint64_t i;
4055 } u;
4056 u.i = val;
4057 FT0 = float64_to_floatx(u.f, &env->fp_status);
4058}
4059
4060void helper_fildl_FT0(int32_t val)
4061{
4062 FT0 = int32_to_floatx(val, &env->fp_status);
4063}
4064
4065void helper_flds_ST0(uint32_t val)
4066{
4067 int new_fpstt;
4068 union {
4069 float32 f;
4070 uint32_t i;
4071 } u;
4072 new_fpstt = (env->fpstt - 1) & 7;
4073 u.i = val;
4074 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4075 env->fpstt = new_fpstt;
4076 env->fptags[new_fpstt] = 0; /* validate stack entry */
4077}
4078
4079void helper_fldl_ST0(uint64_t val)
4080{
4081 int new_fpstt;
4082 union {
4083 float64 f;
4084 uint64_t i;
4085 } u;
4086 new_fpstt = (env->fpstt - 1) & 7;
4087 u.i = val;
4088 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4089 env->fpstt = new_fpstt;
4090 env->fptags[new_fpstt] = 0; /* validate stack entry */
4091}
4092
4093void helper_fildl_ST0(int32_t val)
4094{
4095 int new_fpstt;
4096 new_fpstt = (env->fpstt - 1) & 7;
4097 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4098 env->fpstt = new_fpstt;
4099 env->fptags[new_fpstt] = 0; /* validate stack entry */
4100}
4101
4102void helper_fildll_ST0(int64_t val)
4103{
4104 int new_fpstt;
4105 new_fpstt = (env->fpstt - 1) & 7;
4106 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4107 env->fpstt = new_fpstt;
4108 env->fptags[new_fpstt] = 0; /* validate stack entry */
4109}
4110
4111#ifndef VBOX
4112uint32_t helper_fsts_ST0(void)
4113#else
4114RTCCUINTREG helper_fsts_ST0(void)
4115#endif
4116{
4117 union {
4118 float32 f;
4119 uint32_t i;
4120 } u;
4121 u.f = floatx_to_float32(ST0, &env->fp_status);
4122 return u.i;
4123}
4124
4125uint64_t helper_fstl_ST0(void)
4126{
4127 union {
4128 float64 f;
4129 uint64_t i;
4130 } u;
4131 u.f = floatx_to_float64(ST0, &env->fp_status);
4132 return u.i;
4133}
4134#ifndef VBOX
4135int32_t helper_fist_ST0(void)
4136#else
4137RTCCINTREG helper_fist_ST0(void)
4138#endif
4139{
4140 int32_t val;
4141 val = floatx_to_int32(ST0, &env->fp_status);
4142 if (val != (int16_t)val)
4143 val = -32768;
4144 return val;
4145}
4146
4147#ifndef VBOX
4148int32_t helper_fistl_ST0(void)
4149#else
4150RTCCINTREG helper_fistl_ST0(void)
4151#endif
4152{
4153 int32_t val;
4154 val = floatx_to_int32(ST0, &env->fp_status);
4155 return val;
4156}
4157
4158int64_t helper_fistll_ST0(void)
4159{
4160 int64_t val;
4161 val = floatx_to_int64(ST0, &env->fp_status);
4162 return val;
4163}
4164
4165#ifndef VBOX
4166int32_t helper_fistt_ST0(void)
4167#else
4168RTCCINTREG helper_fistt_ST0(void)
4169#endif
4170{
4171 int32_t val;
4172 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4173 if (val != (int16_t)val)
4174 val = -32768;
4175 return val;
4176}
4177
4178#ifndef VBOX
4179int32_t helper_fisttl_ST0(void)
4180#else
4181RTCCINTREG helper_fisttl_ST0(void)
4182#endif
4183{
4184 int32_t val;
4185 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4186 return val;
4187}
4188
4189int64_t helper_fisttll_ST0(void)
4190{
4191 int64_t val;
4192 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4193 return val;
4194}
4195
4196void helper_fldt_ST0(target_ulong ptr)
4197{
4198 int new_fpstt;
4199 new_fpstt = (env->fpstt - 1) & 7;
4200 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4201 env->fpstt = new_fpstt;
4202 env->fptags[new_fpstt] = 0; /* validate stack entry */
4203}
4204
4205void helper_fstt_ST0(target_ulong ptr)
4206{
4207 helper_fstt(ST0, ptr);
4208}
4209
4210void helper_fpush(void)
4211{
4212 fpush();
4213}
4214
4215void helper_fpop(void)
4216{
4217 fpop();
4218}
4219
4220void helper_fdecstp(void)
4221{
4222 env->fpstt = (env->fpstt - 1) & 7;
4223 env->fpus &= (~0x4700);
4224}
4225
4226void helper_fincstp(void)
4227{
4228 env->fpstt = (env->fpstt + 1) & 7;
4229 env->fpus &= (~0x4700);
4230}
4231
4232/* FPU move */
4233
4234void helper_ffree_STN(int st_index)
4235{
4236 env->fptags[(env->fpstt + st_index) & 7] = 1;
4237}
4238
4239void helper_fmov_ST0_FT0(void)
4240{
4241 ST0 = FT0;
4242}
4243
4244void helper_fmov_FT0_STN(int st_index)
4245{
4246 FT0 = ST(st_index);
4247}
4248
4249void helper_fmov_ST0_STN(int st_index)
4250{
4251 ST0 = ST(st_index);
4252}
4253
4254void helper_fmov_STN_ST0(int st_index)
4255{
4256 ST(st_index) = ST0;
4257}
4258
4259void helper_fxchg_ST0_STN(int st_index)
4260{
4261 CPU86_LDouble tmp;
4262 tmp = ST(st_index);
4263 ST(st_index) = ST0;
4264 ST0 = tmp;
4265}
4266
4267/* FPU operations */
4268
4269static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4270
4271void helper_fcom_ST0_FT0(void)
4272{
4273 int ret;
4274
4275 ret = floatx_compare(ST0, FT0, &env->fp_status);
4276 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4277 FORCE_RET();
4278}
4279
4280void helper_fucom_ST0_FT0(void)
4281{
4282 int ret;
4283
4284 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4285    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4286 FORCE_RET();
4287}
4288
4289static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4290
4291void helper_fcomi_ST0_FT0(void)
4292{
4293 int eflags;
4294 int ret;
4295
4296 ret = floatx_compare(ST0, FT0, &env->fp_status);
4297 eflags = cc_table[CC_OP].compute_all();
4298 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4299 CC_SRC = eflags;
4300 FORCE_RET();
4301}
4302
4303void helper_fucomi_ST0_FT0(void)
4304{
4305 int eflags;
4306 int ret;
4307
4308 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4309 eflags = cc_table[CC_OP].compute_all();
4310 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4311 CC_SRC = eflags;
4312 FORCE_RET();
4313}
4314
4315void helper_fadd_ST0_FT0(void)
4316{
4317 ST0 += FT0;
4318}
4319
4320void helper_fmul_ST0_FT0(void)
4321{
4322 ST0 *= FT0;
4323}
4324
4325void helper_fsub_ST0_FT0(void)
4326{
4327 ST0 -= FT0;
4328}
4329
4330void helper_fsubr_ST0_FT0(void)
4331{
4332 ST0 = FT0 - ST0;
4333}
4334
4335void helper_fdiv_ST0_FT0(void)
4336{
4337 ST0 = helper_fdiv(ST0, FT0);
4338}
4339
4340void helper_fdivr_ST0_FT0(void)
4341{
4342 ST0 = helper_fdiv(FT0, ST0);
4343}
4344
4345/* fp operations between STN and ST0 */
4346
4347void helper_fadd_STN_ST0(int st_index)
4348{
4349 ST(st_index) += ST0;
4350}
4351
4352void helper_fmul_STN_ST0(int st_index)
4353{
4354 ST(st_index) *= ST0;
4355}
4356
4357void helper_fsub_STN_ST0(int st_index)
4358{
4359 ST(st_index) -= ST0;
4360}
4361
4362void helper_fsubr_STN_ST0(int st_index)
4363{
4364 CPU86_LDouble *p;
4365 p = &ST(st_index);
4366 *p = ST0 - *p;
4367}
4368
4369void helper_fdiv_STN_ST0(int st_index)
4370{
4371 CPU86_LDouble *p;
4372 p = &ST(st_index);
4373 *p = helper_fdiv(*p, ST0);
4374}
4375
4376void helper_fdivr_STN_ST0(int st_index)
4377{
4378 CPU86_LDouble *p;
4379 p = &ST(st_index);
4380 *p = helper_fdiv(ST0, *p);
4381}
4382
4383/* misc FPU operations */
4384void helper_fchs_ST0(void)
4385{
4386 ST0 = floatx_chs(ST0);
4387}
4388
4389void helper_fabs_ST0(void)
4390{
4391 ST0 = floatx_abs(ST0);
4392}
4393
4394void helper_fld1_ST0(void)
4395{
4396 ST0 = f15rk[1];
4397}
4398
4399void helper_fldl2t_ST0(void)
4400{
4401 ST0 = f15rk[6];
4402}
4403
4404void helper_fldl2e_ST0(void)
4405{
4406 ST0 = f15rk[5];
4407}
4408
4409void helper_fldpi_ST0(void)
4410{
4411 ST0 = f15rk[2];
4412}
4413
4414void helper_fldlg2_ST0(void)
4415{
4416 ST0 = f15rk[3];
4417}
4418
4419void helper_fldln2_ST0(void)
4420{
4421 ST0 = f15rk[4];
4422}
4423
4424void helper_fldz_ST0(void)
4425{
4426 ST0 = f15rk[0];
4427}
4428
4429void helper_fldz_FT0(void)
4430{
4431 FT0 = f15rk[0];
4432}
4433
4434#ifndef VBOX
4435uint32_t helper_fnstsw(void)
4436#else
4437RTCCUINTREG helper_fnstsw(void)
4438#endif
4439{
4440 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4441}
4442
4443#ifndef VBOX
4444uint32_t helper_fnstcw(void)
4445#else
4446RTCCUINTREG helper_fnstcw(void)
4447#endif
4448{
4449 return env->fpuc;
4450}
4451
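/* Propagate the rounding-control field (and, when FLOATX80 is defined, the
   precision-control field) of the FPU control word into the softfloat status. */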
4452static void update_fp_status(void)
4453{
4454 int rnd_type;
4455
4456 /* set rounding mode */
4457 switch(env->fpuc & RC_MASK) {
4458 default:
4459 case RC_NEAR:
4460 rnd_type = float_round_nearest_even;
4461 break;
4462 case RC_DOWN:
4463 rnd_type = float_round_down;
4464 break;
4465 case RC_UP:
4466 rnd_type = float_round_up;
4467 break;
4468 case RC_CHOP:
4469 rnd_type = float_round_to_zero;
4470 break;
4471 }
4472 set_float_rounding_mode(rnd_type, &env->fp_status);
4473#ifdef FLOATX80
4474 switch((env->fpuc >> 8) & 3) {
4475 case 0:
4476 rnd_type = 32;
4477 break;
4478 case 2:
4479 rnd_type = 64;
4480 break;
4481 case 3:
4482 default:
4483 rnd_type = 80;
4484 break;
4485 }
4486 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4487#endif
4488}
4489
4490void helper_fldcw(uint32_t val)
4491{
4492 env->fpuc = val;
4493 update_fp_status();
4494}
4495
4496void helper_fclex(void)
4497{
4498 env->fpus &= 0x7f00;
4499}
4500
4501void helper_fwait(void)
4502{
4503 if (env->fpus & FPUS_SE)
4504 fpu_raise_exception();
4505 FORCE_RET();
4506}
4507
4508void helper_fninit(void)
4509{
4510 env->fpus = 0;
4511 env->fpstt = 0;
4512 env->fpuc = 0x37f;
4513 env->fptags[0] = 1;
4514 env->fptags[1] = 1;
4515 env->fptags[2] = 1;
4516 env->fptags[3] = 1;
4517 env->fptags[4] = 1;
4518 env->fptags[5] = 1;
4519 env->fptags[6] = 1;
4520 env->fptags[7] = 1;
4521}
4522
4523/* BCD ops */
4524
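/* FBLD: convert the 10-byte packed-BCD integer at ptr (18 digits, sign in the top byte)
   to floating point and push it onto the FPU stack. */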
4525void helper_fbld_ST0(target_ulong ptr)
4526{
4527 CPU86_LDouble tmp;
4528 uint64_t val;
4529 unsigned int v;
4530 int i;
4531
4532 val = 0;
4533 for(i = 8; i >= 0; i--) {
4534 v = ldub(ptr + i);
4535 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4536 }
4537 tmp = val;
4538 if (ldub(ptr + 9) & 0x80)
4539 tmp = -tmp;
4540 fpush();
4541 ST0 = tmp;
4542}
4543
4544void helper_fbst_ST0(target_ulong ptr)
4545{
4546 int v;
4547 target_ulong mem_ref, mem_end;
4548 int64_t val;
4549
4550 val = floatx_to_int64(ST0, &env->fp_status);
4551 mem_ref = ptr;
4552 mem_end = mem_ref + 9;
4553 if (val < 0) {
4554 stb(mem_end, 0x80);
4555 val = -val;
4556 } else {
4557 stb(mem_end, 0x00);
4558 }
4559 while (mem_ref < mem_end) {
4560 if (val == 0)
4561 break;
4562 v = val % 100;
4563 val = val / 100;
4564 v = ((v / 10) << 4) | (v % 10);
4565 stb(mem_ref++, v);
4566 }
4567 while (mem_ref < mem_end) {
4568 stb(mem_ref++, 0);
4569 }
4570}
4571
4572void helper_f2xm1(void)
4573{
4574 ST0 = pow(2.0,ST0) - 1.0;
4575}
4576
4577void helper_fyl2x(void)
4578{
4579 CPU86_LDouble fptemp;
4580
4581 fptemp = ST0;
4582 if (fptemp>0.0){
4583 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4584 ST1 *= fptemp;
4585 fpop();
4586 } else {
4587 env->fpus &= (~0x4700);
4588 env->fpus |= 0x400;
4589 }
4590}
4591
4592void helper_fptan(void)
4593{
4594 CPU86_LDouble fptemp;
4595
4596 fptemp = ST0;
4597 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4598 env->fpus |= 0x400;
4599 } else {
4600 ST0 = tan(fptemp);
4601 fpush();
4602 ST0 = 1.0;
4603 env->fpus &= (~0x400); /* C2 <-- 0 */
4604 /* the above code is for |arg| < 2**52 only */
4605 }
4606}
4607
4608void helper_fpatan(void)
4609{
4610 CPU86_LDouble fptemp, fpsrcop;
4611
4612 fpsrcop = ST1;
4613 fptemp = ST0;
4614 ST1 = atan2(fpsrcop,fptemp);
4615 fpop();
4616}
4617
4618void helper_fxtract(void)
4619{
4620 CPU86_LDoubleU temp;
4621 unsigned int expdif;
4622
4623 temp.d = ST0;
4624 expdif = EXPD(temp) - EXPBIAS;
4625    /* DP exponent bias */
4626 ST0 = expdif;
4627 fpush();
4628 BIASEXPONENT(temp);
4629 ST0 = temp.d;
4630}
4631
4632#ifdef VBOX
4633#ifdef _MSC_VER
4634/* MSC cannot divide by zero */
4635extern double _Nan;
4636#define NaN _Nan
4637#else
4638#define NaN (0.0 / 0.0)
4639#endif
4640#endif /* VBOX */
4641
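/* FPREM1: IEEE partial remainder of ST0 by ST1. C2 is set when the reduction is
   incomplete (exponent difference >= 53); otherwise C0/C3/C1 receive the three
   low bits of the quotient. */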
4642void helper_fprem1(void)
4643{
4644 CPU86_LDouble dblq, fpsrcop, fptemp;
4645 CPU86_LDoubleU fpsrcop1, fptemp1;
4646 int expdif;
4647 signed long long int q;
4648
4649#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4650 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4651#else
4652 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4653#endif
4654 ST0 = 0.0 / 0.0; /* NaN */
4655 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4656 return;
4657 }
4658
4659 fpsrcop = ST0;
4660 fptemp = ST1;
4661 fpsrcop1.d = fpsrcop;
4662 fptemp1.d = fptemp;
4663 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4664
4665 if (expdif < 0) {
4666 /* optimisation? taken from the AMD docs */
4667 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4668 /* ST0 is unchanged */
4669 return;
4670 }
4671
4672 if (expdif < 53) {
4673 dblq = fpsrcop / fptemp;
4674 /* round dblq towards nearest integer */
4675 dblq = rint(dblq);
4676 ST0 = fpsrcop - fptemp * dblq;
4677
4678 /* convert dblq to q by truncating towards zero */
4679 if (dblq < 0.0)
4680 q = (signed long long int)(-dblq);
4681 else
4682 q = (signed long long int)dblq;
4683
4684 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4685 /* (C0,C3,C1) <-- (q2,q1,q0) */
4686 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4687 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4688 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4689 } else {
4690 env->fpus |= 0x400; /* C2 <-- 1 */
4691 fptemp = pow(2.0, expdif - 50);
4692 fpsrcop = (ST0 / ST1) / fptemp;
4693 /* fpsrcop = integer obtained by chopping */
4694 fpsrcop = (fpsrcop < 0.0) ?
4695 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4696 ST0 -= (ST1 * fpsrcop * fptemp);
4697 }
4698}
4699
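/* FPREM: x87 partial remainder using a quotient truncated towards zero; same
   condition-code convention as FPREM1 above. */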
4700void helper_fprem(void)
4701{
4702 CPU86_LDouble dblq, fpsrcop, fptemp;
4703 CPU86_LDoubleU fpsrcop1, fptemp1;
4704 int expdif;
4705 signed long long int q;
4706
4707#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4708 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4709#else
4710 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4711#endif
4712 ST0 = 0.0 / 0.0; /* NaN */
4713 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4714 return;
4715 }
4716
4717 fpsrcop = (CPU86_LDouble)ST0;
4718 fptemp = (CPU86_LDouble)ST1;
4719 fpsrcop1.d = fpsrcop;
4720 fptemp1.d = fptemp;
4721 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4722
4723 if (expdif < 0) {
4724 /* optimisation? taken from the AMD docs */
4725 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4726 /* ST0 is unchanged */
4727 return;
4728 }
4729
4730 if ( expdif < 53 ) {
4731 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4732 /* round dblq towards zero */
4733 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4734 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4735
4736 /* convert dblq to q by truncating towards zero */
4737 if (dblq < 0.0)
4738 q = (signed long long int)(-dblq);
4739 else
4740 q = (signed long long int)dblq;
4741
4742 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4743 /* (C0,C3,C1) <-- (q2,q1,q0) */
4744 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4745 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4746 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4747 } else {
4748 int N = 32 + (expdif % 32); /* as per AMD docs */
4749 env->fpus |= 0x400; /* C2 <-- 1 */
4750 fptemp = pow(2.0, (double)(expdif - N));
4751 fpsrcop = (ST0 / ST1) / fptemp;
4752 /* fpsrcop = integer obtained by chopping */
4753 fpsrcop = (fpsrcop < 0.0) ?
4754 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4755 ST0 -= (ST1 * fpsrcop * fptemp);
4756 }
4757}
4758
4759void helper_fyl2xp1(void)
4760{
4761 CPU86_LDouble fptemp;
4762
4763 fptemp = ST0;
4764 if ((fptemp+1.0)>0.0) {
4765 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4766 ST1 *= fptemp;
4767 fpop();
4768 } else {
4769 env->fpus &= (~0x4700);
4770 env->fpus |= 0x400;
4771 }
4772}
4773
4774void helper_fsqrt(void)
4775{
4776 CPU86_LDouble fptemp;
4777
4778 fptemp = ST0;
4779 if (fptemp<0.0) {
4780 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4781 env->fpus |= 0x400;
4782 }
4783 ST0 = sqrt(fptemp);
4784}
4785
4786void helper_fsincos(void)
4787{
4788 CPU86_LDouble fptemp;
4789
4790 fptemp = ST0;
4791 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4792 env->fpus |= 0x400;
4793 } else {
4794 ST0 = sin(fptemp);
4795 fpush();
4796 ST0 = cos(fptemp);
4797 env->fpus &= (~0x400); /* C2 <-- 0 */
4798 /* the above code is for |arg| < 2**63 only */
4799 }
4800}
4801
4802void helper_frndint(void)
4803{
4804 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4805}
4806
4807void helper_fscale(void)
4808{
4809 ST0 = ldexp (ST0, (int)(ST1));
4810}
4811
4812void helper_fsin(void)
4813{
4814 CPU86_LDouble fptemp;
4815
4816 fptemp = ST0;
4817 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4818 env->fpus |= 0x400;
4819 } else {
4820 ST0 = sin(fptemp);
4821 env->fpus &= (~0x400); /* C2 <-- 0 */
4822 /* the above code is for |arg| < 2**53 only */
4823 }
4824}
4825
4826void helper_fcos(void)
4827{
4828 CPU86_LDouble fptemp;
4829
4830 fptemp = ST0;
4831 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4832 env->fpus |= 0x400;
4833 } else {
4834 ST0 = cos(fptemp);
4835 env->fpus &= (~0x400); /* C2 <-- 0 */
4836        /* the above code is for |arg| < 2**63 only */
4837 }
4838}
4839
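/* FXAM: classify ST0 into the C3..C0 condition codes (zero, denormal, normal finite,
   infinity or NaN), with the sign reported in C1. */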
4840void helper_fxam_ST0(void)
4841{
4842 CPU86_LDoubleU temp;
4843 int expdif;
4844
4845 temp.d = ST0;
4846
4847 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4848 if (SIGND(temp))
4849 env->fpus |= 0x200; /* C1 <-- 1 */
4850
4851 /* XXX: test fptags too */
4852 expdif = EXPD(temp);
4853 if (expdif == MAXEXPD) {
4854#ifdef USE_X86LDOUBLE
4855 if (MANTD(temp) == 0x8000000000000000ULL)
4856#else
4857 if (MANTD(temp) == 0)
4858#endif
4859 env->fpus |= 0x500 /*Infinity*/;
4860 else
4861 env->fpus |= 0x100 /*NaN*/;
4862 } else if (expdif == 0) {
4863 if (MANTD(temp) == 0)
4864 env->fpus |= 0x4000 /*Zero*/;
4865 else
4866 env->fpus |= 0x4400 /*Denormal*/;
4867 } else {
4868 env->fpus |= 0x400;
4869 }
4870}
4871
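/* FSTENV: store the FPU environment (control word, status word, recomputed tag word
   and zeroed instruction/operand pointers) using the 14-byte (16-bit) or 28-byte
   (32-bit) layout selected by data32. */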
4872void helper_fstenv(target_ulong ptr, int data32)
4873{
4874 int fpus, fptag, exp, i;
4875 uint64_t mant;
4876 CPU86_LDoubleU tmp;
4877
4878 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4879 fptag = 0;
4880 for (i=7; i>=0; i--) {
4881 fptag <<= 2;
4882 if (env->fptags[i]) {
4883 fptag |= 3;
4884 } else {
4885 tmp.d = env->fpregs[i].d;
4886 exp = EXPD(tmp);
4887 mant = MANTD(tmp);
4888 if (exp == 0 && mant == 0) {
4889 /* zero */
4890 fptag |= 1;
4891 } else if (exp == 0 || exp == MAXEXPD
4892#ifdef USE_X86LDOUBLE
4893 || (mant & (1LL << 63)) == 0
4894#endif
4895 ) {
4896 /* NaNs, infinity, denormal */
4897 fptag |= 2;
4898 }
4899 }
4900 }
4901 if (data32) {
4902 /* 32 bit */
4903 stl(ptr, env->fpuc);
4904 stl(ptr + 4, fpus);
4905 stl(ptr + 8, fptag);
4906 stl(ptr + 12, 0); /* fpip */
4907 stl(ptr + 16, 0); /* fpcs */
4908 stl(ptr + 20, 0); /* fpoo */
4909 stl(ptr + 24, 0); /* fpos */
4910 } else {
4911 /* 16 bit */
4912 stw(ptr, env->fpuc);
4913 stw(ptr + 2, fpus);
4914 stw(ptr + 4, fptag);
4915 stw(ptr + 6, 0);
4916 stw(ptr + 8, 0);
4917 stw(ptr + 10, 0);
4918 stw(ptr + 12, 0);
4919 }
4920}
4921
4922void helper_fldenv(target_ulong ptr, int data32)
4923{
4924 int i, fpus, fptag;
4925
4926 if (data32) {
4927 env->fpuc = lduw(ptr);
4928 fpus = lduw(ptr + 4);
4929 fptag = lduw(ptr + 8);
4930 }
4931 else {
4932 env->fpuc = lduw(ptr);
4933 fpus = lduw(ptr + 2);
4934 fptag = lduw(ptr + 4);
4935 }
4936 env->fpstt = (fpus >> 11) & 7;
4937 env->fpus = fpus & ~0x3800;
4938 for(i = 0;i < 8; i++) {
4939 env->fptags[i] = ((fptag & 3) == 3);
4940 fptag >>= 2;
4941 }
4942}
4943
4944void helper_fsave(target_ulong ptr, int data32)
4945{
4946 CPU86_LDouble tmp;
4947 int i;
4948
4949 helper_fstenv(ptr, data32);
4950
4951 ptr += (14 << data32);
4952 for(i = 0;i < 8; i++) {
4953 tmp = ST(i);
4954 helper_fstt(tmp, ptr);
4955 ptr += 10;
4956 }
4957
4958 /* fninit */
4959 env->fpus = 0;
4960 env->fpstt = 0;
4961 env->fpuc = 0x37f;
4962 env->fptags[0] = 1;
4963 env->fptags[1] = 1;
4964 env->fptags[2] = 1;
4965 env->fptags[3] = 1;
4966 env->fptags[4] = 1;
4967 env->fptags[5] = 1;
4968 env->fptags[6] = 1;
4969 env->fptags[7] = 1;
4970}
4971
4972void helper_frstor(target_ulong ptr, int data32)
4973{
4974 CPU86_LDouble tmp;
4975 int i;
4976
4977 helper_fldenv(ptr, data32);
4978 ptr += (14 << data32);
4979
4980 for(i = 0;i < 8; i++) {
4981 tmp = helper_fldt(ptr);
4982 ST(i) = tmp;
4983 ptr += 10;
4984 }
4985}
4986
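/* FXSAVE: write the x87 state (and, when CR4.OSFXSR is set, MXCSR plus the XMM
   registers) into the FXSAVE image at ptr; data64 selects the 64-bit header format. */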
4987void helper_fxsave(target_ulong ptr, int data64)
4988{
4989 int fpus, fptag, i, nb_xmm_regs;
4990 CPU86_LDouble tmp;
4991 target_ulong addr;
4992
4993 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4994 fptag = 0;
4995 for(i = 0; i < 8; i++) {
4996 fptag |= (env->fptags[i] << i);
4997 }
4998 stw(ptr, env->fpuc);
4999 stw(ptr + 2, fpus);
5000 stw(ptr + 4, fptag ^ 0xff);
5001#ifdef TARGET_X86_64
5002 if (data64) {
5003 stq(ptr + 0x08, 0); /* rip */
5004 stq(ptr + 0x10, 0); /* rdp */
5005 } else
5006#endif
5007 {
5008 stl(ptr + 0x08, 0); /* eip */
5009 stl(ptr + 0x0c, 0); /* sel */
5010 stl(ptr + 0x10, 0); /* dp */
5011 stl(ptr + 0x14, 0); /* sel */
5012 }
5013
5014 addr = ptr + 0x20;
5015 for(i = 0;i < 8; i++) {
5016 tmp = ST(i);
5017 helper_fstt(tmp, addr);
5018 addr += 16;
5019 }
5020
5021 if (env->cr[4] & CR4_OSFXSR_MASK) {
5022 /* XXX: finish it */
5023 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5024 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5025 if (env->hflags & HF_CS64_MASK)
5026 nb_xmm_regs = 16;
5027 else
5028 nb_xmm_regs = 8;
5029 addr = ptr + 0xa0;
5030 for(i = 0; i < nb_xmm_regs; i++) {
5031 stq(addr, env->xmm_regs[i].XMM_Q(0));
5032 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5033 addr += 16;
5034 }
5035 }
5036}
5037
5038void helper_fxrstor(target_ulong ptr, int data64)
5039{
5040 int i, fpus, fptag, nb_xmm_regs;
5041 CPU86_LDouble tmp;
5042 target_ulong addr;
5043
5044 env->fpuc = lduw(ptr);
5045 fpus = lduw(ptr + 2);
5046 fptag = lduw(ptr + 4);
5047 env->fpstt = (fpus >> 11) & 7;
5048 env->fpus = fpus & ~0x3800;
5049 fptag ^= 0xff;
5050 for(i = 0;i < 8; i++) {
5051 env->fptags[i] = ((fptag >> i) & 1);
5052 }
5053
5054 addr = ptr + 0x20;
5055 for(i = 0;i < 8; i++) {
5056 tmp = helper_fldt(addr);
5057 ST(i) = tmp;
5058 addr += 16;
5059 }
5060
5061 if (env->cr[4] & CR4_OSFXSR_MASK) {
5062 /* XXX: finish it */
5063 env->mxcsr = ldl(ptr + 0x18);
5064 //ldl(ptr + 0x1c);
5065 if (env->hflags & HF_CS64_MASK)
5066 nb_xmm_regs = 16;
5067 else
5068 nb_xmm_regs = 8;
5069 addr = ptr + 0xa0;
5070 for(i = 0; i < nb_xmm_regs; i++) {
5071#if !defined(VBOX) || __GNUC__ < 4
5072 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5073 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5074#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5075# if 1
5076 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5077 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5078 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5079 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5080# else
5081 /* this works fine on Mac OS X, gcc 4.0.1 */
5082 uint64_t u64 = ldq(addr);
5083            env->xmm_regs[i].XMM_Q(0) = u64;
5084            u64 = ldq(addr + 8);
5085 env->xmm_regs[i].XMM_Q(1) = u64;
5086# endif
5087#endif
5088 addr += 16;
5089 }
5090 }
5091}
5092
5093#ifndef USE_X86LDOUBLE
5094
5095void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5096{
5097 CPU86_LDoubleU temp;
5098 int e;
5099
5100 temp.d = f;
5101 /* mantissa */
5102 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5103 /* exponent + sign */
5104 e = EXPD(temp) - EXPBIAS + 16383;
5105 e |= SIGND(temp) >> 16;
5106 *pexp = e;
5107}
5108
5109CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5110{
5111 CPU86_LDoubleU temp;
5112 int e;
5113 uint64_t ll;
5114
5115 /* XXX: handle overflow ? */
5116 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5117 e |= (upper >> 4) & 0x800; /* sign */
5118 ll = (mant >> 11) & ((1LL << 52) - 1);
5119#ifdef __arm__
5120 temp.l.upper = (e << 20) | (ll >> 32);
5121 temp.l.lower = ll;
5122#else
5123 temp.ll = ll | ((uint64_t)e << 52);
5124#endif
5125 return temp.d;
5126}
5127
5128#else
5129
5130void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5131{
5132 CPU86_LDoubleU temp;
5133
5134 temp.d = f;
5135 *pmant = temp.l.lower;
5136 *pexp = temp.l.upper;
5137}
5138
5139CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5140{
5141 CPU86_LDoubleU temp;
5142
5143 temp.l.upper = upper;
5144 temp.l.lower = mant;
5145 return temp.d;
5146}
5147#endif
5148
5149#ifdef TARGET_X86_64
5150
5151//#define DEBUG_MULDIV
5152
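/* 128-bit helpers (add, negate, divide) used by the 64-bit DIV/IDIV emulation below. */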
5153static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5154{
5155 *plow += a;
5156 /* carry test */
5157 if (*plow < a)
5158 (*phigh)++;
5159 *phigh += b;
5160}
5161
5162static void neg128(uint64_t *plow, uint64_t *phigh)
5163{
5164 *plow = ~ *plow;
5165 *phigh = ~ *phigh;
5166 add128(plow, phigh, 1, 0);
5167}
5168
5169/* return TRUE if overflow */
5170static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5171{
5172 uint64_t q, r, a1, a0;
5173 int i, qb, ab;
5174
5175 a0 = *plow;
5176 a1 = *phigh;
5177 if (a1 == 0) {
5178 q = a0 / b;
5179 r = a0 % b;
5180 *plow = q;
5181 *phigh = r;
5182 } else {
5183 if (a1 >= b)
5184 return 1;
5185 /* XXX: use a better algorithm */
5186 for(i = 0; i < 64; i++) {
5187 ab = a1 >> 63;
5188 a1 = (a1 << 1) | (a0 >> 63);
5189 if (ab || a1 >= b) {
5190 a1 -= b;
5191 qb = 1;
5192 } else {
5193 qb = 0;
5194 }
5195 a0 = (a0 << 1) | qb;
5196 }
5197#if defined(DEBUG_MULDIV)
5198 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5199 *phigh, *plow, b, a0, a1);
5200#endif
5201 *plow = a0;
5202 *phigh = a1;
5203 }
5204 return 0;
5205}
5206
5207/* return TRUE if overflow */
5208static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5209{
5210 int sa, sb;
5211 sa = ((int64_t)*phigh < 0);
5212 if (sa)
5213 neg128(plow, phigh);
5214 sb = (b < 0);
5215 if (sb)
5216 b = -b;
5217 if (div64(plow, phigh, b) != 0)
5218 return 1;
5219 if (sa ^ sb) {
5220 if (*plow > (1ULL << 63))
5221 return 1;
5222 *plow = - *plow;
5223 } else {
5224 if (*plow >= (1ULL << 63))
5225 return 1;
5226 }
5227 if (sa)
5228 *phigh = - *phigh;
5229 return 0;
5230}
5231
5232void helper_mulq_EAX_T0(target_ulong t0)
5233{
5234 uint64_t r0, r1;
5235
5236 mulu64(&r0, &r1, EAX, t0);
5237 EAX = r0;
5238 EDX = r1;
5239 CC_DST = r0;
5240 CC_SRC = r1;
5241}
5242
5243void helper_imulq_EAX_T0(target_ulong t0)
5244{
5245 uint64_t r0, r1;
5246
5247 muls64(&r0, &r1, EAX, t0);
5248 EAX = r0;
5249 EDX = r1;
5250 CC_DST = r0;
5251 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5252}
5253
5254target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5255{
5256 uint64_t r0, r1;
5257
5258 muls64(&r0, &r1, t0, t1);
5259 CC_DST = r0;
5260 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5261 return r0;
5262}
5263
5264void helper_divq_EAX(target_ulong t0)
5265{
5266 uint64_t r0, r1;
5267 if (t0 == 0) {
5268 raise_exception(EXCP00_DIVZ);
5269 }
5270 r0 = EAX;
5271 r1 = EDX;
5272 if (div64(&r0, &r1, t0))
5273 raise_exception(EXCP00_DIVZ);
5274 EAX = r0;
5275 EDX = r1;
5276}
5277
5278void helper_idivq_EAX(target_ulong t0)
5279{
5280 uint64_t r0, r1;
5281 if (t0 == 0) {
5282 raise_exception(EXCP00_DIVZ);
5283 }
5284 r0 = EAX;
5285 r1 = EDX;
5286 if (idiv64(&r0, &r1, t0))
5287 raise_exception(EXCP00_DIVZ);
5288 EAX = r0;
5289 EDX = r1;
5290}
5291#endif
5292
5293static void do_hlt(void)
5294{
5295 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5296 env->halted = 1;
5297 env->exception_index = EXCP_HLT;
5298 cpu_loop_exit();
5299}
5300
5301void helper_hlt(int next_eip_addend)
5302{
5303 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5304 EIP += next_eip_addend;
5305
5306 do_hlt();
5307}
5308
5309void helper_monitor(target_ulong ptr)
5310{
5311 if ((uint32_t)ECX != 0)
5312 raise_exception(EXCP0D_GPF);
5313 /* XXX: store address ? */
5314 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5315}
5316
5317void helper_mwait(int next_eip_addend)
5318{
5319 if ((uint32_t)ECX != 0)
5320 raise_exception(EXCP0D_GPF);
5321#ifdef VBOX
5322 helper_hlt(next_eip_addend);
5323#else
5324 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5325 EIP += next_eip_addend;
5326
5327 /* XXX: not complete but not completely erroneous */
5328 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5329 /* more than one CPU: do not sleep because another CPU may
5330 wake this one */
5331 } else {
5332 do_hlt();
5333 }
5334#endif
5335}
5336
5337void helper_debug(void)
5338{
5339 env->exception_index = EXCP_DEBUG;
5340 cpu_loop_exit();
5341}
5342
5343void helper_raise_interrupt(int intno, int next_eip_addend)
5344{
5345 raise_interrupt(intno, 1, 0, next_eip_addend);
5346}
5347
5348void helper_raise_exception(int exception_index)
5349{
5350 raise_exception(exception_index);
5351}
5352
5353void helper_cli(void)
5354{
5355 env->eflags &= ~IF_MASK;
5356}
5357
5358void helper_sti(void)
5359{
5360 env->eflags |= IF_MASK;
5361}
5362
5363#ifdef VBOX
5364void helper_cli_vme(void)
5365{
5366 env->eflags &= ~IF_MASK;
5367}
5368
5369void helper_sti_vme(void)
5370{
5371 /* First check, then change eflags according to the AMD manual */
5372 if (env->eflags & VIP_MASK) {
5373 raise_exception(EXCP0D_GPF);
5374 }
5375 env->eflags |= IF_MASK;
5376}
5377#endif
5378
5379#if 0
5380/* vm86plus instructions */
5381void helper_cli_vm(void)
5382{
5383 env->eflags &= ~VIF_MASK;
5384}
5385
5386void helper_sti_vm(void)
5387{
5388 env->eflags |= VIF_MASK;
5389 if (env->eflags & VIP_MASK) {
5390 raise_exception(EXCP0D_GPF);
5391 }
5392}
5393#endif
5394
5395void helper_set_inhibit_irq(void)
5396{
5397 env->hflags |= HF_INHIBIT_IRQ_MASK;
5398}
5399
5400void helper_reset_inhibit_irq(void)
5401{
5402 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5403}
5404
5405void helper_boundw(target_ulong a0, int v)
5406{
5407 int low, high;
5408 low = ldsw(a0);
5409 high = ldsw(a0 + 2);
5410 v = (int16_t)v;
5411 if (v < low || v > high) {
5412 raise_exception(EXCP05_BOUND);
5413 }
5414 FORCE_RET();
5415}
5416
5417void helper_boundl(target_ulong a0, int v)
5418{
5419 int low, high;
5420 low = ldl(a0);
5421 high = ldl(a0 + 4);
5422 if (v < low || v > high) {
5423 raise_exception(EXCP05_BOUND);
5424 }
5425 FORCE_RET();
5426}
5427
5428static float approx_rsqrt(float a)
5429{
5430 return 1.0 / sqrt(a);
5431}
5432
5433static float approx_rcp(float a)
5434{
5435 return 1.0 / a;
5436}
5437
5438#if !defined(CONFIG_USER_ONLY)
5439
5440#define MMUSUFFIX _mmu
5441
5442#define SHIFT 0
5443#include "softmmu_template.h"
5444
5445#define SHIFT 1
5446#include "softmmu_template.h"
5447
5448#define SHIFT 2
5449#include "softmmu_template.h"
5450
5451#define SHIFT 3
5452#include "softmmu_template.h"
5453
5454#endif
5455
5456#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5457/* This code assumes that a real physical address always fits into a host CPU register,
5458   which is wrong in general but true for our current use cases. */
5459RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5460{
5461 return remR3PhysReadS8(addr);
5462}
5463RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5464{
5465 return remR3PhysReadU8(addr);
5466}
5467void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5468{
5469 remR3PhysWriteU8(addr, val);
5470}
5471RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5472{
5473 return remR3PhysReadS16(addr);
5474}
5475RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5476{
5477 return remR3PhysReadU16(addr);
5478}
5479void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5480{
5481 remR3PhysWriteU16(addr, val);
5482}
5483RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5484{
5485 return remR3PhysReadS32(addr);
5486}
5487RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5488{
5489 return remR3PhysReadU32(addr);
5490}
5491void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5492{
5493 remR3PhysWriteU32(addr, val);
5494}
5495uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5496{
5497 return remR3PhysReadU64(addr);
5498}
5499void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5500{
5501 remR3PhysWriteU64(addr, val);
5502}
5503#endif
5504
5505/* try to fill the TLB and return an exception if error. If retaddr is
5506 NULL, it means that the function was called in C code (i.e. not
5507 from generated code or from helper.c) */
5508/* XXX: fix it to restore all registers */
5509void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5510{
5511 TranslationBlock *tb;
5512 int ret;
5513 unsigned long pc;
5514 CPUX86State *saved_env;
5515
5516 /* XXX: hack to restore env in all cases, even if not called from
5517 generated code */
5518 saved_env = env;
5519 env = cpu_single_env;
5520
5521 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5522 if (ret) {
5523 if (retaddr) {
5524 /* now we have a real cpu fault */
5525 pc = (unsigned long)retaddr;
5526 tb = tb_find_pc(pc);
5527 if (tb) {
5528 /* the PC is inside the translated code. It means that we have
5529 a virtual CPU fault */
5530 cpu_restore_state(tb, env, pc, NULL);
5531 }
5532 }
5533 raise_exception_err(env->exception_index, env->error_code);
5534 }
5535 env = saved_env;
5536}
5537
5538#ifdef VBOX
5539
5540/**
5541 * Correctly computes the eflags.
5542 * @returns eflags.
5543 * @param env1 CPU environment.
5544 */
5545uint32_t raw_compute_eflags(CPUX86State *env1)
5546{
5547 CPUX86State *savedenv = env;
5548 uint32_t efl;
5549 env = env1;
5550 efl = compute_eflags();
5551 env = savedenv;
5552 return efl;
5553}
5554
5555/**
5556 * Reads a byte from a virtual address in the guest memory area.
5557 * XXX: does this work for all addresses? What about swapped-out pages?
5558 * @returns the data byte read.
5559 * @param env1 CPU environment.
5560 * @param pvAddr GC Virtual address.
5561 */
5562uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5563{
5564 CPUX86State *savedenv = env;
5565 uint8_t u8;
5566 env = env1;
5567 u8 = ldub_kernel(addr);
5568 env = savedenv;
5569 return u8;
5570}
5571
5572/**
5573 * Reads a 16-bit word from a virtual address in the guest memory area.
5574 * XXX: does this work for all addresses? What about swapped-out pages?
5575 * @returns the data word read.
5576 * @param env1 CPU environment.
5577 * @param pvAddr GC Virtual address.
5578 */
5579uint16_t read_word(CPUX86State *env1, target_ulong addr)
5580{
5581 CPUX86State *savedenv = env;
5582 uint16_t u16;
5583 env = env1;
5584 u16 = lduw_kernel(addr);
5585 env = savedenv;
5586 return u16;
5587}
5588
5589/**
5590 * Reads a 32-bit dword from a virtual address in the guest memory area.
5591 * XXX: does this work for all addresses? What about swapped-out pages?
5592 * @returns the data dword read.
5593 * @param env1 CPU environment.
5594 * @param pvAddr GC Virtual address.
5595 */
5596uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5597{
5598 CPUX86State *savedenv = env;
5599 uint32_t u32;
5600 env = env1;
5601 u32 = ldl_kernel(addr);
5602 env = savedenv;
5603 return u32;
5604}
5605
5606/**
5607 * Writes a byte to a virtual address in the guest memory area.
5608 * XXX: does this work for all addresses? What about swapped-out pages?
5609 *
5610 * @param env1 CPU environment.
5611 * @param pvAddr GC Virtual address.
5612 * @param val byte value
5613 */
5614void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5615{
5616 CPUX86State *savedenv = env;
5617 env = env1;
5618 stb(addr, val);
5619 env = savedenv;
5620}
5621
5622void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5623{
5624 CPUX86State *savedenv = env;
5625 env = env1;
5626 stw(addr, val);
5627 env = savedenv;
5628}
5629
5630void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5631{
5632 CPUX86State *savedenv = env;
5633 env = env1;
5634 stl(addr, val);
5635 env = savedenv;
5636}
5637
5638/**
5639 * Correctly loads a selector into a segment register, updating the internal
5640 * qemu data/caches.
5641 * @param env1 CPU environment.
5642 * @param seg_reg Segment register.
5643 * @param selector Selector to load.
5644 */
5645void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5646{
5647 CPUX86State *savedenv = env;
5648 jmp_buf old_buf;
5649
5650 env = env1;
5651
5652 if ( env->eflags & X86_EFL_VM
5653 || !(env->cr[0] & X86_CR0_PE))
5654 {
5655 load_seg_vm(seg_reg, selector);
5656
5657 env = savedenv;
5658
5659 /* Successful sync. */
5660 env1->segs[seg_reg].newselector = 0;
5661 }
5662 else
5663 {
5664        /* For some reason this works even without saving/restoring the jump buffer, and since
5665           this code is time critical, let's not do that. */
5666#if 0
5667 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5668#endif
5669 if (setjmp(env1->jmp_env) == 0)
5670 {
5671 if (seg_reg == R_CS)
5672 {
5673 uint32_t e1, e2;
5674 e1 = e2 = 0;
5675 load_segment(&e1, &e2, selector);
5676 cpu_x86_load_seg_cache(env, R_CS, selector,
5677 get_seg_base(e1, e2),
5678 get_seg_limit(e1, e2),
5679 e2);
5680 }
5681 else
5682 tss_load_seg(seg_reg, selector);
5683 env = savedenv;
5684
5685 /* Successful sync. */
5686 env1->segs[seg_reg].newselector = 0;
5687 }
5688 else
5689 {
5690 env = savedenv;
5691
5692 /* Postpone sync until the guest uses the selector. */
5693 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5694 env1->segs[seg_reg].newselector = selector;
5695 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5696 env1->exception_index = -1;
5697 env1->error_code = 0;
5698 env1->old_exception = -1;
5699 }
5700#if 0
5701 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5702#endif
5703 }
5704
5705}
5706
5707DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5708{
5709 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5710}
5711
5712
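/* VBox REM glue: translate exactly one guest instruction into a throw-away TB, execute it
   (repeating while EIP does not advance, e.g. for REP-prefixed instructions), then
   invalidate and free the TB. */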
5713int emulate_single_instr(CPUX86State *env1)
5714{
5715 TranslationBlock *tb;
5716 TranslationBlock *current;
5717 int flags;
5718 uint8_t *tc_ptr;
5719 target_ulong old_eip;
5720
5721 /* ensures env is loaded! */
5722 CPUX86State *savedenv = env;
5723 env = env1;
5724
5725 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5726
5727 current = env->current_tb;
5728 env->current_tb = NULL;
5729 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5730
5731 /*
5732 * Translate only one instruction.
5733 */
5734 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5735 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5736 env->segs[R_CS].base, flags, 0);
5737
5738 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5739
5740
5741 /* tb_link_phys: */
5742 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5743 tb->jmp_next[0] = NULL;
5744 tb->jmp_next[1] = NULL;
5745 Assert(tb->jmp_next[0] == NULL);
5746 Assert(tb->jmp_next[1] == NULL);
5747 if (tb->tb_next_offset[0] != 0xffff)
5748 tb_reset_jump(tb, 0);
5749 if (tb->tb_next_offset[1] != 0xffff)
5750 tb_reset_jump(tb, 1);
5751
5752 /*
5753 * Execute it using emulation
5754 */
5755 old_eip = env->eip;
5756 env->current_tb = tb;
5757
5758 /*
5759     * eip remains the same for repeated instructions; it is unclear why qemu doesn't jump
5760     * inside the generated code. Perhaps not a very safe hack.
5761 */
5762 while(old_eip == env->eip)
5763 {
5764 tc_ptr = tb->tc_ptr;
5765
5766#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5767 int fake_ret;
5768 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5769#else
5770 tcg_qemu_tb_exec(tc_ptr);
5771#endif
5772 /*
5773 * Exit once we detect an external interrupt and interrupts are enabled
5774 */
5775 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5776 ( (env->eflags & IF_MASK) &&
5777 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5778 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5779 {
5780 break;
5781 }
5782 }
5783 env->current_tb = current;
5784
5785 tb_phys_invalidate(tb, -1);
5786 tb_free(tb);
5787/*
5788 Assert(tb->tb_next_offset[0] == 0xffff);
5789 Assert(tb->tb_next_offset[1] == 0xffff);
5790 Assert(tb->tb_next[0] == 0xffff);
5791 Assert(tb->tb_next[1] == 0xffff);
5792 Assert(tb->jmp_next[0] == NULL);
5793 Assert(tb->jmp_next[1] == NULL);
5794 Assert(tb->jmp_first == NULL); */
5795
5796 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5797
5798 /*
5799 * Execute the next instruction when we encounter instruction fusing.
5800 */
5801 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5802 {
5803 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5804 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5805 emulate_single_instr(env);
5806 }
5807
5808 env = savedenv;
5809 return 0;
5810}
5811
5812/**
5813 * Correctly loads a new ldtr selector.
5814 *
5815 * @param env1 CPU environment.
5816 * @param selector Selector to load.
5817 */
5818void sync_ldtr(CPUX86State *env1, int selector)
5819{
5820 CPUX86State *saved_env = env;
5821 if (setjmp(env1->jmp_env) == 0)
5822 {
5823 env = env1;
5824 helper_lldt(selector);
5825 env = saved_env;
5826 }
5827 else
5828 {
5829 env = saved_env;
5830#ifdef VBOX_STRICT
5831 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5832#endif
5833 }
5834}
5835
5836/**
5837 * Correctly loads a new tr selector.
5838 *
5839 * @param env1 CPU environment.
5840 * @param selector Selector to load.
5841 */
5842int sync_tr(CPUX86State *env1, int selector)
5843{
5844 /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */
5845 SegmentCache *dt;
5846 uint32_t e1, e2;
5847 int index, type, entry_limit;
5848 target_ulong ptr;
5849 CPUX86State *saved_env = env;
5850 env = env1;
5851
5852 selector &= 0xffff;
5853 if ((selector & 0xfffc) == 0) {
5854 /* NULL selector case: invalid TR */
5855 env->tr.base = 0;
5856 env->tr.limit = 0;
5857 env->tr.flags = 0;
5858 } else {
5859 if (selector & 0x4)
5860 goto l_failure;
5861 dt = &env->gdt;
5862 index = selector & ~7;
5863#ifdef TARGET_X86_64
5864 if (env->hflags & HF_LMA_MASK)
5865 entry_limit = 15;
5866 else
5867#endif
5868 entry_limit = 7;
5869 if ((index + entry_limit) > dt->limit)
5870 goto l_failure;
5871 ptr = dt->base + index;
5872 e1 = ldl_kernel(ptr);
5873 e2 = ldl_kernel(ptr + 4);
5874 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5875 if ((e2 & DESC_S_MASK) /*||
5876 (type != 1 && type != 9)*/)
5877 goto l_failure;
5878 if (!(e2 & DESC_P_MASK))
5879 goto l_failure;
5880#ifdef TARGET_X86_64
5881 if (env->hflags & HF_LMA_MASK) {
5882 uint32_t e3;
5883 e3 = ldl_kernel(ptr + 8);
5884 load_seg_cache_raw_dt(&env->tr, e1, e2);
5885 env->tr.base |= (target_ulong)e3 << 32;
5886 } else
5887#endif
5888 {
5889 load_seg_cache_raw_dt(&env->tr, e1, e2);
5890 }
5891 e2 |= DESC_TSS_BUSY_MASK;
5892 stl_kernel(ptr + 4, e2);
5893 }
5894 env->tr.selector = selector;
5895
5896 env = saved_env;
5897 return 0;
5898l_failure:
5899 AssertMsgFailed(("selector=%d\n", selector));
5900 return -1;
5901}
5902
5903
5904int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5905 uint32_t *esp_ptr, int dpl)
5906{
5907 int type, index, shift;
5908
5909 CPUX86State *savedenv = env;
5910 env = env1;
5911
5912 if (!(env->tr.flags & DESC_P_MASK))
5913 cpu_abort(env, "invalid tss");
5914 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5915 if ((type & 7) != 1)
5916 cpu_abort(env, "invalid tss type %d", type);
5917 shift = type >> 3;
5918 index = (dpl * 4 + 2) << shift;
5919 if (index + (4 << shift) - 1 > env->tr.limit)
5920 {
5921 env = savedenv;
5922 return 0;
5923 }
5924 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5925
5926 if (shift == 0) {
5927 *esp_ptr = lduw_kernel(env->tr.base + index);
5928 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5929 } else {
5930 *esp_ptr = ldl_kernel(env->tr.base + index);
5931 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5932 }
5933
5934 env = savedenv;
5935 return 1;
5936}
5937
5938//*****************************************************************************
5939// Needs to be at the bottom of the file (overriding macros)
5940
5941#ifndef VBOX
5942static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5943#else /* VBOX */
5944DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5945#endif /* VBOX */
5946{
5947 return *(CPU86_LDouble *)ptr;
5948}
5949
5950#ifndef VBOX
5951static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5952#else /* VBOX */
5953DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5954#endif /* VBOX */
5955{
5956 *(CPU86_LDouble *)ptr = f;
5957}
5958
5959#undef stw
5960#undef stl
5961#undef stq
5962#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5963#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5964#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5965#define data64 0
5966
5967//*****************************************************************************
5968void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5969{
5970 int fpus, fptag, i, nb_xmm_regs;
5971 CPU86_LDouble tmp;
5972 uint8_t *addr;
5973
5974 if (env->cpuid_features & CPUID_FXSR)
5975 {
5976 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5977 fptag = 0;
5978 for(i = 0; i < 8; i++) {
5979 fptag |= (env->fptags[i] << i);
5980 }
5981 stw(ptr, env->fpuc);
5982 stw(ptr + 2, fpus);
5983 stw(ptr + 4, fptag ^ 0xff);
5984
5985 addr = ptr + 0x20;
5986 for(i = 0;i < 8; i++) {
5987 tmp = ST(i);
5988 helper_fstt_raw(tmp, addr);
5989 addr += 16;
5990 }
5991
5992 if (env->cr[4] & CR4_OSFXSR_MASK) {
5993 /* XXX: finish it */
5994 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5995 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5996 nb_xmm_regs = 8 << data64;
5997 addr = ptr + 0xa0;
5998 for(i = 0; i < nb_xmm_regs; i++) {
5999#if __GNUC__ < 4
6000 stq(addr, env->xmm_regs[i].XMM_Q(0));
6001 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6002#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6003 stl(addr, env->xmm_regs[i].XMM_L(0));
6004 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6005 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6006 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6007#endif
6008 addr += 16;
6009 }
6010 }
6011 }
6012 else
6013 {
6014 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6015 int fptag;
6016
6017 fp->FCW = env->fpuc;
6018 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6019 fptag = 0;
6020 for (i=7; i>=0; i--) {
6021 fptag <<= 2;
6022 if (env->fptags[i]) {
6023 fptag |= 3;
6024 } else {
6025 /* the FPU automatically computes it */
6026 }
6027 }
6028 fp->FTW = fptag;
6029
6030 for(i = 0;i < 8; i++) {
6031 tmp = ST(i);
6032 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6033 }
6034 }
6035}
6036
6037//*****************************************************************************
6038#undef lduw
6039#undef ldl
6040#undef ldq
6041#define lduw(a) *(uint16_t *)(a)
6042#define ldl(a) *(uint32_t *)(a)
6043#define ldq(a) *(uint64_t *)(a)
6044//*****************************************************************************
6045void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6046{
6047 int i, fpus, fptag, nb_xmm_regs;
6048 CPU86_LDouble tmp;
6049 uint8_t *addr;
6050
6051 if (env->cpuid_features & CPUID_FXSR)
6052 {
6053 env->fpuc = lduw(ptr);
6054 fpus = lduw(ptr + 2);
6055 fptag = lduw(ptr + 4);
6056 env->fpstt = (fpus >> 11) & 7;
6057 env->fpus = fpus & ~0x3800;
6058 fptag ^= 0xff;
6059 for(i = 0;i < 8; i++) {
6060 env->fptags[i] = ((fptag >> i) & 1);
6061 }
6062
6063 addr = ptr + 0x20;
6064 for(i = 0;i < 8; i++) {
6065 tmp = helper_fldt_raw(addr);
6066 ST(i) = tmp;
6067 addr += 16;
6068 }
6069
6070 if (env->cr[4] & CR4_OSFXSR_MASK) {
6071 /* XXX: finish it, endianness */
6072 env->mxcsr = ldl(ptr + 0x18);
6073 //ldl(ptr + 0x1c);
6074 nb_xmm_regs = 8 << data64;
6075 addr = ptr + 0xa0;
6076 for(i = 0; i < nb_xmm_regs; i++) {
6077#if HC_ARCH_BITS == 32
6078 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6079 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6080 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6081 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6082 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6083#else
6084 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6085 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6086#endif
6087 addr += 16;
6088 }
6089 }
6090 }
6091 else
6092 {
6093 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6094 int fptag, j;
6095
6096 env->fpuc = fp->FCW;
6097 env->fpstt = (fp->FSW >> 11) & 7;
6098 env->fpus = fp->FSW & ~0x3800;
6099 fptag = fp->FTW;
6100 for(i = 0;i < 8; i++) {
6101 env->fptags[i] = ((fptag & 3) == 3);
6102 fptag >>= 2;
6103 }
6104 j = env->fpstt;
6105 for(i = 0;i < 8; i++) {
6106 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6107 ST(i) = tmp;
6108 }
6109 }
6110}
6111//*****************************************************************************
6112//*****************************************************************************
6113
6114#endif /* VBOX */
6115
6116/* Secure Virtual Machine helpers */
6117
6118#if defined(CONFIG_USER_ONLY)
6119
6120void helper_vmrun(int aflag, int next_eip_addend)
6121{
6122}
6123void helper_vmmcall(void)
6124{
6125}
6126void helper_vmload(int aflag)
6127{
6128}
6129void helper_vmsave(int aflag)
6130{
6131}
6132void helper_stgi(void)
6133{
6134}
6135void helper_clgi(void)
6136{
6137}
6138void helper_skinit(void)
6139{
6140}
6141void helper_invlpga(int aflag)
6142{
6143}
6144void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6145{
6146}
6147void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6148{
6149}
6150
6151void helper_svm_check_io(uint32_t port, uint32_t param,
6152 uint32_t next_eip_addend)
6153{
6154}
6155#else
6156
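/* Helpers for copying segment registers between the CPU segment caches and the VMCB
   segment descriptors (selector, base, limit and the packed attribute bits). */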
6157#ifndef VBOX
6158static inline void svm_save_seg(target_phys_addr_t addr,
6159#else /* VBOX */
6160DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6161#endif /* VBOX */
6162 const SegmentCache *sc)
6163{
6164 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6165 sc->selector);
6166 stq_phys(addr + offsetof(struct vmcb_seg, base),
6167 sc->base);
6168 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6169 sc->limit);
6170 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6171 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6172}
6173
6174#ifndef VBOX
6175static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6176#else /* VBOX */
6177DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6178#endif /* VBOX */
6179{
6180 unsigned int flags;
6181
6182 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6183 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6184 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6185 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6186 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6187}
6188
6189#ifndef VBOX
6190static inline void svm_load_seg_cache(target_phys_addr_t addr,
6191#else /* VBOX */
6192DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6193#endif /* VBOX */
6194 CPUState *env, int seg_reg)
6195{
6196 SegmentCache sc1, *sc = &sc1;
6197 svm_load_seg(addr, sc);
6198 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6199 sc->base, sc->limit, sc->flags);
6200}
6201
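/* VMRUN: save the host state to the hsave area, load the guest state from the VMCB
   addressed by rAX, enable the configured intercepts and inject a pending event if
   one is flagged in the VMCB. */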
6202void helper_vmrun(int aflag, int next_eip_addend)
6203{
6204 target_ulong addr;
6205 uint32_t event_inj;
6206 uint32_t int_ctl;
6207
6208 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6209
6210 if (aflag == 2)
6211 addr = EAX;
6212 else
6213 addr = (uint32_t)EAX;
6214
6215 if (loglevel & CPU_LOG_TB_IN_ASM)
6216 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6217
6218 env->vm_vmcb = addr;
6219
6220 /* save the current CPU state in the hsave page */
6221 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6222 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6223
6224 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6225 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6226
6227 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6228 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6229 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6230 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6231 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6232 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6233
6234 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6235 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6236
6237 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6238 &env->segs[R_ES]);
6239 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6240 &env->segs[R_CS]);
6241 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6242 &env->segs[R_SS]);
6243 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6244 &env->segs[R_DS]);
6245
6246 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6247 EIP + next_eip_addend);
6248 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6249 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6250
6251 /* load the interception bitmaps so we do not need to access the
6252 vmcb in svm mode */
6253 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6254 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6255 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6256 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6257 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6258 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6259
6260 /* enable intercepts */
6261 env->hflags |= HF_SVMI_MASK;
6262
6263 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6264
6265 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6266 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6267
6268 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6269 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6270
6271 /* clear exit_info_2 so we behave like the real hardware */
6272 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6273
6274 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6275 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6276 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6277 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6278 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6279 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6280 if (int_ctl & V_INTR_MASKING_MASK) {
6281 env->v_tpr = int_ctl & V_TPR_MASK;
6282 env->hflags2 |= HF2_VINTR_MASK;
6283 if (env->eflags & IF_MASK)
6284 env->hflags2 |= HF2_HIF_MASK;
6285 }
6286
6287 cpu_load_efer(env,
6288 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6289 env->eflags = 0;
6290 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6291 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6292 CC_OP = CC_OP_EFLAGS;
6293
6294 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6295 env, R_ES);
6296 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6297 env, R_CS);
6298 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6299 env, R_SS);
6300 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6301 env, R_DS);
6302
6303 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6304 env->eip = EIP;
6305 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6306 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6307 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6308 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6309 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6310
6311 /* FIXME: guest state consistency checks */
6312
6313 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6314 case TLB_CONTROL_DO_NOTHING:
6315 break;
6316 case TLB_CONTROL_FLUSH_ALL_ASID:
6317 /* FIXME: this is not 100% correct but should work for now */
6318 tlb_flush(env, 1);
6319 break;
6320 }
6321
6322 env->hflags2 |= HF2_GIF_MASK;
6323
6324 if (int_ctl & V_IRQ_MASK) {
6325 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6326 }
6327
6328 /* maybe we need to inject an event */
6329 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
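    /* Note (added): EVENTINJ packs the vector (SVM_EVTINJ_VEC_MASK), the event
       type (SVM_EVTINJ_TYPE_MASK) and the valid / error-code-valid bits; the
       VALID bit is cleared in the VMCB right below to mark the event as
       consumed for injection. */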
6330 if (event_inj & SVM_EVTINJ_VALID) {
6331 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6332 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6333 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6334 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6335
6336 if (loglevel & CPU_LOG_TB_IN_ASM)
6337 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6338 /* FIXME: need to implement valid_err */
6339 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6340 case SVM_EVTINJ_TYPE_INTR:
6341 env->exception_index = vector;
6342 env->error_code = event_inj_err;
6343 env->exception_is_int = 0;
6344 env->exception_next_eip = -1;
6345 if (loglevel & CPU_LOG_TB_IN_ASM)
6346 fprintf(logfile, "INTR");
6347            /* XXX: is it always correct? */
6348 do_interrupt(vector, 0, 0, 0, 1);
6349 break;
6350 case SVM_EVTINJ_TYPE_NMI:
6351 env->exception_index = EXCP02_NMI;
6352 env->error_code = event_inj_err;
6353 env->exception_is_int = 0;
6354 env->exception_next_eip = EIP;
6355 if (loglevel & CPU_LOG_TB_IN_ASM)
6356 fprintf(logfile, "NMI");
6357 cpu_loop_exit();
6358 break;
6359 case SVM_EVTINJ_TYPE_EXEPT:
6360 env->exception_index = vector;
6361 env->error_code = event_inj_err;
6362 env->exception_is_int = 0;
6363 env->exception_next_eip = -1;
6364 if (loglevel & CPU_LOG_TB_IN_ASM)
6365 fprintf(logfile, "EXEPT");
6366 cpu_loop_exit();
6367 break;
6368 case SVM_EVTINJ_TYPE_SOFT:
6369 env->exception_index = vector;
6370 env->error_code = event_inj_err;
6371 env->exception_is_int = 1;
6372 env->exception_next_eip = EIP;
6373 if (loglevel & CPU_LOG_TB_IN_ASM)
6374 fprintf(logfile, "SOFT");
6375 cpu_loop_exit();
6376 break;
6377 }
6378 if (loglevel & CPU_LOG_TB_IN_ASM)
6379 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6380 }
6381}
6382
6383void helper_vmmcall(void)
6384{
6385 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
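    /* Note (added): if the intercept above does not cause a #VMEXIT, VMMCALL
       raises #UD, which matches the AMD SVM behaviour for a non-intercepted
       VMMCALL. */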
6386 raise_exception(EXCP06_ILLOP);
6387}
6388
6389void helper_vmload(int aflag)
6390{
6391 target_ulong addr;
6392 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6393
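    /* Note (added): aflag == 2 means a 64-bit address size, so the full rAX
       value is used as the VMCB address; otherwise it is truncated to 32 bits. */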
6394 if (aflag == 2)
6395 addr = EAX;
6396 else
6397 addr = (uint32_t)EAX;
6398
6399 if (loglevel & CPU_LOG_TB_IN_ASM)
6400 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6401 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6402 env->segs[R_FS].base);
6403
6404 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6405 env, R_FS);
6406 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6407 env, R_GS);
6408 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6409 &env->tr);
6410 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6411 &env->ldt);
6412
6413#ifdef TARGET_X86_64
6414 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6415 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6416 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6417 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6418#endif
6419 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6420 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6421 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6422 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6423}
6424
6425void helper_vmsave(int aflag)
6426{
6427 target_ulong addr;
6428 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6429
6430 if (aflag == 2)
6431 addr = EAX;
6432 else
6433 addr = (uint32_t)EAX;
6434
6435 if (loglevel & CPU_LOG_TB_IN_ASM)
6436 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6437 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6438 env->segs[R_FS].base);
6439
6440 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6441 &env->segs[R_FS]);
6442 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6443 &env->segs[R_GS]);
6444 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6445 &env->tr);
6446 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6447 &env->ldt);
6448
6449#ifdef TARGET_X86_64
6450 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6451 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6452 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6453 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6454#endif
6455 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6456 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6457 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6458 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6459}
6460
6461void helper_stgi(void)
6462{
6463 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6464 env->hflags2 |= HF2_GIF_MASK;
6465}
6466
6467void helper_clgi(void)
6468{
6469 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6470 env->hflags2 &= ~HF2_GIF_MASK;
6471}
6472
6473void helper_skinit(void)
6474{
6475 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6476 /* XXX: not implemented */
6477 raise_exception(EXCP06_ILLOP);
6478}
6479
6480void helper_invlpga(int aflag)
6481{
6482 target_ulong addr;
6483 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6484
6485 if (aflag == 2)
6486 addr = EAX;
6487 else
6488 addr = (uint32_t)EAX;
6489
6490    /* XXX: could use the ASID to decide whether the flush is actually
6491       needed */
6492 tlb_flush_page(env, addr);
6493}
6494
6495void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6496{
6497 if (likely(!(env->hflags & HF_SVMI_MASK)))
6498 return;
6499#ifndef VBOX
6500 switch(type) {
6501#ifndef VBOX
6502 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6503#else
6504 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6505 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6506 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6507#endif
6508 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6509 helper_vmexit(type, param);
6510 }
6511 break;
6512#ifndef VBOX
6513 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6514#else
6515 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6516 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6517 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6518#endif
6519 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6520 helper_vmexit(type, param);
6521 }
6522 break;
6523 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6524 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6525 helper_vmexit(type, param);
6526 }
6527 break;
6528 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6529 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6530 helper_vmexit(type, param);
6531 }
6532 break;
6533 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6534 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6535 helper_vmexit(type, param);
6536 }
6537 break;
6538 case SVM_EXIT_MSR:
6539 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6540 /* FIXME: this should be read in at vmrun (faster this way?) */
6541 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6542 uint32_t t0, t1;
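            /* Note (added): the MSR permission map uses two bits per MSR
               (read/write intercept) in three ranges of 0x2000 MSRs each;
               t1 is the byte offset into the map, t0 the bit offset within
               that byte, and param selects the read (0) or write (1) bit. */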
6543 switch((uint32_t)ECX) {
6544 case 0 ... 0x1fff:
6545 t0 = (ECX * 2) % 8;
6546            t1 = (ECX * 2) / 8;
6547 break;
6548 case 0xc0000000 ... 0xc0001fff:
6549 t0 = (8192 + ECX - 0xc0000000) * 2;
6550 t1 = (t0 / 8);
6551 t0 %= 8;
6552 break;
6553 case 0xc0010000 ... 0xc0011fff:
6554 t0 = (16384 + ECX - 0xc0010000) * 2;
6555 t1 = (t0 / 8);
6556 t0 %= 8;
6557 break;
6558 default:
6559 helper_vmexit(type, param);
6560 t0 = 0;
6561 t1 = 0;
6562 break;
6563 }
6564 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6565 helper_vmexit(type, param);
6566 }
6567 break;
6568 default:
6569 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6570 helper_vmexit(type, param);
6571 }
6572 break;
6573 }
6574#else
6575 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6576#endif
6577}
6578
6579void helper_svm_check_io(uint32_t port, uint32_t param,
6580 uint32_t next_eip_addend)
6581{
6582 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6583 /* FIXME: this should be read in at vmrun (faster this way?) */
6584 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
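        /* Note (added): bits 4..6 of param encode the access size in bytes
           (SVM IOIO exit-info format), so mask covers one permission bit per
           accessed byte; the IOPM holds one bit per I/O port and the 16-bit
           load below handles checks that straddle a byte boundary. */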
6585 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6586 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6587 /* next EIP */
6588 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6589 env->eip + next_eip_addend);
6590 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6591 }
6592 }
6593}
6594
6595/* Note: currently only 32 bits of exit_code are used */
6596void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6597{
6598 uint32_t int_ctl;
6599
6600 if (loglevel & CPU_LOG_TB_IN_ASM)
6601 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6602 exit_code, exit_info_1,
6603 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6604 EIP);
6605
6606 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6607 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6608 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6609 } else {
6610 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6611 }
6612
6613 /* Save the VM state in the vmcb */
6614 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6615 &env->segs[R_ES]);
6616 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6617 &env->segs[R_CS]);
6618 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6619 &env->segs[R_SS]);
6620 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6621 &env->segs[R_DS]);
6622
6623 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6624 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6625
6626 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6627 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6628
6629 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6630 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6631 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6632 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6633 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6634
6635 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6636 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6637 int_ctl |= env->v_tpr & V_TPR_MASK;
6638 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6639 int_ctl |= V_IRQ_MASK;
6640 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6641
6642 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6643 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6644 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6645 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6646 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6647 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6648 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6649
6650 /* Reload the host state from vm_hsave */
6651 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6652 env->hflags &= ~HF_SVMI_MASK;
6653 env->intercept = 0;
6654 env->intercept_exceptions = 0;
6655 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6656 env->tsc_offset = 0;
6657
6658 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6659 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6660
6661 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6662 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6663
6664 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6665 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6666 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6667 /* we need to set the efer after the crs so the hidden flags get
6668 set properly */
6669 cpu_load_efer(env,
6670 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6671 env->eflags = 0;
6672 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6673 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6674 CC_OP = CC_OP_EFLAGS;
6675
6676 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6677 env, R_ES);
6678 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6679 env, R_CS);
6680 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6681 env, R_SS);
6682 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6683 env, R_DS);
6684
6685 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6686 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6687 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6688
6689 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6690 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6691
6692 /* other setups */
6693 cpu_x86_set_cpl(env, 0);
6694 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6695 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6696
6697 env->hflags2 &= ~HF2_GIF_MASK;
6698 /* FIXME: Resets the current ASID register to zero (host ASID). */
6699
6700 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6701
6702 /* Clears the TSC_OFFSET inside the processor. */
6703
6704 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6705       from the page table indicated by the host's CR3. If the PDPEs contain
6706 illegal state, the processor causes a shutdown. */
6707
6708 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6709 env->cr[0] |= CR0_PE_MASK;
6710 env->eflags &= ~VM_MASK;
6711
6712 /* Disables all breakpoints in the host DR7 register. */
6713
6714 /* Checks the reloaded host state for consistency. */
6715
6716 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6717 host's code segment or non-canonical (in the case of long mode), a
6718       #GP fault is delivered inside the host. */
6719
6720 /* remove any pending exception */
6721 env->exception_index = -1;
6722 env->error_code = 0;
6723 env->old_exception = -1;
6724
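    /* Note (added): longjmp back to the main execution loop so execution
       resumes with the host state that was just restored. */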
6725 cpu_loop_exit();
6726}
6727
6728#endif
6729
6730/* MMX/SSE */
6731/* XXX: optimize by storing fptt and fptags in the static cpu state */
6732void helper_enter_mmx(void)
6733{
6734 env->fpstt = 0;
6735 *(uint32_t *)(env->fptags) = 0;
6736 *(uint32_t *)(env->fptags + 4) = 0;
6737}
6738
6739void helper_emms(void)
6740{
6741 /* set to empty state */
6742 *(uint32_t *)(env->fptags) = 0x01010101;
6743 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6744}
6745
6746/* XXX: suppress */
6747void helper_movq(uint64_t *d, uint64_t *s)
6748{
6749 *d = *s;
6750}
6751
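/* Note (added): ops_sse.h is instantiated twice: SHIFT 0 produces the 64-bit
   MMX variants and SHIFT 1 the 128-bit SSE variants of the same operations.
   The helper_template.h inclusions below generate the byte/word/long/quad
   (SHIFT 0..3) arithmetic helpers. */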
6752#define SHIFT 0
6753#include "ops_sse.h"
6754
6755#define SHIFT 1
6756#include "ops_sse.h"
6757
6758#define SHIFT 0
6759#include "helper_template.h"
6760#undef SHIFT
6761
6762#define SHIFT 1
6763#include "helper_template.h"
6764#undef SHIFT
6765
6766#define SHIFT 2
6767#include "helper_template.h"
6768#undef SHIFT
6769
6770#ifdef TARGET_X86_64
6771
6772#define SHIFT 3
6773#include "helper_template.h"
6774#undef SHIFT
6775
6776#endif
6777
6778/* bit operations */
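/* Note (added): these loops assume a non-zero source operand (a zero value
   would never terminate); the translator is expected to branch around the
   helper call when the source is zero. */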
6779target_ulong helper_bsf(target_ulong t0)
6780{
6781 int count;
6782 target_ulong res;
6783
6784 res = t0;
6785 count = 0;
6786 while ((res & 1) == 0) {
6787 count++;
6788 res >>= 1;
6789 }
6790 return count;
6791}
6792
6793target_ulong helper_bsr(target_ulong t0)
6794{
6795 int count;
6796 target_ulong res, mask;
6797
6798 res = t0;
6799 count = TARGET_LONG_BITS - 1;
6800 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6801 while ((res & mask) == 0) {
6802 count--;
6803 res <<= 1;
6804 }
6805 return count;
6806}
6807
6808
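/* Note (added): with CC_OP_EFLAGS the flags have already been computed and
   live in CC_SRC, so these helpers simply return it. */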
6809static int compute_all_eflags(void)
6810{
6811 return CC_SRC;
6812}
6813
6814static int compute_c_eflags(void)
6815{
6816 return CC_SRC & CC_C;
6817}
6818
6819#ifndef VBOX
6820CCTable cc_table[CC_OP_NB] = {
6821 [CC_OP_DYNAMIC] = { /* should never happen */ },
6822
6823 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6824
6825 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6826 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6827 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6828
6829 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6830 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6831 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6832
6833 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6834 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6835 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6836
6837 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6838 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6839 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6840
6841 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6842 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6843 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6844
6845 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6846 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6847 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6848
6849 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6850 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6851 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6852
6853 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6854 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6855 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6856
6857 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6858 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6859 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6860
6861 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6862 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6863 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6864
6865#ifdef TARGET_X86_64
6866 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6867
6868 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6869
6870 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6871
6872 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6873
6874 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6875
6876 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6877
6878 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6879
6880 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6881
6882 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6883
6884 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6885#endif
6886};
6887#else /* VBOX */
6888/* Sync carefully with cpu.h */
6889CCTable cc_table[CC_OP_NB] = {
6890 /* CC_OP_DYNAMIC */ { 0, 0 },
6891
6892 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6893
6894 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6895 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6896 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6897#ifdef TARGET_X86_64
6898 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6899#else
6900 /* CC_OP_MULQ */ { 0, 0 },
6901#endif
6902
6903 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6904 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6905 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6906#ifdef TARGET_X86_64
6907 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6908#else
6909 /* CC_OP_ADDQ */ { 0, 0 },
6910#endif
6911
6912 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6913 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6914 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6915#ifdef TARGET_X86_64
6916 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6917#else
6918 /* CC_OP_ADCQ */ { 0, 0 },
6919#endif
6920
6921 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6922 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6923 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6924#ifdef TARGET_X86_64
6925 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6926#else
6927 /* CC_OP_SUBQ */ { 0, 0 },
6928#endif
6929
6930 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6931 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6932 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6933#ifdef TARGET_X86_64
6934 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6935#else
6936 /* CC_OP_SBBQ */ { 0, 0 },
6937#endif
6938
6939 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6940 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6941 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6942#ifdef TARGET_X86_64
6943 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6944#else
6945 /* CC_OP_LOGICQ */ { 0, 0 },
6946#endif
6947
6948 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6949 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6950 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6951#ifdef TARGET_X86_64
6952 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6953#else
6954 /* CC_OP_INCQ */ { 0, 0 },
6955#endif
6956
6957 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6958 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6959 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6960#ifdef TARGET_X86_64
6961 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6962#else
6963 /* CC_OP_DECQ */ { 0, 0 },
6964#endif
6965
6966 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6967 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6968 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6969#ifdef TARGET_X86_64
6970 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6971#else
6972 /* CC_OP_SHLQ */ { 0, 0 },
6973#endif
6974
6975 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6976 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6977 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6978#ifdef TARGET_X86_64
6979 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6980#else
6981 /* CC_OP_SARQ */ { 0, 0 },
6982#endif
6983};
6984#endif /* VBOX */
6985