VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 34079

Last change on this file since 34079 was 33971, checked in by vboxsync, 14 years ago

recompiler: type fix

  • Property svn:eol-style set to native
File size: 194.6 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "host-utils.h"
33
34#ifdef VBOX
35#include "qemu-common.h"
36#include <math.h>
37#include "tcg.h"
38#endif
39//#define DEBUG_PCALL
40
41#if 0
42#define raise_exception_err(a, b)\
43do {\
44 if (logfile)\
45 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
46 (raise_exception_err)(a, b);\
47} while (0)
48#endif
49
50const uint8_t parity_table[256] = {
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
83};
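/* Descriptive note: the table maps the low byte of a result to CC_P when that
   byte contains an even number of set bits, matching the x86 PF definition;
   e.g. index 0x00 and 0x03 map to CC_P, 0x01 and 0x02 map to 0. The flag
   helpers index it with the low byte of the result when reconstructing PF. */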
84
85/* modulo 17 table */
86const uint8_t rclw_table[32] = {
87 0, 1, 2, 3, 4, 5, 6, 7,
88 8, 9,10,11,12,13,14,15,
89 16, 0, 1, 2, 3, 4, 5, 6,
90 7, 8, 9,10,11,12,13,14,
91};
92
93/* modulo 9 table */
94const uint8_t rclb_table[32] = {
95 0, 1, 2, 3, 4, 5, 6, 7,
96 8, 0, 1, 2, 3, 4, 5, 6,
97 7, 8, 0, 1, 2, 3, 4, 5,
98 6, 7, 8, 0, 1, 2, 3, 4,
99};
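/* Descriptive note: RCL/RCR rotate through CF, so a 16-bit rotate has 17 bit
   positions and an 8-bit rotate has 9. The rotate helpers use these tables to
   reduce the masked shift count (0..31) modulo 17 or 9 without a division. */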
100
101const CPU86_LDouble f15rk[7] =
102{
103 0.00000000000000000000L,
104 1.00000000000000000000L,
105 3.14159265358979323851L, /*pi*/
106 0.30102999566398119523L, /*lg2*/
107 0.69314718055994530943L, /*ln2*/
108 1.44269504088896340739L, /*l2e*/
109 3.32192809488736234781L, /*l2t*/
110};
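/* Descriptive note: constants backing the x87 load-constant helpers; indices
   0..6 correspond to FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T. */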
111
112/* broken thread support */
113
114spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
115
116void helper_lock(void)
117{
118 spin_lock(&global_cpu_lock);
119}
120
121void helper_unlock(void)
122{
123 spin_unlock(&global_cpu_lock);
124}
125
126void helper_write_eflags(target_ulong t0, uint32_t update_mask)
127{
128 load_eflags(t0, update_mask);
129}
130
131target_ulong helper_read_eflags(void)
132{
133 uint32_t eflags;
134 eflags = cc_table[CC_OP].compute_all();
135 eflags |= (DF & DF_MASK);
136 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
137 return eflags;
138}
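/* Note on the lazy flag scheme used above: CC_OP records which operation last
   produced the arithmetic flags, and cc_table[CC_OP].compute_all() rebuilds
   OF/SF/ZF/AF/PF/CF from CC_SRC/CC_DST on demand; DF is kept in its own
   variable and merged back in here, with VM and RF masked out of the result. */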
139
140#ifdef VBOX
141void helper_write_eflags_vme(target_ulong t0)
142{
143 unsigned int new_eflags = t0;
144
145 assert(env->eflags & (1<<VM_SHIFT));
146
147 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
148 /* if TF will be set -> #GP */
149 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
150 || (new_eflags & TF_MASK)) {
151 raise_exception(EXCP0D_GPF);
152 } else {
153 load_eflags(new_eflags,
154 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
155
156 if (new_eflags & IF_MASK) {
157 env->eflags |= VIF_MASK;
158 } else {
159 env->eflags &= ~VIF_MASK;
160 }
161 }
162}
163
164target_ulong helper_read_eflags_vme(void)
165{
166 uint32_t eflags;
167 eflags = cc_table[CC_OP].compute_all();
168 eflags |= (DF & DF_MASK);
169 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
170 if (env->eflags & VIF_MASK)
171 eflags |= IF_MASK;
172 else
173 eflags &= ~IF_MASK;
174
175 /* According to AMD manual, should be read with IOPL == 3 */
176 eflags |= (3 << IOPL_SHIFT);
177
178 /* We only use helper_read_eflags_vme() in 16-bit mode */
179 return eflags & 0xffff;
180}
181
182void helper_dump_state()
183{
184 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
185 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
186 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
187 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
188 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
189 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
190 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
191}
192#endif
193
194/* return non-zero on error */
195#ifndef VBOX
196static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
197#else /* VBOX */
198DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#endif /* VBOX */
200 int selector)
201{
202 SegmentCache *dt;
203 int index;
204 target_ulong ptr;
205
206#ifdef VBOX
207 /* Trying to load a selector with CPL=1? */
208 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
209 {
210 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
211 selector = selector & 0xfffc;
212 }
213#endif
214
215 if (selector & 0x4)
216 dt = &env->ldt;
217 else
218 dt = &env->gdt;
219 index = selector & ~7;
220 if ((index + 7) > dt->limit)
221 return -1;
222 ptr = dt->base + index;
223 *e1_ptr = ldl_kernel(ptr);
224 *e2_ptr = ldl_kernel(ptr + 4);
225 return 0;
226}
227
228#ifndef VBOX
229static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
230#else /* VBOX */
231DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
232#endif /* VBOX */
233{
234 unsigned int limit;
235 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
236 if (e2 & DESC_G_MASK)
237 limit = (limit << 12) | 0xfff;
238 return limit;
239}
240
241#ifndef VBOX
242static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
243#else /* VBOX */
244DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
245#endif /* VBOX */
246{
247 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
248}
249
250#ifndef VBOX
251static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
252#else /* VBOX */
253DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#endif /* VBOX */
255{
256 sc->base = get_seg_base(e1, e2);
257 sc->limit = get_seg_limit(e1, e2);
258 sc->flags = e2;
259}
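/* Worked example of the descriptor decoding above (illustrative values only):
   a flat 4 GiB code segment has e1 = 0x0000ffff, e2 = 0x00cf9b00, giving
   base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000) = 0 and
   limit = ((0xffff | 0x000f0000) << 12) | 0xfff = 0xffffffff (G bit set).
   load_seg_cache_raw_dt() keeps the raw e2 word in sc->flags so later
   DESC_*_MASK tests can be applied to the cached descriptor directly. */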
260
261/* init the segment cache in vm86 mode. */
262#ifndef VBOX
263static inline void load_seg_vm(int seg, int selector)
264#else /* VBOX */
265DECLINLINE(void) load_seg_vm(int seg, int selector)
266#endif /* VBOX */
267{
268 selector &= 0xffff;
269#ifdef VBOX
270 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
271 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
272 flags |= (3 << DESC_DPL_SHIFT);
273
274 cpu_x86_load_seg_cache(env, seg, selector,
275 (selector << 4), 0xffff, flags);
276#else
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, 0);
279#endif
280}
281
282#ifndef VBOX
283static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
284#else /* VBOX */
285DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
286#endif /* VBOX */
287 uint32_t *esp_ptr, int dpl)
288{
289#ifndef VBOX
290 int type, index, shift;
291#else
292 unsigned int type, index, shift;
293#endif
294
295#if 0
296 {
297 int i;
298 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
299 for(i=0;i<env->tr.limit;i++) {
300 printf("%02x ", env->tr.base[i]);
301 if ((i & 7) == 7) printf("\n");
302 }
303 printf("\n");
304 }
305#endif
306
307 if (!(env->tr.flags & DESC_P_MASK))
308 cpu_abort(env, "invalid tss");
309 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
310 if ((type & 7) != 1)
311 cpu_abort(env, "invalid tss type");
312 shift = type >> 3;
313 index = (dpl * 4 + 2) << shift;
314 if (index + (4 << shift) - 1 > env->tr.limit)
315 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
316 if (shift == 0) {
317 *esp_ptr = lduw_kernel(env->tr.base + index);
318 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
319 } else {
320 *esp_ptr = ldl_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
322 }
323}
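/* TSS stack-pointer layout used above: for a 32-bit TSS (shift == 1) the
   privilege-level dpl stack fields sit at offset dpl * 8 + 4, i.e. ESP0/SS0
   at 0x04/0x08, ESP1/SS1 at 0x0c/0x10, ESP2/SS2 at 0x14/0x18; a 16-bit TSS
   (shift == 0) packs SP/SS into 2-byte fields starting at offset 2. */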
324
325/* XXX: merge with load_seg() */
326static void tss_load_seg(int seg_reg, int selector)
327{
328 uint32_t e1, e2;
329 int rpl, dpl, cpl;
330
331#ifdef VBOX
332 e1 = e2 = 0;
333 cpl = env->hflags & HF_CPL_MASK;
334 /* Trying to load a selector with CPL=1? */
335 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
336 {
337 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
338 selector = selector & 0xfffc;
339 }
340#endif
341
342 if ((selector & 0xfffc) != 0) {
343 if (load_segment(&e1, &e2, selector) != 0)
344 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
345 if (!(e2 & DESC_S_MASK))
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 rpl = selector & 3;
348 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
349 cpl = env->hflags & HF_CPL_MASK;
350 if (seg_reg == R_CS) {
351 if (!(e2 & DESC_CS_MASK))
352 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
353 /* XXX: is it correct ? */
354 if (dpl != rpl)
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 if ((e2 & DESC_C_MASK) && dpl > rpl)
357 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
358 } else if (seg_reg == R_SS) {
359 /* SS must be writable data */
360 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
361 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
362 if (dpl != cpl || dpl != rpl)
363 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
364 } else {
365 /* not readable code */
366 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
367 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
368 /* if data or non-conforming code, check the rights */
369 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
370 if (dpl < cpl || dpl < rpl)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372 }
373 }
374 if (!(e2 & DESC_P_MASK))
375 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
376 cpu_x86_load_seg_cache(env, seg_reg, selector,
377 get_seg_base(e1, e2),
378 get_seg_limit(e1, e2),
379 e2);
380 } else {
381 if (seg_reg == R_SS || seg_reg == R_CS)
382 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
383#ifdef VBOX
384#if 0
385 /** @todo: we currently ignore loading null selectors; need to check once what the correct behaviour is */
386 cpu_x86_load_seg_cache(env, seg_reg, selector,
387 0, 0, 0);
388#endif
389#endif
390 }
391}
392
393#define SWITCH_TSS_JMP 0
394#define SWITCH_TSS_IRET 1
395#define SWITCH_TSS_CALL 2
396
397/* XXX: restore CPU state in registers (PowerPC case) */
398static void switch_tss(int tss_selector,
399 uint32_t e1, uint32_t e2, int source,
400 uint32_t next_eip)
401{
402 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
403 target_ulong tss_base;
404 uint32_t new_regs[8], new_segs[6];
405 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
406 uint32_t old_eflags, eflags_mask;
407 SegmentCache *dt;
408#ifndef VBOX
409 int index;
410#else
411 unsigned int index;
412#endif
413 target_ulong ptr;
414
415 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
416#ifdef DEBUG_PCALL
417 if (loglevel & CPU_LOG_PCALL)
418 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
419#endif
420
421#if defined(VBOX) && defined(DEBUG)
422 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
423#endif
424
425 /* if task gate, we read the TSS segment and we load it */
426 if (type == 5) {
427 if (!(e2 & DESC_P_MASK))
428 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
429 tss_selector = e1 >> 16;
430 if (tss_selector & 4)
431 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
432 if (load_segment(&e1, &e2, tss_selector) != 0)
433 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
434 if (e2 & DESC_S_MASK)
435 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
436 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
437 if ((type & 7) != 1)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 }
440
441 if (!(e2 & DESC_P_MASK))
442 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
443
444 if (type & 8)
445 tss_limit_max = 103;
446 else
447 tss_limit_max = 43;
448 tss_limit = get_seg_limit(e1, e2);
449 tss_base = get_seg_base(e1, e2);
450 if ((tss_selector & 4) != 0 ||
451 tss_limit < tss_limit_max)
452 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
453 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
454 if (old_type & 8)
455 old_tss_limit_max = 103;
456 else
457 old_tss_limit_max = 43;
458
459 /* read all the registers from the new TSS */
460 if (type & 8) {
461 /* 32 bit */
462 new_cr3 = ldl_kernel(tss_base + 0x1c);
463 new_eip = ldl_kernel(tss_base + 0x20);
464 new_eflags = ldl_kernel(tss_base + 0x24);
465 for(i = 0; i < 8; i++)
466 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
467 for(i = 0; i < 6; i++)
468 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
469 new_ldt = lduw_kernel(tss_base + 0x60);
470 new_trap = ldl_kernel(tss_base + 0x64);
471 } else {
472 /* 16 bit */
473 new_cr3 = 0;
474 new_eip = lduw_kernel(tss_base + 0x0e);
475 new_eflags = lduw_kernel(tss_base + 0x10);
476 for(i = 0; i < 8; i++)
477 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
478 for(i = 0; i < 4; i++)
479 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
480 new_ldt = lduw_kernel(tss_base + 0x2a);
481 new_segs[R_FS] = 0;
482 new_segs[R_GS] = 0;
483 new_trap = 0;
484 }
485
486 /* NOTE: we must avoid memory exceptions during the task switch,
487 so we make dummy accesses before */
488 /* XXX: it can still fail in some cases, so a bigger hack is
489 necessary to validate the TLB after having done the accesses */
490
491 v1 = ldub_kernel(env->tr.base);
492 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
493 stb_kernel(env->tr.base, v1);
494 stb_kernel(env->tr.base + old_tss_limit_max, v2);
495
496 /* clear busy bit (it is restartable) */
497 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
498 target_ulong ptr;
499 uint32_t e2;
500 ptr = env->gdt.base + (env->tr.selector & ~7);
501 e2 = ldl_kernel(ptr + 4);
502 e2 &= ~DESC_TSS_BUSY_MASK;
503 stl_kernel(ptr + 4, e2);
504 }
505 old_eflags = compute_eflags();
506 if (source == SWITCH_TSS_IRET)
507 old_eflags &= ~NT_MASK;
508
509 /* save the current state in the old TSS */
510 if (type & 8) {
511 /* 32 bit */
512 stl_kernel(env->tr.base + 0x20, next_eip);
513 stl_kernel(env->tr.base + 0x24, old_eflags);
514 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
515 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
516 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
517 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
518 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
519 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
520 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
521 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
522 for(i = 0; i < 6; i++)
523 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
524#ifdef VBOX
525 /* Must store the ldt as it gets reloaded and might have been changed. */
526 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
527#endif
528#if defined(VBOX) && defined(DEBUG)
529 printf("TSS 32 bits switch\n");
530 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
531#endif
532 } else {
533 /* 16 bit */
534 stw_kernel(env->tr.base + 0x0e, next_eip);
535 stw_kernel(env->tr.base + 0x10, old_eflags);
536 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
537 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
538 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
539 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
540 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
541 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
542 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
543 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
544 for(i = 0; i < 4; i++)
545 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
546#ifdef VBOX
547 /* Must store the ldt as it gets reloaded and might have been changed. */
548 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
549#endif
550 }
551
552 /* now if an exception occurs, it will occur in the next task
553 context */
554
555 if (source == SWITCH_TSS_CALL) {
556 stw_kernel(tss_base, env->tr.selector);
557 new_eflags |= NT_MASK;
558 }
559
560 /* set busy bit */
561 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
562 target_ulong ptr;
563 uint32_t e2;
564 ptr = env->gdt.base + (tss_selector & ~7);
565 e2 = ldl_kernel(ptr + 4);
566 e2 |= DESC_TSS_BUSY_MASK;
567 stl_kernel(ptr + 4, e2);
568 }
569
570 /* set the new CPU state */
571 /* from this point, any exception which occurs can give problems */
572 env->cr[0] |= CR0_TS_MASK;
573 env->hflags |= HF_TS_MASK;
574 env->tr.selector = tss_selector;
575 env->tr.base = tss_base;
576 env->tr.limit = tss_limit;
577 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
578
579 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
580 cpu_x86_update_cr3(env, new_cr3);
581 }
582
583 /* load all registers without an exception, then reload them with
584 possible exception */
585 env->eip = new_eip;
586 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
587 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
588 if (!(type & 8))
589 eflags_mask &= 0xffff;
590 load_eflags(new_eflags, eflags_mask);
591 /* XXX: what to do in 16 bit case ? */
592 EAX = new_regs[0];
593 ECX = new_regs[1];
594 EDX = new_regs[2];
595 EBX = new_regs[3];
596 ESP = new_regs[4];
597 EBP = new_regs[5];
598 ESI = new_regs[6];
599 EDI = new_regs[7];
600 if (new_eflags & VM_MASK) {
601 for(i = 0; i < 6; i++)
602 load_seg_vm(i, new_segs[i]);
603 /* in vm86, CPL is always 3 */
604 cpu_x86_set_cpl(env, 3);
605 } else {
606 /* CPL is set to the RPL of CS */
607 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
608 /* first just selectors as the rest may trigger exceptions */
609 for(i = 0; i < 6; i++)
610 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
611 }
612
613 env->ldt.selector = new_ldt & ~4;
614 env->ldt.base = 0;
615 env->ldt.limit = 0;
616 env->ldt.flags = 0;
617
618 /* load the LDT */
619 if (new_ldt & 4)
620 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
621
622 if ((new_ldt & 0xfffc) != 0) {
623 dt = &env->gdt;
624 index = new_ldt & ~7;
625 if ((index + 7) > dt->limit)
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 ptr = dt->base + index;
628 e1 = ldl_kernel(ptr);
629 e2 = ldl_kernel(ptr + 4);
630 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
631 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
632 if (!(e2 & DESC_P_MASK))
633 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
634 load_seg_cache_raw_dt(&env->ldt, e1, e2);
635 }
636
637 /* load the segments */
638 if (!(new_eflags & VM_MASK)) {
639 tss_load_seg(R_CS, new_segs[R_CS]);
640 tss_load_seg(R_SS, new_segs[R_SS]);
641 tss_load_seg(R_ES, new_segs[R_ES]);
642 tss_load_seg(R_DS, new_segs[R_DS]);
643 tss_load_seg(R_FS, new_segs[R_FS]);
644 tss_load_seg(R_GS, new_segs[R_GS]);
645 }
646
647 /* check that EIP is in the CS segment limits */
648 if (new_eip > env->segs[R_CS].limit) {
649 /* XXX: different exception if CALL ? */
650 raise_exception_err(EXCP0D_GPF, 0);
651 }
652}
653
654/* check if Port I/O is allowed in TSS */
655#ifndef VBOX
656static inline void check_io(int addr, int size)
657{
658 int io_offset, val, mask;
659
660#else /* VBOX */
661DECLINLINE(void) check_io(int addr, int size)
662{
663 int val, mask;
664 unsigned int io_offset;
665#endif /* VBOX */
666 /* TSS must be a valid 32 bit one */
667 if (!(env->tr.flags & DESC_P_MASK) ||
668 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
669 env->tr.limit < 103)
670 goto fail;
671 io_offset = lduw_kernel(env->tr.base + 0x66);
672 io_offset += (addr >> 3);
673 /* Note: the check needs two bytes */
674 if ((io_offset + 1) > env->tr.limit)
675 goto fail;
676 val = lduw_kernel(env->tr.base + io_offset);
677 val >>= (addr & 7);
678 mask = (1 << size) - 1;
679 /* all bits must be zero to allow the I/O */
680 if ((val & mask) != 0) {
681 fail:
682 raise_exception_err(EXCP0D_GPF, 0);
683 }
684}
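/* Example of the bitmap check above (illustrative values): a 2-byte access to
   port 0x3f8 adds 0x3f8 >> 3 == 127 to the bitmap base read from tr.base +
   0x66 and tests bits 0 and 1 (0x3f8 & 7 == 0, mask == 3) of the word loaded
   there; the access is allowed only if both bits are clear. Loading a full
   word lets an access that straddles a byte boundary be checked in one go. */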
685
686#ifdef VBOX
687/* Keep in sync with gen_check_external_event() */
688void helper_check_external_event()
689{
690 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
691 | CPU_INTERRUPT_EXTERNAL_TIMER
692 | CPU_INTERRUPT_EXTERNAL_DMA))
693 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
694 && (env->eflags & IF_MASK)
695 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
696 {
697 helper_external_event();
698 }
699
700}
701
702void helper_sync_seg(uint32_t reg)
703{
704 if (env->segs[reg].newselector)
705 sync_seg(env, reg, env->segs[reg].newselector);
706}
707#endif
708
709void helper_check_iob(uint32_t t0)
710{
711 check_io(t0, 1);
712}
713
714void helper_check_iow(uint32_t t0)
715{
716 check_io(t0, 2);
717}
718
719void helper_check_iol(uint32_t t0)
720{
721 check_io(t0, 4);
722}
723
724void helper_outb(uint32_t port, uint32_t data)
725{
726 cpu_outb(env, port, data & 0xff);
727}
728
729target_ulong helper_inb(uint32_t port)
730{
731 return cpu_inb(env, port);
732}
733
734void helper_outw(uint32_t port, uint32_t data)
735{
736 cpu_outw(env, port, data & 0xffff);
737}
738
739target_ulong helper_inw(uint32_t port)
740{
741 return cpu_inw(env, port);
742}
743
744void helper_outl(uint32_t port, uint32_t data)
745{
746 cpu_outl(env, port, data);
747}
748
749target_ulong helper_inl(uint32_t port)
750{
751 return cpu_inl(env, port);
752}
753
754#ifndef VBOX
755static inline unsigned int get_sp_mask(unsigned int e2)
756#else /* VBOX */
757DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
758#endif /* VBOX */
759{
760 if (e2 & DESC_B_MASK)
761 return 0xffffffff;
762 else
763 return 0xffff;
764}
765
766#ifdef TARGET_X86_64
767#define SET_ESP(val, sp_mask)\
768do {\
769 if ((sp_mask) == 0xffff)\
770 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
771 else if ((sp_mask) == 0xffffffffLL)\
772 ESP = (uint32_t)(val);\
773 else\
774 ESP = (val);\
775} while (0)
776#else
777#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
778#endif
779
780/* in 64-bit machines, this can overflow. So this segment addition macro
781 * can be used to trim the value to 32-bit whenever needed */
782#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
783
784/* XXX: add an is_user flag to have proper security support */
785#define PUSHW(ssp, sp, sp_mask, val)\
786{\
787 sp -= 2;\
788 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
789}
790
791#define PUSHL(ssp, sp, sp_mask, val)\
792{\
793 sp -= 4;\
794 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
795}
796
797#define POPW(ssp, sp, sp_mask, val)\
798{\
799 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
800 sp += 2;\
801}
802
803#define POPL(ssp, sp, sp_mask, val)\
804{\
805 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
806 sp += 4;\
807}
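/* These macros only update the local sp variable passed in; callers keep ESP
   in a local copy and publish it with SET_ESP() once every push has
   succeeded, so a fault raised by stw_kernel/stl_kernel part-way through an
   interrupt or call frame leaves the architectural ESP untouched. SEG_ADDL
   truncates the linear address to 32 bits so 16/32-bit stacks wrap correctly
   on 64-bit builds, as noted above. */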
808
809/* protected mode interrupt */
810static void do_interrupt_protected(int intno, int is_int, int error_code,
811 unsigned int next_eip, int is_hw)
812{
813 SegmentCache *dt;
814 target_ulong ptr, ssp;
815 int type, dpl, selector, ss_dpl, cpl;
816 int has_error_code, new_stack, shift;
817 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
818 uint32_t old_eip, sp_mask;
819
820#ifdef VBOX
821 ss = ss_e1 = ss_e2 = 0;
822 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
823 cpu_loop_exit();
824#endif
825
826 has_error_code = 0;
827 if (!is_int && !is_hw) {
828 switch(intno) {
829 case 8:
830 case 10:
831 case 11:
832 case 12:
833 case 13:
834 case 14:
835 case 17:
836 has_error_code = 1;
837 break;
838 }
839 }
840 if (is_int)
841 old_eip = next_eip;
842 else
843 old_eip = env->eip;
844
845 dt = &env->idt;
846#ifndef VBOX
847 if (intno * 8 + 7 > dt->limit)
848#else
849 if ((unsigned)intno * 8 + 7 > dt->limit)
850#endif
851 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
852 ptr = dt->base + intno * 8;
853 e1 = ldl_kernel(ptr);
854 e2 = ldl_kernel(ptr + 4);
855 /* check gate type */
856 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
857 switch(type) {
858 case 5: /* task gate */
859 /* must do that check here to return the correct error code */
860 if (!(e2 & DESC_P_MASK))
861 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
862 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
863 if (has_error_code) {
864 int type;
865 uint32_t mask;
866 /* push the error code */
867 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
868 shift = type >> 3;
869 if (env->segs[R_SS].flags & DESC_B_MASK)
870 mask = 0xffffffff;
871 else
872 mask = 0xffff;
873 esp = (ESP - (2 << shift)) & mask;
874 ssp = env->segs[R_SS].base + esp;
875 if (shift)
876 stl_kernel(ssp, error_code);
877 else
878 stw_kernel(ssp, error_code);
879 SET_ESP(esp, mask);
880 }
881 return;
882 case 6: /* 286 interrupt gate */
883 case 7: /* 286 trap gate */
884 case 14: /* 386 interrupt gate */
885 case 15: /* 386 trap gate */
886 break;
887 default:
888 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
889 break;
890 }
891 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
892 cpl = env->hflags & HF_CPL_MASK;
893 /* check privilege if software int */
894 if (is_int && dpl < cpl)
895 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
896 /* check valid bit */
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
899 selector = e1 >> 16;
900 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
901 if ((selector & 0xfffc) == 0)
902 raise_exception_err(EXCP0D_GPF, 0);
903
904 if (load_segment(&e1, &e2, selector) != 0)
905 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
906 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
908 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
909 if (dpl > cpl)
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 if (!(e2 & DESC_P_MASK))
912 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
913 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
914 /* to inner privilege */
915 get_ss_esp_from_tss(&ss, &esp, dpl);
916 if ((ss & 0xfffc) == 0)
917 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
918 if ((ss & 3) != dpl)
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
921 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
922 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
923 if (ss_dpl != dpl)
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 if (!(ss_e2 & DESC_S_MASK) ||
926 (ss_e2 & DESC_CS_MASK) ||
927 !(ss_e2 & DESC_W_MASK))
928 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
929 if (!(ss_e2 & DESC_P_MASK))
930#ifdef VBOX /* See page 3-477 of 253666.pdf */
931 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
932#else
933 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
934#endif
935 new_stack = 1;
936 sp_mask = get_sp_mask(ss_e2);
937 ssp = get_seg_base(ss_e1, ss_e2);
938#if defined(VBOX) && defined(DEBUG)
939 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
940#endif
941 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
942 /* to same privilege */
943 if (env->eflags & VM_MASK)
944 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
945 new_stack = 0;
946 sp_mask = get_sp_mask(env->segs[R_SS].flags);
947 ssp = env->segs[R_SS].base;
948 esp = ESP;
949 dpl = cpl;
950 } else {
951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
952 new_stack = 0; /* avoid warning */
953 sp_mask = 0; /* avoid warning */
954 ssp = 0; /* avoid warning */
955 esp = 0; /* avoid warning */
956 }
957
958 shift = type >> 3;
959
960#if 0
961 /* XXX: check that enough room is available */
962 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
963 if (env->eflags & VM_MASK)
964 push_size += 8;
965 push_size <<= shift;
966#endif
967 if (shift == 1) {
968 if (new_stack) {
969 if (env->eflags & VM_MASK) {
970 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
971 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
972 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
973 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
974 }
975 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
976 PUSHL(ssp, esp, sp_mask, ESP);
977 }
978 PUSHL(ssp, esp, sp_mask, compute_eflags());
979 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
980 PUSHL(ssp, esp, sp_mask, old_eip);
981 if (has_error_code) {
982 PUSHL(ssp, esp, sp_mask, error_code);
983 }
984 } else {
985 if (new_stack) {
986 if (env->eflags & VM_MASK) {
987 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
988 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
989 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
990 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
991 }
992 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
993 PUSHW(ssp, esp, sp_mask, ESP);
994 }
995 PUSHW(ssp, esp, sp_mask, compute_eflags());
996 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
997 PUSHW(ssp, esp, sp_mask, old_eip);
998 if (has_error_code) {
999 PUSHW(ssp, esp, sp_mask, error_code);
1000 }
1001 }
1002
1003 if (new_stack) {
1004 if (env->eflags & VM_MASK) {
1005 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1006 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1007 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1008 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1009 }
1010 ss = (ss & ~3) | dpl;
1011 cpu_x86_load_seg_cache(env, R_SS, ss,
1012 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1013 }
1014 SET_ESP(esp, sp_mask);
1015
1016 selector = (selector & ~3) | dpl;
1017 cpu_x86_load_seg_cache(env, R_CS, selector,
1018 get_seg_base(e1, e2),
1019 get_seg_limit(e1, e2),
1020 e2);
1021 cpu_x86_set_cpl(env, dpl);
1022 env->eip = offset;
1023
1024 /* interrupt gate clear IF mask */
1025 if ((type & 1) == 0) {
1026 env->eflags &= ~IF_MASK;
1027 }
1028#ifndef VBOX
1029 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1030#else
1031 /*
1032 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1033 * gets confused by seemingly changed EFLAGS. See #3491 and
1034 * public bug #2341.
1035 */
1036 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1037#endif
1038}
1039#ifdef VBOX
1040
1041/* check if VME interrupt redirection is enabled in TSS */
1042DECLINLINE(bool) is_vme_irq_redirected(int intno)
1043{
1044 unsigned int io_offset, intredir_offset;
1045 unsigned char val, mask;
1046
1047 /* TSS must be a valid 32 bit one */
1048 if (!(env->tr.flags & DESC_P_MASK) ||
1049 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1050 env->tr.limit < 103)
1051 goto fail;
1052 io_offset = lduw_kernel(env->tr.base + 0x66);
1053 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1054 if (io_offset < 0x68 + 0x20)
1055 io_offset = 0x68 + 0x20;
1056 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1057 intredir_offset = io_offset - 0x20;
1058
1059 intredir_offset += (intno >> 3);
1060 if ((intredir_offset) > env->tr.limit)
1061 goto fail;
1062
1063 val = ldub_kernel(env->tr.base + intredir_offset);
1064 mask = 1 << (unsigned char)(intno & 7);
1065
1066 /* bit set means no redirection. */
1067 if ((val & mask) != 0) {
1068 return false;
1069 }
1070 return true;
1071
1072fail:
1073 raise_exception_err(EXCP0D_GPF, 0);
1074 return true;
1075}
1076
1077/* V86 mode software interrupt with CR4.VME=1 */
1078static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1079{
1080 target_ulong ptr, ssp;
1081 int selector;
1082 uint32_t offset, esp;
1083 uint32_t old_cs, old_eflags;
1084 uint32_t iopl;
1085
1086 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1087
1088 if (!is_vme_irq_redirected(intno))
1089 {
1090 if (iopl == 3)
1091 {
1092 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1093 return;
1094 }
1095 else
1096 raise_exception_err(EXCP0D_GPF, 0);
1097 }
1098
1099 /* virtual mode idt is at linear address 0 */
1100 ptr = 0 + intno * 4;
1101 offset = lduw_kernel(ptr);
1102 selector = lduw_kernel(ptr + 2);
1103 esp = ESP;
1104 ssp = env->segs[R_SS].base;
1105 old_cs = env->segs[R_CS].selector;
1106
1107 old_eflags = compute_eflags();
1108 if (iopl < 3)
1109 {
1110 /* copy VIF into IF and set IOPL to 3 */
1111 if (env->eflags & VIF_MASK)
1112 old_eflags |= IF_MASK;
1113 else
1114 old_eflags &= ~IF_MASK;
1115
1116 old_eflags |= (3 << IOPL_SHIFT);
1117 }
1118
1119 /* XXX: use SS segment size ? */
1120 PUSHW(ssp, esp, 0xffff, old_eflags);
1121 PUSHW(ssp, esp, 0xffff, old_cs);
1122 PUSHW(ssp, esp, 0xffff, next_eip);
1123
1124 /* update processor state */
1125 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1126 env->eip = offset;
1127 env->segs[R_CS].selector = selector;
1128 env->segs[R_CS].base = (selector << 4);
1129 env->eflags &= ~(TF_MASK | RF_MASK);
1130
1131 if (iopl < 3)
1132 env->eflags &= ~VIF_MASK;
1133 else
1134 env->eflags &= ~IF_MASK;
1135}
1136#endif /* VBOX */
1137
1138#ifdef TARGET_X86_64
1139
1140#define PUSHQ(sp, val)\
1141{\
1142 sp -= 8;\
1143 stq_kernel(sp, (val));\
1144}
1145
1146#define POPQ(sp, val)\
1147{\
1148 val = ldq_kernel(sp);\
1149 sp += 8;\
1150}
1151
1152#ifndef VBOX
1153static inline target_ulong get_rsp_from_tss(int level)
1154#else /* VBOX */
1155DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1156#endif /* VBOX */
1157{
1158 int index;
1159
1160#if 0
1161 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1162 env->tr.base, env->tr.limit);
1163#endif
1164
1165 if (!(env->tr.flags & DESC_P_MASK))
1166 cpu_abort(env, "invalid tss");
1167 index = 8 * level + 4;
1168 if ((index + 7) > env->tr.limit)
1169 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1170 return ldq_kernel(env->tr.base + index);
1171}
1172
1173/* 64 bit interrupt */
1174static void do_interrupt64(int intno, int is_int, int error_code,
1175 target_ulong next_eip, int is_hw)
1176{
1177 SegmentCache *dt;
1178 target_ulong ptr;
1179 int type, dpl, selector, cpl, ist;
1180 int has_error_code, new_stack;
1181 uint32_t e1, e2, e3, ss;
1182 target_ulong old_eip, esp, offset;
1183
1184#ifdef VBOX
1185 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1186 cpu_loop_exit();
1187#endif
1188
1189 has_error_code = 0;
1190 if (!is_int && !is_hw) {
1191 switch(intno) {
1192 case 8:
1193 case 10:
1194 case 11:
1195 case 12:
1196 case 13:
1197 case 14:
1198 case 17:
1199 has_error_code = 1;
1200 break;
1201 }
1202 }
1203 if (is_int)
1204 old_eip = next_eip;
1205 else
1206 old_eip = env->eip;
1207
1208 dt = &env->idt;
1209 if (intno * 16 + 15 > dt->limit)
1210 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1211 ptr = dt->base + intno * 16;
1212 e1 = ldl_kernel(ptr);
1213 e2 = ldl_kernel(ptr + 4);
1214 e3 = ldl_kernel(ptr + 8);
1215 /* check gate type */
1216 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1217 switch(type) {
1218 case 14: /* 386 interrupt gate */
1219 case 15: /* 386 trap gate */
1220 break;
1221 default:
1222 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1223 break;
1224 }
1225 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1226 cpl = env->hflags & HF_CPL_MASK;
1227 /* check privilege if software int */
1228 if (is_int && dpl < cpl)
1229 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1230 /* check valid bit */
1231 if (!(e2 & DESC_P_MASK))
1232 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1233 selector = e1 >> 16;
1234 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1235 ist = e2 & 7;
1236 if ((selector & 0xfffc) == 0)
1237 raise_exception_err(EXCP0D_GPF, 0);
1238
1239 if (load_segment(&e1, &e2, selector) != 0)
1240 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1241 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1242 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1243 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1244 if (dpl > cpl)
1245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1246 if (!(e2 & DESC_P_MASK))
1247 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1248 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1249 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1250 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1251 /* to inner privilege */
1252 if (ist != 0)
1253 esp = get_rsp_from_tss(ist + 3);
1254 else
1255 esp = get_rsp_from_tss(dpl);
1256 esp &= ~0xfLL; /* align stack */
1257 ss = 0;
1258 new_stack = 1;
1259 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1260 /* to same privilege */
1261 if (env->eflags & VM_MASK)
1262 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1263 new_stack = 0;
1264 if (ist != 0)
1265 esp = get_rsp_from_tss(ist + 3);
1266 else
1267 esp = ESP;
1268 esp &= ~0xfLL; /* align stack */
1269 dpl = cpl;
1270 } else {
1271 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1272 new_stack = 0; /* avoid warning */
1273 esp = 0; /* avoid warning */
1274 }
1275
1276 PUSHQ(esp, env->segs[R_SS].selector);
1277 PUSHQ(esp, ESP);
1278 PUSHQ(esp, compute_eflags());
1279 PUSHQ(esp, env->segs[R_CS].selector);
1280 PUSHQ(esp, old_eip);
1281 if (has_error_code) {
1282 PUSHQ(esp, error_code);
1283 }
1284
1285 if (new_stack) {
1286 ss = 0 | dpl;
1287 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1288 }
1289 ESP = esp;
1290
1291 selector = (selector & ~3) | dpl;
1292 cpu_x86_load_seg_cache(env, R_CS, selector,
1293 get_seg_base(e1, e2),
1294 get_seg_limit(e1, e2),
1295 e2);
1296 cpu_x86_set_cpl(env, dpl);
1297 env->eip = offset;
1298
1299 /* interrupt gate clear IF mask */
1300 if ((type & 1) == 0) {
1301 env->eflags &= ~IF_MASK;
1302 }
1303
1304#ifndef VBOX
1305 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1306#else
1307 /*
1308 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1309 * gets confused by seemingly changed EFLAGS. See #3491 and
1310 * public bug #2341.
1311 */
1312 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1313#endif
1314}
1315#endif
1316
1317#if defined(CONFIG_USER_ONLY)
1318void helper_syscall(int next_eip_addend)
1319{
1320 env->exception_index = EXCP_SYSCALL;
1321 env->exception_next_eip = env->eip + next_eip_addend;
1322 cpu_loop_exit();
1323}
1324#else
1325void helper_syscall(int next_eip_addend)
1326{
1327 int selector;
1328
1329 if (!(env->efer & MSR_EFER_SCE)) {
1330 raise_exception_err(EXCP06_ILLOP, 0);
1331 }
1332 selector = (env->star >> 32) & 0xffff;
1333#ifdef TARGET_X86_64
1334 if (env->hflags & HF_LMA_MASK) {
1335 int code64;
1336
1337 ECX = env->eip + next_eip_addend;
1338 env->regs[11] = compute_eflags();
1339
1340 code64 = env->hflags & HF_CS64_MASK;
1341
1342 cpu_x86_set_cpl(env, 0);
1343 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1344 0, 0xffffffff,
1345 DESC_G_MASK | DESC_P_MASK |
1346 DESC_S_MASK |
1347 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1348 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1349 0, 0xffffffff,
1350 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1351 DESC_S_MASK |
1352 DESC_W_MASK | DESC_A_MASK);
1353 env->eflags &= ~env->fmask;
1354 load_eflags(env->eflags, 0);
1355 if (code64)
1356 env->eip = env->lstar;
1357 else
1358 env->eip = env->cstar;
1359 } else
1360#endif
1361 {
1362 ECX = (uint32_t)(env->eip + next_eip_addend);
1363
1364 cpu_x86_set_cpl(env, 0);
1365 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1366 0, 0xffffffff,
1367 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1368 DESC_S_MASK |
1369 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1370 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1371 0, 0xffffffff,
1372 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1373 DESC_S_MASK |
1374 DESC_W_MASK | DESC_A_MASK);
1375 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1376 env->eip = (uint32_t)env->star;
1377 }
1378}
1379#endif
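/* SYSCALL summary of the code above: the new CS selector comes from
   MSR_STAR[47:32] (SS is that selector + 8). In long mode the return RIP is
   saved in RCX and the old RFLAGS in R11, RFLAGS is masked with env->fmask
   (the FMASK MSR), and execution continues at LSTAR (64-bit caller) or CSTAR
   (compatibility-mode caller). In legacy mode only ECX is saved, IF/RF/VM are
   cleared and EIP is taken from the low 32 bits of MSR_STAR. */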
1380
1381void helper_sysret(int dflag)
1382{
1383 int cpl, selector;
1384
1385 if (!(env->efer & MSR_EFER_SCE)) {
1386 raise_exception_err(EXCP06_ILLOP, 0);
1387 }
1388 cpl = env->hflags & HF_CPL_MASK;
1389 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1390 raise_exception_err(EXCP0D_GPF, 0);
1391 }
1392 selector = (env->star >> 48) & 0xffff;
1393#ifdef TARGET_X86_64
1394 if (env->hflags & HF_LMA_MASK) {
1395 if (dflag == 2) {
1396 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1397 0, 0xffffffff,
1398 DESC_G_MASK | DESC_P_MASK |
1399 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1400 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1401 DESC_L_MASK);
1402 env->eip = ECX;
1403 } else {
1404 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1405 0, 0xffffffff,
1406 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1407 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1408 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1409 env->eip = (uint32_t)ECX;
1410 }
1411 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1412 0, 0xffffffff,
1413 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1414 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1415 DESC_W_MASK | DESC_A_MASK);
1416 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1417 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1418 cpu_x86_set_cpl(env, 3);
1419 } else
1420#endif
1421 {
1422 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1423 0, 0xffffffff,
1424 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1425 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1426 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1427 env->eip = (uint32_t)ECX;
1428 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1429 0, 0xffffffff,
1430 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1431 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1432 DESC_W_MASK | DESC_A_MASK);
1433 env->eflags |= IF_MASK;
1434 cpu_x86_set_cpl(env, 3);
1435 }
1436#ifdef USE_KQEMU
1437 if (kqemu_is_ok(env)) {
1438 if (env->hflags & HF_LMA_MASK)
1439 CC_OP = CC_OP_EFLAGS;
1440 env->exception_index = -1;
1441 cpu_loop_exit();
1442 }
1443#endif
1444}
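/* SYSRET summary of the code above: the base selector comes from
   MSR_STAR[63:48]. A 64-bit return (dflag == 2) loads CS with selector + 16,
   a 32-bit return loads CS with the selector itself; the CS RPL is forced to
   3, SS becomes selector + 8 and CPL is set to 3. In long mode RFLAGS is
   restored from R11; outside long mode only IF is set again. */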
1445
1446#ifdef VBOX
1447/**
1448 * Checks and processes external VMM events.
1449 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1450 */
1451void helper_external_event(void)
1452{
1453#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1454 uintptr_t uSP;
1455# ifdef RT_ARCH_AMD64
1456 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1457# else
1458 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1459# endif
1460 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1461#endif
1462 /* Keep in sync with flags checked by gen_check_external_event() */
1463 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1464 {
1465 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1466 ~CPU_INTERRUPT_EXTERNAL_HARD);
1467 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1468 }
1469 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1470 {
1471 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1472 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1473 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1474 }
1475 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1476 {
1477 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1478 ~CPU_INTERRUPT_EXTERNAL_DMA);
1479 remR3DmaRun(env);
1480 }
1481 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1482 {
1483 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1484 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1485 remR3TimersRun(env);
1486 }
1487}
1488/* helper for recording call instruction addresses for later scanning */
1489void helper_record_call()
1490{
1491 if ( !(env->state & CPU_RAW_RING0)
1492 && (env->cr[0] & CR0_PG_MASK)
1493 && !(env->eflags & X86_EFL_IF))
1494 remR3RecordCall(env);
1495}
1496#endif /* VBOX */
1497
1498/* real mode interrupt */
1499static void do_interrupt_real(int intno, int is_int, int error_code,
1500 unsigned int next_eip)
1501{
1502 SegmentCache *dt;
1503 target_ulong ptr, ssp;
1504 int selector;
1505 uint32_t offset, esp;
1506 uint32_t old_cs, old_eip;
1507
1508 /* real mode (simpler !) */
1509 dt = &env->idt;
1510#ifndef VBOX
1511 if (intno * 4 + 3 > dt->limit)
1512#else
1513 if ((unsigned)intno * 4 + 3 > dt->limit)
1514#endif
1515 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1516 ptr = dt->base + intno * 4;
1517 offset = lduw_kernel(ptr);
1518 selector = lduw_kernel(ptr + 2);
1519 esp = ESP;
1520 ssp = env->segs[R_SS].base;
1521 if (is_int)
1522 old_eip = next_eip;
1523 else
1524 old_eip = env->eip;
1525 old_cs = env->segs[R_CS].selector;
1526 /* XXX: use SS segment size ? */
1527 PUSHW(ssp, esp, 0xffff, compute_eflags());
1528 PUSHW(ssp, esp, 0xffff, old_cs);
1529 PUSHW(ssp, esp, 0xffff, old_eip);
1530
1531 /* update processor state */
1532 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1533 env->eip = offset;
1534 env->segs[R_CS].selector = selector;
1535 env->segs[R_CS].base = (selector << 4);
1536 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1537}
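/* Real-mode vectors are 4 bytes each (16-bit offset, then 16-bit segment),
   read from the IDTR base (normally 0). The handler sees FLAGS, CS and IP
   pushed on the 16-bit stack and IF/TF/AC/RF cleared, which is what the code
   above implements. */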
1538
1539/* fake user mode interrupt */
1540void do_interrupt_user(int intno, int is_int, int error_code,
1541 target_ulong next_eip)
1542{
1543 SegmentCache *dt;
1544 target_ulong ptr;
1545 int dpl, cpl, shift;
1546 uint32_t e2;
1547
1548 dt = &env->idt;
1549 if (env->hflags & HF_LMA_MASK) {
1550 shift = 4;
1551 } else {
1552 shift = 3;
1553 }
1554 ptr = dt->base + (intno << shift);
1555 e2 = ldl_kernel(ptr + 4);
1556
1557 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1558 cpl = env->hflags & HF_CPL_MASK;
1559 /* check privilege if software int */
1560 if (is_int && dpl < cpl)
1561 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1562
1563 /* Since we emulate only user space, we cannot do more than
1564 exiting the emulation with the suitable exception and error
1565 code */
1566 if (is_int)
1567 EIP = next_eip;
1568}
1569
1570/*
1571 * Begin execution of an interruption. is_int is TRUE if coming from
1572 * the int instruction. next_eip is the EIP value AFTER the interrupt
1573 * instruction. It is only relevant if is_int is TRUE.
1574 */
1575void do_interrupt(int intno, int is_int, int error_code,
1576 target_ulong next_eip, int is_hw)
1577{
1578 if (loglevel & CPU_LOG_INT) {
1579 if ((env->cr[0] & CR0_PE_MASK)) {
1580 static int count;
1581 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1582 count, intno, error_code, is_int,
1583 env->hflags & HF_CPL_MASK,
1584 env->segs[R_CS].selector, EIP,
1585 (int)env->segs[R_CS].base + EIP,
1586 env->segs[R_SS].selector, ESP);
1587 if (intno == 0x0e) {
1588 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1589 } else {
1590 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1591 }
1592 fprintf(logfile, "\n");
1593 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1594#if 0
1595 {
1596 int i;
1597 uint8_t *ptr;
1598 fprintf(logfile, " code=");
1599 ptr = env->segs[R_CS].base + env->eip;
1600 for(i = 0; i < 16; i++) {
1601 fprintf(logfile, " %02x", ldub(ptr + i));
1602 }
1603 fprintf(logfile, "\n");
1604 }
1605#endif
1606 count++;
1607 }
1608 }
1609 if (env->cr[0] & CR0_PE_MASK) {
1610#ifdef TARGET_X86_64
1611 if (env->hflags & HF_LMA_MASK) {
1612 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1613 } else
1614#endif
1615 {
1616#ifdef VBOX
1617 /* int xx *, v86 code and VME enabled? */
1618 if ( (env->eflags & VM_MASK)
1619 && (env->cr[4] & CR4_VME_MASK)
1620 && is_int
1621 && !is_hw
1622 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1623 )
1624 do_soft_interrupt_vme(intno, error_code, next_eip);
1625 else
1626#endif /* VBOX */
1627 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1628 }
1629 } else {
1630 do_interrupt_real(intno, is_int, error_code, next_eip);
1631 }
1632}
1633
1634/*
1635 * Check nested exceptions and change to double or triple fault if
1636 * needed. It should only be called if this is not an interrupt.
1637 * Returns the new exception number.
1638 */
1639static int check_exception(int intno, int *error_code)
1640{
1641 int first_contributory = env->old_exception == 0 ||
1642 (env->old_exception >= 10 &&
1643 env->old_exception <= 13);
1644 int second_contributory = intno == 0 ||
1645 (intno >= 10 && intno <= 13);
1646
1647 if (loglevel & CPU_LOG_INT)
1648 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1649 env->old_exception, intno);
1650
1651 if (env->old_exception == EXCP08_DBLE)
1652 cpu_abort(env, "triple fault");
1653
1654 if ((first_contributory && second_contributory)
1655 || (env->old_exception == EXCP0E_PAGE &&
1656 (second_contributory || (intno == EXCP0E_PAGE)))) {
1657 intno = EXCP08_DBLE;
1658 *error_code = 0;
1659 }
1660
1661 if (second_contributory || (intno == EXCP0E_PAGE) ||
1662 (intno == EXCP08_DBLE))
1663 env->old_exception = intno;
1664
1665 return intno;
1666}
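/* Vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) form the "contributory"
   class tested above: a contributory exception raised while another
   contributory one is being delivered, or any contributory exception or page
   fault raised while delivering a page fault, is promoted to #DF (vector 8,
   error code 0); a further fault during #DF delivery is a triple fault,
   modelled here with cpu_abort(). */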
1667
1668/*
1669 * Signal an interruption. It is executed in the main CPU loop.
1670 * is_int is TRUE if coming from the int instruction. next_eip is the
1671 * EIP value AFTER the interrupt instruction. It is only relevant if
1672 * is_int is TRUE.
1673 */
1674void raise_interrupt(int intno, int is_int, int error_code,
1675 int next_eip_addend)
1676{
1677#if defined(VBOX) && defined(DEBUG)
1678 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1679#endif
1680 if (!is_int) {
1681 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1682 intno = check_exception(intno, &error_code);
1683 } else {
1684 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1685 }
1686
1687 env->exception_index = intno;
1688 env->error_code = error_code;
1689 env->exception_is_int = is_int;
1690 env->exception_next_eip = env->eip + next_eip_addend;
1691 cpu_loop_exit();
1692}
1693
1694/* shortcuts to generate exceptions */
1695
1696void (raise_exception_err)(int exception_index, int error_code)
1697{
1698 raise_interrupt(exception_index, 0, error_code, 0);
1699}
1700
1701void raise_exception(int exception_index)
1702{
1703 raise_interrupt(exception_index, 0, 0, 0);
1704}
1705
1706/* SMM support */
1707
1708#if defined(CONFIG_USER_ONLY)
1709
1710void do_smm_enter(void)
1711{
1712}
1713
1714void helper_rsm(void)
1715{
1716}
1717
1718#else
1719
1720#ifdef TARGET_X86_64
1721#define SMM_REVISION_ID 0x00020064
1722#else
1723#define SMM_REVISION_ID 0x00020000
1724#endif
1725
1726void do_smm_enter(void)
1727{
1728 target_ulong sm_state;
1729 SegmentCache *dt;
1730 int i, offset;
1731
1732 if (loglevel & CPU_LOG_INT) {
1733 fprintf(logfile, "SMM: enter\n");
1734 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1735 }
1736
1737 env->hflags |= HF_SMM_MASK;
1738 cpu_smm_update(env);
1739
1740 sm_state = env->smbase + 0x8000;
1741
1742#ifdef TARGET_X86_64
1743 for(i = 0; i < 6; i++) {
1744 dt = &env->segs[i];
1745 offset = 0x7e00 + i * 16;
1746 stw_phys(sm_state + offset, dt->selector);
1747 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1748 stl_phys(sm_state + offset + 4, dt->limit);
1749 stq_phys(sm_state + offset + 8, dt->base);
1750 }
1751
1752 stq_phys(sm_state + 0x7e68, env->gdt.base);
1753 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1754
1755 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1756 stq_phys(sm_state + 0x7e78, env->ldt.base);
1757 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1758 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1759
1760 stq_phys(sm_state + 0x7e88, env->idt.base);
1761 stl_phys(sm_state + 0x7e84, env->idt.limit);
1762
1763 stw_phys(sm_state + 0x7e90, env->tr.selector);
1764 stq_phys(sm_state + 0x7e98, env->tr.base);
1765 stl_phys(sm_state + 0x7e94, env->tr.limit);
1766 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1767
1768 stq_phys(sm_state + 0x7ed0, env->efer);
1769
1770 stq_phys(sm_state + 0x7ff8, EAX);
1771 stq_phys(sm_state + 0x7ff0, ECX);
1772 stq_phys(sm_state + 0x7fe8, EDX);
1773 stq_phys(sm_state + 0x7fe0, EBX);
1774 stq_phys(sm_state + 0x7fd8, ESP);
1775 stq_phys(sm_state + 0x7fd0, EBP);
1776 stq_phys(sm_state + 0x7fc8, ESI);
1777 stq_phys(sm_state + 0x7fc0, EDI);
1778 for(i = 8; i < 16; i++)
1779 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1780 stq_phys(sm_state + 0x7f78, env->eip);
1781 stl_phys(sm_state + 0x7f70, compute_eflags());
1782 stl_phys(sm_state + 0x7f68, env->dr[6]);
1783 stl_phys(sm_state + 0x7f60, env->dr[7]);
1784
1785 stl_phys(sm_state + 0x7f48, env->cr[4]);
1786 stl_phys(sm_state + 0x7f50, env->cr[3]);
1787 stl_phys(sm_state + 0x7f58, env->cr[0]);
1788
1789 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1790 stl_phys(sm_state + 0x7f00, env->smbase);
1791#else
1792 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1793 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1794 stl_phys(sm_state + 0x7ff4, compute_eflags());
1795 stl_phys(sm_state + 0x7ff0, env->eip);
1796 stl_phys(sm_state + 0x7fec, EDI);
1797 stl_phys(sm_state + 0x7fe8, ESI);
1798 stl_phys(sm_state + 0x7fe4, EBP);
1799 stl_phys(sm_state + 0x7fe0, ESP);
1800 stl_phys(sm_state + 0x7fdc, EBX);
1801 stl_phys(sm_state + 0x7fd8, EDX);
1802 stl_phys(sm_state + 0x7fd4, ECX);
1803 stl_phys(sm_state + 0x7fd0, EAX);
1804 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1805 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1806
1807 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1808 stl_phys(sm_state + 0x7f64, env->tr.base);
1809 stl_phys(sm_state + 0x7f60, env->tr.limit);
1810 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1811
1812 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1813 stl_phys(sm_state + 0x7f80, env->ldt.base);
1814 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1815 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1816
1817 stl_phys(sm_state + 0x7f74, env->gdt.base);
1818 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1819
1820 stl_phys(sm_state + 0x7f58, env->idt.base);
1821 stl_phys(sm_state + 0x7f54, env->idt.limit);
1822
1823 for(i = 0; i < 6; i++) {
1824 dt = &env->segs[i];
1825 if (i < 3)
1826 offset = 0x7f84 + i * 12;
1827 else
1828 offset = 0x7f2c + (i - 3) * 12;
1829 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1830 stl_phys(sm_state + offset + 8, dt->base);
1831 stl_phys(sm_state + offset + 4, dt->limit);
1832 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1833 }
1834 stl_phys(sm_state + 0x7f14, env->cr[4]);
1835
1836 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1837 stl_phys(sm_state + 0x7ef8, env->smbase);
1838#endif
1839 /* init SMM cpu state */
1840
1841#ifdef TARGET_X86_64
1842 cpu_load_efer(env, 0);
1843#endif
1844 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1845 env->eip = 0x00008000;
1846 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1847 0xffffffff, 0);
1848 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1849 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1850 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1851 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1852 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1853
1854 cpu_x86_update_cr0(env,
1855 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1856 cpu_x86_update_cr4(env, 0);
1857 env->dr[7] = 0x00000400;
1858 CC_OP = CC_OP_EFLAGS;
1859}
1860
1861void helper_rsm(void)
1862{
1863#ifdef VBOX
1864 cpu_abort(env, "helper_rsm");
1865#else /* !VBOX */
1866
1867
1868 target_ulong sm_state;
1869 int i, offset;
1870 uint32_t val;
1871
1872 sm_state = env->smbase + 0x8000;
1873#ifdef TARGET_X86_64
1874 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1875
1876 for(i = 0; i < 6; i++) {
1877 offset = 0x7e00 + i * 16;
1878 cpu_x86_load_seg_cache(env, i,
1879 lduw_phys(sm_state + offset),
1880 ldq_phys(sm_state + offset + 8),
1881 ldl_phys(sm_state + offset + 4),
1882 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1883 }
1884
1885 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1886 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1887
1888 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1889 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1890 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1891 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1892
1893 env->idt.base = ldq_phys(sm_state + 0x7e88);
1894 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1895
1896 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1897 env->tr.base = ldq_phys(sm_state + 0x7e98);
1898 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1899 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1900
1901 EAX = ldq_phys(sm_state + 0x7ff8);
1902 ECX = ldq_phys(sm_state + 0x7ff0);
1903 EDX = ldq_phys(sm_state + 0x7fe8);
1904 EBX = ldq_phys(sm_state + 0x7fe0);
1905 ESP = ldq_phys(sm_state + 0x7fd8);
1906 EBP = ldq_phys(sm_state + 0x7fd0);
1907 ESI = ldq_phys(sm_state + 0x7fc8);
1908 EDI = ldq_phys(sm_state + 0x7fc0);
1909 for(i = 8; i < 16; i++)
1910 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1911 env->eip = ldq_phys(sm_state + 0x7f78);
1912 load_eflags(ldl_phys(sm_state + 0x7f70),
1913 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1914 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1915 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1916
1917 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1918 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1919 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1920
1921 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1922 if (val & 0x20000) {
1923 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1924 }
1925#else
1926 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1927 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1928 load_eflags(ldl_phys(sm_state + 0x7ff4),
1929 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1930 env->eip = ldl_phys(sm_state + 0x7ff0);
1931 EDI = ldl_phys(sm_state + 0x7fec);
1932 ESI = ldl_phys(sm_state + 0x7fe8);
1933 EBP = ldl_phys(sm_state + 0x7fe4);
1934 ESP = ldl_phys(sm_state + 0x7fe0);
1935 EBX = ldl_phys(sm_state + 0x7fdc);
1936 EDX = ldl_phys(sm_state + 0x7fd8);
1937 ECX = ldl_phys(sm_state + 0x7fd4);
1938 EAX = ldl_phys(sm_state + 0x7fd0);
1939 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1940 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1941
1942 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1943 env->tr.base = ldl_phys(sm_state + 0x7f64);
1944 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1945 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1946
1947 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1948 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1949 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1950 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1951
1952 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1953 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1954
1955 env->idt.base = ldl_phys(sm_state + 0x7f58);
1956 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1957
1958 for(i = 0; i < 6; i++) {
1959 if (i < 3)
1960 offset = 0x7f84 + i * 12;
1961 else
1962 offset = 0x7f2c + (i - 3) * 12;
1963 cpu_x86_load_seg_cache(env, i,
1964 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1965 ldl_phys(sm_state + offset + 8),
1966 ldl_phys(sm_state + offset + 4),
1967 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1968 }
1969 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1970
1971 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1972 if (val & 0x20000) {
1973 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1974 }
1975#endif
1976 CC_OP = CC_OP_EFLAGS;
1977 env->hflags &= ~HF_SMM_MASK;
1978 cpu_smm_update(env);
1979
1980 if (loglevel & CPU_LOG_INT) {
1981 fprintf(logfile, "SMM: after RSM\n");
1982 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1983 }
1984#endif /* !VBOX */
1985}
1986
1987#endif /* !CONFIG_USER_ONLY */
1988
1989
1990/* division, flags are undefined */
1991
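/* Note: the divide helpers below implement DIV r/m8, r/m16 and r/m32.  The
 * double-width dividend lives in AX, DX:AX or EDX:EAX, the quotient goes to
 * AL/AX/EAX and the remainder to AH/DX/EDX; a zero divisor and a quotient
 * that does not fit the destination both raise #DE (EXCP00_DIVZ).
 * Worked example for helper_divb_AL: AX = 0x0305 (773), t0 = 10
 *   -> q = 77 (0x4d), r = 3, so AX becomes 0x034d. */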
1992void helper_divb_AL(target_ulong t0)
1993{
1994 unsigned int num, den, q, r;
1995
1996 num = (EAX & 0xffff);
1997 den = (t0 & 0xff);
1998 if (den == 0) {
1999 raise_exception(EXCP00_DIVZ);
2000 }
2001 q = (num / den);
2002 if (q > 0xff)
2003 raise_exception(EXCP00_DIVZ);
2004 q &= 0xff;
2005 r = (num % den) & 0xff;
2006 EAX = (EAX & ~0xffff) | (r << 8) | q;
2007}
2008
2009void helper_idivb_AL(target_ulong t0)
2010{
2011 int num, den, q, r;
2012
2013 num = (int16_t)EAX;
2014 den = (int8_t)t0;
2015 if (den == 0) {
2016 raise_exception(EXCP00_DIVZ);
2017 }
2018 q = (num / den);
2019 if (q != (int8_t)q)
2020 raise_exception(EXCP00_DIVZ);
2021 q &= 0xff;
2022 r = (num % den) & 0xff;
2023 EAX = (EAX & ~0xffff) | (r << 8) | q;
2024}
2025
2026void helper_divw_AX(target_ulong t0)
2027{
2028 unsigned int num, den, q, r;
2029
2030 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2031 den = (t0 & 0xffff);
2032 if (den == 0) {
2033 raise_exception(EXCP00_DIVZ);
2034 }
2035 q = (num / den);
2036 if (q > 0xffff)
2037 raise_exception(EXCP00_DIVZ);
2038 q &= 0xffff;
2039 r = (num % den) & 0xffff;
2040 EAX = (EAX & ~0xffff) | q;
2041 EDX = (EDX & ~0xffff) | r;
2042}
2043
2044void helper_idivw_AX(target_ulong t0)
2045{
2046 int num, den, q, r;
2047
2048 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2049 den = (int16_t)t0;
2050 if (den == 0) {
2051 raise_exception(EXCP00_DIVZ);
2052 }
2053 q = (num / den);
2054 if (q != (int16_t)q)
2055 raise_exception(EXCP00_DIVZ);
2056 q &= 0xffff;
2057 r = (num % den) & 0xffff;
2058 EAX = (EAX & ~0xffff) | q;
2059 EDX = (EDX & ~0xffff) | r;
2060}
2061
2062void helper_divl_EAX(target_ulong t0)
2063{
2064 unsigned int den, r;
2065 uint64_t num, q;
2066
2067 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2068 den = t0;
2069 if (den == 0) {
2070 raise_exception(EXCP00_DIVZ);
2071 }
2072 q = (num / den);
2073 r = (num % den);
2074 if (q > 0xffffffff)
2075 raise_exception(EXCP00_DIVZ);
2076 EAX = (uint32_t)q;
2077 EDX = (uint32_t)r;
2078}
2079
2080void helper_idivl_EAX(target_ulong t0)
2081{
2082 int den, r;
2083 int64_t num, q;
2084
2085 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2086 den = t0;
2087 if (den == 0) {
2088 raise_exception(EXCP00_DIVZ);
2089 }
2090 q = (num / den);
2091 r = (num % den);
2092 if (q != (int32_t)q)
2093 raise_exception(EXCP00_DIVZ);
2094 EAX = (uint32_t)q;
2095 EDX = (uint32_t)r;
2096}
2097
2098/* bcd */
2099
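/* AAM and AAD adjust the unpacked BCD value in AL/AH around a multiply or
 * divide: AAM computes AH = AL / base, AL = AL % base, and AAD computes
 * AL = (AH * base + AL) & 0xff, where base is the instruction's immediate
 * (10 in the usual encoding).  Example: AL = 0x3f (63), AAM 10 -> AH = 6,
 * AL = 3.  Only CC_DST is updated here; ZF/SF/PF are derived from it later,
 * assuming the translator sets the matching CC_OP. */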
2100/* XXX: exception - AAM with an immediate operand of 0 should raise #DE; the division below does not guard against base == 0. */
2101void helper_aam(int base)
2102{
2103 int al, ah;
2104 al = EAX & 0xff;
2105 ah = al / base;
2106 al = al % base;
2107 EAX = (EAX & ~0xffff) | al | (ah << 8);
2108 CC_DST = al;
2109}
2110
2111void helper_aad(int base)
2112{
2113 int al, ah;
2114 al = EAX & 0xff;
2115 ah = (EAX >> 8) & 0xff;
2116 al = ((ah * base) + al) & 0xff;
2117 EAX = (EAX & ~0xffff) | al;
2118 CC_DST = al;
2119}
2120
2121void helper_aaa(void)
2122{
2123 int icarry;
2124 int al, ah, af;
2125 int eflags;
2126
2127 eflags = cc_table[CC_OP].compute_all();
2128 af = eflags & CC_A;
2129 al = EAX & 0xff;
2130 ah = (EAX >> 8) & 0xff;
2131
2132 icarry = (al > 0xf9);
2133 if (((al & 0x0f) > 9 ) || af) {
2134 al = (al + 6) & 0x0f;
2135 ah = (ah + 1 + icarry) & 0xff;
2136 eflags |= CC_C | CC_A;
2137 } else {
2138 eflags &= ~(CC_C | CC_A);
2139 al &= 0x0f;
2140 }
2141 EAX = (EAX & ~0xffff) | al | (ah << 8);
2142 CC_SRC = eflags;
2143 FORCE_RET();
2144}
2145
2146void helper_aas(void)
2147{
2148 int icarry;
2149 int al, ah, af;
2150 int eflags;
2151
2152 eflags = cc_table[CC_OP].compute_all();
2153 af = eflags & CC_A;
2154 al = EAX & 0xff;
2155 ah = (EAX >> 8) & 0xff;
2156
2157 icarry = (al < 6);
2158 if (((al & 0x0f) > 9 ) || af) {
2159 al = (al - 6) & 0x0f;
2160 ah = (ah - 1 - icarry) & 0xff;
2161 eflags |= CC_C | CC_A;
2162 } else {
2163 eflags &= ~(CC_C | CC_A);
2164 al &= 0x0f;
2165 }
2166 EAX = (EAX & ~0xffff) | al | (ah << 8);
2167 CC_SRC = eflags;
2168 FORCE_RET();
2169}
2170
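/* DAA/DAS adjust AL after adding/subtracting two packed BCD operands: add
 * (or subtract) 6 when the low nibble is above 9 or AF was set, then 0x60
 * when the high digit overflowed or CF was set.  Example for DAA:
 * 0x38 + 0x45 = 0x7d, low nibble 0xd > 9, so AL becomes 0x83 (38 + 45 = 83
 * in decimal).  ZF/PF/SF are recomputed by hand below. */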
2171void helper_daa(void)
2172{
2173 int al, af, cf;
2174 int eflags;
2175
2176 eflags = cc_table[CC_OP].compute_all();
2177 cf = eflags & CC_C;
2178 af = eflags & CC_A;
2179 al = EAX & 0xff;
2180
2181 eflags = 0;
2182 if (((al & 0x0f) > 9 ) || af) {
2183 al = (al + 6) & 0xff;
2184 eflags |= CC_A;
2185 }
2186 if ((al > 0x9f) || cf) {
2187 al = (al + 0x60) & 0xff;
2188 eflags |= CC_C;
2189 }
2190 EAX = (EAX & ~0xff) | al;
2191 /* well, speed is not an issue here, so we compute the flags by hand */
2192 eflags |= (al == 0) << 6; /* zf */
2193 eflags |= parity_table[al]; /* pf */
2194 eflags |= (al & 0x80); /* sf */
2195 CC_SRC = eflags;
2196 FORCE_RET();
2197}
2198
2199void helper_das(void)
2200{
2201 int al, al1, af, cf;
2202 int eflags;
2203
2204 eflags = cc_table[CC_OP].compute_all();
2205 cf = eflags & CC_C;
2206 af = eflags & CC_A;
2207 al = EAX & 0xff;
2208
2209 eflags = 0;
2210 al1 = al;
2211 if (((al & 0x0f) > 9 ) || af) {
2212 eflags |= CC_A;
2213 if (al < 6 || cf)
2214 eflags |= CC_C;
2215 al = (al - 6) & 0xff;
2216 }
2217 if ((al1 > 0x99) || cf) {
2218 al = (al - 0x60) & 0xff;
2219 eflags |= CC_C;
2220 }
2221 EAX = (EAX & ~0xff) | al;
2222 /* well, speed is not an issue here, so we compute the flags by hand */
2223 eflags |= (al == 0) << 6; /* zf */
2224 eflags |= parity_table[al]; /* pf */
2225 eflags |= (al & 0x80); /* sf */
2226 CC_SRC = eflags;
2227 FORCE_RET();
2228}
2229
2230void helper_into(int next_eip_addend)
2231{
2232 int eflags;
2233 eflags = cc_table[CC_OP].compute_all();
2234 if (eflags & CC_O) {
2235 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2236 }
2237}
2238
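/* CMPXCHG8B m64: compare EDX:EAX with the quadword at a0; if equal, set ZF
 * and store ECX:EBX, otherwise clear ZF and load the memory value into
 * EDX:EAX.  The store is done in both cases, matching the locked write a
 * real CPU performs even when the comparison fails. */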
2239void helper_cmpxchg8b(target_ulong a0)
2240{
2241 uint64_t d;
2242 int eflags;
2243
2244 eflags = cc_table[CC_OP].compute_all();
2245 d = ldq(a0);
2246 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2247 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2248 eflags |= CC_Z;
2249 } else {
2250 /* always do the store */
2251 stq(a0, d);
2252 EDX = (uint32_t)(d >> 32);
2253 EAX = (uint32_t)d;
2254 eflags &= ~CC_Z;
2255 }
2256 CC_SRC = eflags;
2257}
2258
2259#ifdef TARGET_X86_64
2260void helper_cmpxchg16b(target_ulong a0)
2261{
2262 uint64_t d0, d1;
2263 int eflags;
2264
2265 if ((a0 & 0xf) != 0)
2266 raise_exception(EXCP0D_GPF);
2267 eflags = cc_table[CC_OP].compute_all();
2268 d0 = ldq(a0);
2269 d1 = ldq(a0 + 8);
2270 if (d0 == EAX && d1 == EDX) {
2271 stq(a0, EBX);
2272 stq(a0 + 8, ECX);
2273 eflags |= CC_Z;
2274 } else {
2275 /* always do the store */
2276 stq(a0, d0);
2277 stq(a0 + 8, d1);
2278 EDX = d1;
2279 EAX = d0;
2280 eflags &= ~CC_Z;
2281 }
2282 CC_SRC = eflags;
2283}
2284#endif
2285
2286void helper_single_step(void)
2287{
2288 env->dr[6] |= 0x4000;
2289 raise_exception(EXCP01_SSTP);
2290}
2291
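/* CPUID: in the VBox build the leaf decoding is delegated to remR3CpuId() so
 * that the recompiler reports the same CPUID values as the rest of the VMM;
 * the open-coded leaves below are only compiled for the plain QEMU build. */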
2292void helper_cpuid(void)
2293{
2294#ifndef VBOX
2295 uint32_t index;
2296
2297 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2298
2299 index = (uint32_t)EAX;
2300 /* test if maximum index reached */
2301 if (index & 0x80000000) {
2302 if (index > env->cpuid_xlevel)
2303 index = env->cpuid_level;
2304 } else {
2305 if (index > env->cpuid_level)
2306 index = env->cpuid_level;
2307 }
2308
2309 switch(index) {
2310 case 0:
2311 EAX = env->cpuid_level;
2312 EBX = env->cpuid_vendor1;
2313 EDX = env->cpuid_vendor2;
2314 ECX = env->cpuid_vendor3;
2315 break;
2316 case 1:
2317 EAX = env->cpuid_version;
2318 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2319 ECX = env->cpuid_ext_features;
2320 EDX = env->cpuid_features;
2321 break;
2322 case 2:
2323 /* cache info: needed for Pentium Pro compatibility */
2324 EAX = 1;
2325 EBX = 0;
2326 ECX = 0;
2327 EDX = 0x2c307d;
2328 break;
2329 case 4:
2330 /* cache info: needed for Core compatibility */
2331 switch (ECX) {
2332 case 0: /* L1 dcache info */
2333 EAX = 0x0000121;
2334 EBX = 0x1c0003f;
2335 ECX = 0x000003f;
2336 EDX = 0x0000001;
2337 break;
2338 case 1: /* L1 icache info */
2339 EAX = 0x0000122;
2340 EBX = 0x1c0003f;
2341 ECX = 0x000003f;
2342 EDX = 0x0000001;
2343 break;
2344 case 2: /* L2 cache info */
2345 EAX = 0x0000143;
2346 EBX = 0x3c0003f;
2347 ECX = 0x0000fff;
2348 EDX = 0x0000001;
2349 break;
2350 default: /* end of info */
2351 EAX = 0;
2352 EBX = 0;
2353 ECX = 0;
2354 EDX = 0;
2355 break;
2356 }
2357
2358 break;
2359 case 5:
2360 /* mwait info: needed for Core compatibility */
2361 EAX = 0; /* Smallest monitor-line size in bytes */
2362 EBX = 0; /* Largest monitor-line size in bytes */
2363 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2364 EDX = 0;
2365 break;
2366 case 6:
2367 /* Thermal and Power Leaf */
2368 EAX = 0;
2369 EBX = 0;
2370 ECX = 0;
2371 EDX = 0;
2372 break;
2373 case 9:
2374 /* Direct Cache Access Information Leaf */
2375 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2376 EBX = 0;
2377 ECX = 0;
2378 EDX = 0;
2379 break;
2380 case 0xA:
2381 /* Architectural Performance Monitoring Leaf */
2382 EAX = 0;
2383 EBX = 0;
2384 ECX = 0;
2385 EDX = 0;
2386 break;
2387 case 0x80000000:
2388 EAX = env->cpuid_xlevel;
2389 EBX = env->cpuid_vendor1;
2390 EDX = env->cpuid_vendor2;
2391 ECX = env->cpuid_vendor3;
2392 break;
2393 case 0x80000001:
2394 EAX = env->cpuid_features;
2395 EBX = 0;
2396 ECX = env->cpuid_ext3_features;
2397 EDX = env->cpuid_ext2_features;
2398 break;
2399 case 0x80000002:
2400 case 0x80000003:
2401 case 0x80000004:
2402 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2403 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2404 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2405 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2406 break;
2407 case 0x80000005:
2408 /* cache info (L1 cache) */
2409 EAX = 0x01ff01ff;
2410 EBX = 0x01ff01ff;
2411 ECX = 0x40020140;
2412 EDX = 0x40020140;
2413 break;
2414 case 0x80000006:
2415 /* cache info (L2 cache) */
2416 EAX = 0;
2417 EBX = 0x42004200;
2418 ECX = 0x02008140;
2419 EDX = 0;
2420 break;
2421 case 0x80000008:
2422 /* virtual & phys address size in low 2 bytes. */
2423/* XXX: This value must match the one used in the MMU code. */
2424 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2425 /* 64 bit processor */
2426#if defined(USE_KQEMU)
2427 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2428#else
2429/* XXX: The physical address space is limited to 42 bits in exec.c. */
2430 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2431#endif
2432 } else {
2433#if defined(USE_KQEMU)
2434 EAX = 0x00000020; /* 32 bits physical */
2435#else
2436 if (env->cpuid_features & CPUID_PSE36)
2437 EAX = 0x00000024; /* 36 bits physical */
2438 else
2439 EAX = 0x00000020; /* 32 bits physical */
2440#endif
2441 }
2442 EBX = 0;
2443 ECX = 0;
2444 EDX = 0;
2445 break;
2446 case 0x8000000A:
2447 EAX = 0x00000001;
2448 EBX = 0;
2449 ECX = 0;
2450 EDX = 0;
2451 break;
2452 default:
2453 /* reserved values: zero */
2454 EAX = 0;
2455 EBX = 0;
2456 ECX = 0;
2457 EDX = 0;
2458 break;
2459 }
2460#else /* VBOX */
2461 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2462#endif /* VBOX */
2463}
2464
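/* ENTER with a non-zero nesting level: copy level-1 saved frame pointers
 * from the previous frame onto the new stack, then push t1, the frame-temp
 * value passed in by the translated ENTER sequence.  The 16/32-bit variant
 * masks every stack access with the SS size; the 64-bit variant below uses
 * flat addresses. */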
2465void helper_enter_level(int level, int data32, target_ulong t1)
2466{
2467 target_ulong ssp;
2468 uint32_t esp_mask, esp, ebp;
2469
2470 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2471 ssp = env->segs[R_SS].base;
2472 ebp = EBP;
2473 esp = ESP;
2474 if (data32) {
2475 /* 32 bit */
2476 esp -= 4;
2477 while (--level) {
2478 esp -= 4;
2479 ebp -= 4;
2480 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2481 }
2482 esp -= 4;
2483 stl(ssp + (esp & esp_mask), t1);
2484 } else {
2485 /* 16 bit */
2486 esp -= 2;
2487 while (--level) {
2488 esp -= 2;
2489 ebp -= 2;
2490 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2491 }
2492 esp -= 2;
2493 stw(ssp + (esp & esp_mask), t1);
2494 }
2495}
2496
2497#ifdef TARGET_X86_64
2498void helper_enter64_level(int level, int data64, target_ulong t1)
2499{
2500 target_ulong esp, ebp;
2501 ebp = EBP;
2502 esp = ESP;
2503
2504 if (data64) {
2505 /* 64 bit */
2506 esp -= 8;
2507 while (--level) {
2508 esp -= 8;
2509 ebp -= 8;
2510 stq(esp, ldq(ebp));
2511 }
2512 esp -= 8;
2513 stq(esp, t1);
2514 } else {
2515 /* 16 bit */
2516 esp -= 2;
2517 while (--level) {
2518 esp -= 2;
2519 ebp -= 2;
2520 stw(esp, lduw(ebp));
2521 }
2522 esp -= 2;
2523 stw(esp, t1);
2524 }
2525}
2526#endif
2527
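/* LLDT: load the LDT register.  A null selector just clears base and limit;
 * otherwise the selector must reference the GDT (TI clear) and a present
 * LDT system descriptor (type 2).  In long mode the descriptor is 16 bytes,
 * so the upper half of the base comes from the third dword. */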
2528void helper_lldt(int selector)
2529{
2530 SegmentCache *dt;
2531 uint32_t e1, e2;
2532#ifndef VBOX
2533 int index, entry_limit;
2534#else
2535 unsigned int index, entry_limit;
2536#endif
2537 target_ulong ptr;
2538
2539#ifdef VBOX
2540 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2541 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2542#endif
2543
2544 selector &= 0xffff;
2545 if ((selector & 0xfffc) == 0) {
2546 /* XXX: NULL selector case: invalid LDT */
2547 env->ldt.base = 0;
2548 env->ldt.limit = 0;
2549 } else {
2550 if (selector & 0x4)
2551 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2552 dt = &env->gdt;
2553 index = selector & ~7;
2554#ifdef TARGET_X86_64
2555 if (env->hflags & HF_LMA_MASK)
2556 entry_limit = 15;
2557 else
2558#endif
2559 entry_limit = 7;
2560 if ((index + entry_limit) > dt->limit)
2561 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2562 ptr = dt->base + index;
2563 e1 = ldl_kernel(ptr);
2564 e2 = ldl_kernel(ptr + 4);
2565 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2566 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2567 if (!(e2 & DESC_P_MASK))
2568 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2569#ifdef TARGET_X86_64
2570 if (env->hflags & HF_LMA_MASK) {
2571 uint32_t e3;
2572 e3 = ldl_kernel(ptr + 8);
2573 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2574 env->ldt.base |= (target_ulong)e3 << 32;
2575 } else
2576#endif
2577 {
2578 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2579 }
2580 }
2581 env->ldt.selector = selector;
2582#ifdef VBOX
2583 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2584 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2585#endif
2586}
2587
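/* LTR: load the task register.  The selector must reference a present,
 * available TSS descriptor in the GDT (type 1 for 16-bit, 9 for 32/64-bit);
 * on success the descriptor is rewritten in memory with its busy bit set. */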
2588void helper_ltr(int selector)
2589{
2590 SegmentCache *dt;
2591 uint32_t e1, e2;
2592#ifndef VBOX
2593 int index, type, entry_limit;
2594#else
2595 unsigned int index;
2596 int type, entry_limit;
2597#endif
2598 target_ulong ptr;
2599
2600#ifdef VBOX
2601 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2602 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2603 env->tr.flags, (RTSEL)(selector & 0xffff)));
2604#endif
2605 selector &= 0xffff;
2606 if ((selector & 0xfffc) == 0) {
2607 /* NULL selector case: invalid TR */
2608 env->tr.base = 0;
2609 env->tr.limit = 0;
2610 env->tr.flags = 0;
2611 } else {
2612 if (selector & 0x4)
2613 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2614 dt = &env->gdt;
2615 index = selector & ~7;
2616#ifdef TARGET_X86_64
2617 if (env->hflags & HF_LMA_MASK)
2618 entry_limit = 15;
2619 else
2620#endif
2621 entry_limit = 7;
2622 if ((index + entry_limit) > dt->limit)
2623 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2624 ptr = dt->base + index;
2625 e1 = ldl_kernel(ptr);
2626 e2 = ldl_kernel(ptr + 4);
2627 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2628 if ((e2 & DESC_S_MASK) ||
2629 (type != 1 && type != 9))
2630 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2631 if (!(e2 & DESC_P_MASK))
2632 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2633#ifdef TARGET_X86_64
2634 if (env->hflags & HF_LMA_MASK) {
2635 uint32_t e3, e4;
2636 e3 = ldl_kernel(ptr + 8);
2637 e4 = ldl_kernel(ptr + 12);
2638 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2639 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2640 load_seg_cache_raw_dt(&env->tr, e1, e2);
2641 env->tr.base |= (target_ulong)e3 << 32;
2642 } else
2643#endif
2644 {
2645 load_seg_cache_raw_dt(&env->tr, e1, e2);
2646 }
2647 e2 |= DESC_TSS_BUSY_MASK;
2648 stl_kernel(ptr + 4, e2);
2649 }
2650 env->tr.selector = selector;
2651#ifdef VBOX
2652 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2653 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2654 env->tr.flags, (RTSEL)(selector & 0xffff)));
2655#endif
2656}
2657
2658/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2659void helper_load_seg(int seg_reg, int selector)
2660{
2661 uint32_t e1, e2;
2662 int cpl, dpl, rpl;
2663 SegmentCache *dt;
2664#ifndef VBOX
2665 int index;
2666#else
2667 unsigned int index;
2668#endif
2669 target_ulong ptr;
2670
2671 selector &= 0xffff;
2672 cpl = env->hflags & HF_CPL_MASK;
2673
2674#ifdef VBOX
2675 /* Trying to load a selector with CPL=1? */
2676 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2677 {
2678 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2679 selector = selector & 0xfffc;
2680 }
2681#endif
2682 if ((selector & 0xfffc) == 0) {
2683 /* null selector case */
2684 if (seg_reg == R_SS
2685#ifdef TARGET_X86_64
2686 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2687#endif
2688 )
2689 raise_exception_err(EXCP0D_GPF, 0);
2690 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2691 } else {
2692
2693 if (selector & 0x4)
2694 dt = &env->ldt;
2695 else
2696 dt = &env->gdt;
2697 index = selector & ~7;
2698 if ((index + 7) > dt->limit)
2699 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2700 ptr = dt->base + index;
2701 e1 = ldl_kernel(ptr);
2702 e2 = ldl_kernel(ptr + 4);
2703
2704 if (!(e2 & DESC_S_MASK))
2705 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2706 rpl = selector & 3;
2707 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2708 if (seg_reg == R_SS) {
2709 /* must be writable segment */
2710 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2711 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2712 if (rpl != cpl || dpl != cpl)
2713 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2714 } else {
2715 /* must be readable segment */
2716 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2717 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2718
2719 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2720 /* if not conforming code, test rights */
2721 if (dpl < cpl || dpl < rpl)
2722 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2723 }
2724 }
2725
2726 if (!(e2 & DESC_P_MASK)) {
2727 if (seg_reg == R_SS)
2728 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2729 else
2730 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2731 }
2732
2733 /* set the access bit if not already set */
2734 if (!(e2 & DESC_A_MASK)) {
2735 e2 |= DESC_A_MASK;
2736 stl_kernel(ptr + 4, e2);
2737 }
2738
2739 cpu_x86_load_seg_cache(env, seg_reg, selector,
2740 get_seg_base(e1, e2),
2741 get_seg_limit(e1, e2),
2742 e2);
2743#if 0
2744 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2745 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2746#endif
2747 }
2748}
2749
2750/* protected mode jump */
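/* Far JMP through a descriptor: a code-segment target is checked against the
 * conforming/non-conforming privilege rules and CS is reloaded at the
 * caller's CPL; a TSS or task-gate target goes through switch_tss(); a call
 * gate supplies the real CS:EIP, which is then validated like a direct
 * target. */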
2751void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2752 int next_eip_addend)
2753{
2754 int gate_cs, type;
2755 uint32_t e1, e2, cpl, dpl, rpl, limit;
2756 target_ulong next_eip;
2757
2758#ifdef VBOX
2759 e1 = e2 = 0;
2760#endif
2761 if ((new_cs & 0xfffc) == 0)
2762 raise_exception_err(EXCP0D_GPF, 0);
2763 if (load_segment(&e1, &e2, new_cs) != 0)
2764 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2765 cpl = env->hflags & HF_CPL_MASK;
2766 if (e2 & DESC_S_MASK) {
2767 if (!(e2 & DESC_CS_MASK))
2768 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2769 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2770 if (e2 & DESC_C_MASK) {
2771 /* conforming code segment */
2772 if (dpl > cpl)
2773 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2774 } else {
2775 /* non conforming code segment */
2776 rpl = new_cs & 3;
2777 if (rpl > cpl)
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 if (dpl != cpl)
2780 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2781 }
2782 if (!(e2 & DESC_P_MASK))
2783 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2784 limit = get_seg_limit(e1, e2);
2785 if (new_eip > limit &&
2786 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2787 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2788 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2789 get_seg_base(e1, e2), limit, e2);
2790 EIP = new_eip;
2791 } else {
2792 /* jump to call or task gate */
2793 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2794 rpl = new_cs & 3;
2795 cpl = env->hflags & HF_CPL_MASK;
2796 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2797 switch(type) {
2798 case 1: /* 286 TSS */
2799 case 9: /* 386 TSS */
2800 case 5: /* task gate */
2801 if (dpl < cpl || dpl < rpl)
2802 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2803 next_eip = env->eip + next_eip_addend;
2804 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2805 CC_OP = CC_OP_EFLAGS;
2806 break;
2807 case 4: /* 286 call gate */
2808 case 12: /* 386 call gate */
2809 if ((dpl < cpl) || (dpl < rpl))
2810 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2811 if (!(e2 & DESC_P_MASK))
2812 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2813 gate_cs = e1 >> 16;
2814 new_eip = (e1 & 0xffff);
2815 if (type == 12)
2816 new_eip |= (e2 & 0xffff0000);
2817 if (load_segment(&e1, &e2, gate_cs) != 0)
2818 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2819 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2820 /* must be code segment */
2821 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2822 (DESC_S_MASK | DESC_CS_MASK)))
2823 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2824 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2825 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2826 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2827 if (!(e2 & DESC_P_MASK))
2828#ifdef VBOX /* See page 3-514 of 253666.pdf */
2829 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2830#else
2831 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2832#endif
2833 limit = get_seg_limit(e1, e2);
2834 if (new_eip > limit)
2835 raise_exception_err(EXCP0D_GPF, 0);
2836 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2837 get_seg_base(e1, e2), limit, e2);
2838 EIP = new_eip;
2839 break;
2840 default:
2841 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2842 break;
2843 }
2844 }
2845}
2846
2847/* real mode call */
2848void helper_lcall_real(int new_cs, target_ulong new_eip1,
2849 int shift, int next_eip)
2850{
2851 int new_eip;
2852 uint32_t esp, esp_mask;
2853 target_ulong ssp;
2854
2855 new_eip = new_eip1;
2856 esp = ESP;
2857 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2858 ssp = env->segs[R_SS].base;
2859 if (shift) {
2860 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2861 PUSHL(ssp, esp, esp_mask, next_eip);
2862 } else {
2863 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2864 PUSHW(ssp, esp, esp_mask, next_eip);
2865 }
2866
2867 SET_ESP(esp, esp_mask);
2868 env->eip = new_eip;
2869 env->segs[R_CS].selector = new_cs;
2870 env->segs[R_CS].base = (new_cs << 4);
2871}
2872
2873/* protected mode call */
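/* Far CALL through a descriptor.  The interesting case is a call gate to a
 * more privileged, non-conforming segment: SS:ESP for the target privilege
 * level is fetched from the current TSS, param_count words/dwords are copied
 * from the caller's stack to the new one, the old SS:ESP and CS:EIP are
 * pushed there, and CPL drops to the target segment's DPL. */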
2874void helper_lcall_protected(int new_cs, target_ulong new_eip,
2875 int shift, int next_eip_addend)
2876{
2877 int new_stack, i;
2878 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2879 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2880 uint32_t val, limit, old_sp_mask;
2881 target_ulong ssp, old_ssp, next_eip;
2882
2883#ifdef VBOX
2884 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2885#endif
2886 next_eip = env->eip + next_eip_addend;
2887#ifdef DEBUG_PCALL
2888 if (loglevel & CPU_LOG_PCALL) {
2889 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2890 new_cs, (uint32_t)new_eip, shift);
2891 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2892 }
2893#endif
2894 if ((new_cs & 0xfffc) == 0)
2895 raise_exception_err(EXCP0D_GPF, 0);
2896 if (load_segment(&e1, &e2, new_cs) != 0)
2897 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2898 cpl = env->hflags & HF_CPL_MASK;
2899#ifdef DEBUG_PCALL
2900 if (loglevel & CPU_LOG_PCALL) {
2901 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2902 }
2903#endif
2904 if (e2 & DESC_S_MASK) {
2905 if (!(e2 & DESC_CS_MASK))
2906 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2907 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2908 if (e2 & DESC_C_MASK) {
2909 /* conforming code segment */
2910 if (dpl > cpl)
2911 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2912 } else {
2913 /* non conforming code segment */
2914 rpl = new_cs & 3;
2915 if (rpl > cpl)
2916 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2917 if (dpl != cpl)
2918 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2919 }
2920 if (!(e2 & DESC_P_MASK))
2921 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2922
2923#ifdef TARGET_X86_64
2924 /* XXX: check 16/32 bit cases in long mode */
2925 if (shift == 2) {
2926 target_ulong rsp;
2927 /* 64 bit case */
2928 rsp = ESP;
2929 PUSHQ(rsp, env->segs[R_CS].selector);
2930 PUSHQ(rsp, next_eip);
2931 /* from this point, not restartable */
2932 ESP = rsp;
2933 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2934 get_seg_base(e1, e2),
2935 get_seg_limit(e1, e2), e2);
2936 EIP = new_eip;
2937 } else
2938#endif
2939 {
2940 sp = ESP;
2941 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2942 ssp = env->segs[R_SS].base;
2943 if (shift) {
2944 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2945 PUSHL(ssp, sp, sp_mask, next_eip);
2946 } else {
2947 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2948 PUSHW(ssp, sp, sp_mask, next_eip);
2949 }
2950
2951 limit = get_seg_limit(e1, e2);
2952 if (new_eip > limit)
2953 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2954 /* from this point, not restartable */
2955 SET_ESP(sp, sp_mask);
2956 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2957 get_seg_base(e1, e2), limit, e2);
2958 EIP = new_eip;
2959 }
2960 } else {
2961 /* check gate type */
2962 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2963 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2964 rpl = new_cs & 3;
2965 switch(type) {
2966 case 1: /* available 286 TSS */
2967 case 9: /* available 386 TSS */
2968 case 5: /* task gate */
2969 if (dpl < cpl || dpl < rpl)
2970 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2971 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2972 CC_OP = CC_OP_EFLAGS;
2973 return;
2974 case 4: /* 286 call gate */
2975 case 12: /* 386 call gate */
2976 break;
2977 default:
2978 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2979 break;
2980 }
2981 shift = type >> 3;
2982
2983 if (dpl < cpl || dpl < rpl)
2984 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2985 /* check valid bit */
2986 if (!(e2 & DESC_P_MASK))
2987 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2988 selector = e1 >> 16;
2989 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2990 param_count = e2 & 0x1f;
2991 if ((selector & 0xfffc) == 0)
2992 raise_exception_err(EXCP0D_GPF, 0);
2993
2994 if (load_segment(&e1, &e2, selector) != 0)
2995 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2996 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2997 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2998 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2999 if (dpl > cpl)
3000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3001 if (!(e2 & DESC_P_MASK))
3002 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3003
3004 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3005 /* to inner privilege */
3006 get_ss_esp_from_tss(&ss, &sp, dpl);
3007#ifdef DEBUG_PCALL
3008 if (loglevel & CPU_LOG_PCALL)
3009 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3010 ss, sp, param_count, ESP);
3011#endif
3012 if ((ss & 0xfffc) == 0)
3013 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3014 if ((ss & 3) != dpl)
3015 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3016 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3017 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3018 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3019 if (ss_dpl != dpl)
3020 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3021 if (!(ss_e2 & DESC_S_MASK) ||
3022 (ss_e2 & DESC_CS_MASK) ||
3023 !(ss_e2 & DESC_W_MASK))
3024 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3025 if (!(ss_e2 & DESC_P_MASK))
3026#ifdef VBOX /* See page 3-99 of 253666.pdf */
3027 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3028#else
3029 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3030#endif
3031
3032 // push_size = ((param_count * 2) + 8) << shift;
3033
3034 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3035 old_ssp = env->segs[R_SS].base;
3036
3037 sp_mask = get_sp_mask(ss_e2);
3038 ssp = get_seg_base(ss_e1, ss_e2);
3039 if (shift) {
3040 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3041 PUSHL(ssp, sp, sp_mask, ESP);
3042 for(i = param_count - 1; i >= 0; i--) {
3043 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3044 PUSHL(ssp, sp, sp_mask, val);
3045 }
3046 } else {
3047 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3048 PUSHW(ssp, sp, sp_mask, ESP);
3049 for(i = param_count - 1; i >= 0; i--) {
3050 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3051 PUSHW(ssp, sp, sp_mask, val);
3052 }
3053 }
3054 new_stack = 1;
3055 } else {
3056 /* to same privilege */
3057 sp = ESP;
3058 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3059 ssp = env->segs[R_SS].base;
3060 // push_size = (4 << shift);
3061 new_stack = 0;
3062 }
3063
3064 if (shift) {
3065 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3066 PUSHL(ssp, sp, sp_mask, next_eip);
3067 } else {
3068 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3069 PUSHW(ssp, sp, sp_mask, next_eip);
3070 }
3071
3072 /* from this point, not restartable */
3073
3074 if (new_stack) {
3075 ss = (ss & ~3) | dpl;
3076 cpu_x86_load_seg_cache(env, R_SS, ss,
3077 ssp,
3078 get_seg_limit(ss_e1, ss_e2),
3079 ss_e2);
3080 }
3081
3082 selector = (selector & ~3) | dpl;
3083 cpu_x86_load_seg_cache(env, R_CS, selector,
3084 get_seg_base(e1, e2),
3085 get_seg_limit(e1, e2),
3086 e2);
3087 cpu_x86_set_cpl(env, dpl);
3088 SET_ESP(sp, sp_mask);
3089 EIP = offset;
3090 }
3091#ifdef USE_KQEMU
3092 if (kqemu_is_ok(env)) {
3093 env->exception_index = -1;
3094 cpu_loop_exit();
3095 }
3096#endif
3097}
3098
3099/* real and vm86 mode iret */
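/* IRET in real or virtual-8086 mode: pop IP, CS and FLAGS and reload CS as a
 * real-mode segment.  The VBox-specific block adds the CR4.VME handling:
 * with IOPL < 3 in VM86 mode the popped IF bit is redirected into VIF, and
 * #GP is raised if the popped flags would enable interrupts while VIP is
 * pending, or if they set TF. */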
3100void helper_iret_real(int shift)
3101{
3102 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3103 target_ulong ssp;
3104 int eflags_mask;
3105#ifdef VBOX
3106 bool fVME = false;
3107
3108 remR3TrapClear(env->pVM);
3109#endif /* VBOX */
3110
3111 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3112 sp = ESP;
3113 ssp = env->segs[R_SS].base;
3114 if (shift == 1) {
3115 /* 32 bits */
3116 POPL(ssp, sp, sp_mask, new_eip);
3117 POPL(ssp, sp, sp_mask, new_cs);
3118 new_cs &= 0xffff;
3119 POPL(ssp, sp, sp_mask, new_eflags);
3120 } else {
3121 /* 16 bits */
3122 POPW(ssp, sp, sp_mask, new_eip);
3123 POPW(ssp, sp, sp_mask, new_cs);
3124 POPW(ssp, sp, sp_mask, new_eflags);
3125 }
3126#ifdef VBOX
3127 if ( (env->eflags & VM_MASK)
3128 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3129 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3130 {
3131 fVME = true;
3132 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3133 /* if TF will be set -> #GP */
3134 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3135 || (new_eflags & TF_MASK))
3136 raise_exception(EXCP0D_GPF);
3137 }
3138#endif /* VBOX */
3139 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3140 env->segs[R_CS].selector = new_cs;
3141 env->segs[R_CS].base = (new_cs << 4);
3142 env->eip = new_eip;
3143#ifdef VBOX
3144 if (fVME)
3145 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3146 else
3147#endif
3148 if (env->eflags & VM_MASK)
3149 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3150 else
3151 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3152 if (shift == 0)
3153 eflags_mask &= 0xffff;
3154 load_eflags(new_eflags, eflags_mask);
3155 env->hflags2 &= ~HF2_NMI_MASK;
3156#ifdef VBOX
3157 if (fVME)
3158 {
3159 if (new_eflags & IF_MASK)
3160 env->eflags |= VIF_MASK;
3161 else
3162 env->eflags &= ~VIF_MASK;
3163 }
3164#endif /* VBOX */
3165}
3166
3167#ifndef VBOX
3168static inline void validate_seg(int seg_reg, int cpl)
3169#else /* VBOX */
3170DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3171#endif /* VBOX */
3172{
3173 int dpl;
3174 uint32_t e2;
3175
3176 /* XXX: on x86_64, we do not want to nullify FS and GS because
3177 they may still contain a valid base. I would be interested to
3178 know how a real x86_64 CPU behaves */
3179 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3180 (env->segs[seg_reg].selector & 0xfffc) == 0)
3181 return;
3182
3183 e2 = env->segs[seg_reg].flags;
3184 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3185 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3186 /* data or non conforming code segment */
3187 if (dpl < cpl) {
3188 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3189 }
3190 }
3191}
3192
3193/* protected mode iret */
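/* Common body for protected-mode far RET and IRET: pop CS:EIP (plus EFLAGS
 * for IRET), then either stay at the same privilege level or, when the new
 * RPL is outer, also pop SS:ESP, revalidate the data segment registers
 * against the new CPL and switch stacks.  An IRET whose popped EFLAGS has VM
 * set takes the return_to_vm86 path instead. */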
3194#ifndef VBOX
3195static inline void helper_ret_protected(int shift, int is_iret, int addend)
3196#else /* VBOX */
3197DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3198#endif /* VBOX */
3199{
3200 uint32_t new_cs, new_eflags, new_ss;
3201 uint32_t new_es, new_ds, new_fs, new_gs;
3202 uint32_t e1, e2, ss_e1, ss_e2;
3203 int cpl, dpl, rpl, eflags_mask, iopl;
3204 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3205
3206#ifdef VBOX
3207 ss_e1 = ss_e2 = e1 = e2 = 0;
3208#endif
3209
3210#ifdef TARGET_X86_64
3211 if (shift == 2)
3212 sp_mask = -1;
3213 else
3214#endif
3215 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3216 sp = ESP;
3217 ssp = env->segs[R_SS].base;
3218 new_eflags = 0; /* avoid warning */
3219#ifdef TARGET_X86_64
3220 if (shift == 2) {
3221 POPQ(sp, new_eip);
3222 POPQ(sp, new_cs);
3223 new_cs &= 0xffff;
3224 if (is_iret) {
3225 POPQ(sp, new_eflags);
3226 }
3227 } else
3228#endif
3229 if (shift == 1) {
3230 /* 32 bits */
3231 POPL(ssp, sp, sp_mask, new_eip);
3232 POPL(ssp, sp, sp_mask, new_cs);
3233 new_cs &= 0xffff;
3234 if (is_iret) {
3235 POPL(ssp, sp, sp_mask, new_eflags);
3236#if defined(VBOX) && defined(DEBUG)
3237 printf("iret: new CS %04X\n", new_cs);
3238 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3239 printf("iret: new EFLAGS %08X\n", new_eflags);
3240 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3241#endif
3242 if (new_eflags & VM_MASK)
3243 goto return_to_vm86;
3244 }
3245#ifdef VBOX
3246 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3247 {
3248#ifdef DEBUG
3249 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3250#endif
3251 new_cs = new_cs & 0xfffc;
3252 }
3253#endif
3254 } else {
3255 /* 16 bits */
3256 POPW(ssp, sp, sp_mask, new_eip);
3257 POPW(ssp, sp, sp_mask, new_cs);
3258 if (is_iret)
3259 POPW(ssp, sp, sp_mask, new_eflags);
3260 }
3261#ifdef DEBUG_PCALL
3262 if (loglevel & CPU_LOG_PCALL) {
3263 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3264 new_cs, new_eip, shift, addend);
3265 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3266 }
3267#endif
3268 if ((new_cs & 0xfffc) == 0)
3269 {
3270#if defined(VBOX) && defined(DEBUG)
3271 printf("new_cs & 0xfffc) == 0\n");
3272#endif
3273 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3274 }
3275 if (load_segment(&e1, &e2, new_cs) != 0)
3276 {
3277#if defined(VBOX) && defined(DEBUG)
3278 printf("load_segment failed\n");
3279#endif
3280 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3281 }
3282 if (!(e2 & DESC_S_MASK) ||
3283 !(e2 & DESC_CS_MASK))
3284 {
3285#if defined(VBOX) && defined(DEBUG)
3286 printf("e2 mask %08x\n", e2);
3287#endif
3288 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3289 }
3290 cpl = env->hflags & HF_CPL_MASK;
3291 rpl = new_cs & 3;
3292 if (rpl < cpl)
3293 {
3294#if defined(VBOX) && defined(DEBUG)
3295 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3296#endif
3297 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3298 }
3299 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3300 if (e2 & DESC_C_MASK) {
3301 if (dpl > rpl)
3302 {
3303#if defined(VBOX) && defined(DEBUG)
3304 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3305#endif
3306 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3307 }
3308 } else {
3309 if (dpl != rpl)
3310 {
3311#if defined(VBOX) && defined(DEBUG)
3312 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3313#endif
3314 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3315 }
3316 }
3317 if (!(e2 & DESC_P_MASK))
3318 {
3319#if defined(VBOX) && defined(DEBUG)
3320 printf("DESC_P_MASK e2=%08x\n", e2);
3321#endif
3322 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3323 }
3324
3325 sp += addend;
3326 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3327 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3328 /* return to same privilege level */
3329 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3330 get_seg_base(e1, e2),
3331 get_seg_limit(e1, e2),
3332 e2);
3333 } else {
3334 /* return to different privilege level */
3335#ifdef TARGET_X86_64
3336 if (shift == 2) {
3337 POPQ(sp, new_esp);
3338 POPQ(sp, new_ss);
3339 new_ss &= 0xffff;
3340 } else
3341#endif
3342 if (shift == 1) {
3343 /* 32 bits */
3344 POPL(ssp, sp, sp_mask, new_esp);
3345 POPL(ssp, sp, sp_mask, new_ss);
3346 new_ss &= 0xffff;
3347 } else {
3348 /* 16 bits */
3349 POPW(ssp, sp, sp_mask, new_esp);
3350 POPW(ssp, sp, sp_mask, new_ss);
3351 }
3352#ifdef DEBUG_PCALL
3353 if (loglevel & CPU_LOG_PCALL) {
3354 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3355 new_ss, new_esp);
3356 }
3357#endif
3358 if ((new_ss & 0xfffc) == 0) {
3359#ifdef TARGET_X86_64
3360 /* NULL ss is allowed in long mode if cpl != 3*/
3361 /* XXX: test CS64 ? */
3362 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3363 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3364 0, 0xffffffff,
3365 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3366 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3367 DESC_W_MASK | DESC_A_MASK);
3368 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3369 } else
3370#endif
3371 {
3372 raise_exception_err(EXCP0D_GPF, 0);
3373 }
3374 } else {
3375 if ((new_ss & 3) != rpl)
3376 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3377 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3378 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3379 if (!(ss_e2 & DESC_S_MASK) ||
3380 (ss_e2 & DESC_CS_MASK) ||
3381 !(ss_e2 & DESC_W_MASK))
3382 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3383 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3384 if (dpl != rpl)
3385 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3386 if (!(ss_e2 & DESC_P_MASK))
3387 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3388 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3389 get_seg_base(ss_e1, ss_e2),
3390 get_seg_limit(ss_e1, ss_e2),
3391 ss_e2);
3392 }
3393
3394 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3395 get_seg_base(e1, e2),
3396 get_seg_limit(e1, e2),
3397 e2);
3398 cpu_x86_set_cpl(env, rpl);
3399 sp = new_esp;
3400#ifdef TARGET_X86_64
3401 if (env->hflags & HF_CS64_MASK)
3402 sp_mask = -1;
3403 else
3404#endif
3405 sp_mask = get_sp_mask(ss_e2);
3406
3407 /* validate data segments */
3408 validate_seg(R_ES, rpl);
3409 validate_seg(R_DS, rpl);
3410 validate_seg(R_FS, rpl);
3411 validate_seg(R_GS, rpl);
3412
3413 sp += addend;
3414 }
3415 SET_ESP(sp, sp_mask);
3416 env->eip = new_eip;
3417 if (is_iret) {
3418 /* NOTE: 'cpl' is the _old_ CPL */
3419 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3420 if (cpl == 0)
3421#ifdef VBOX
3422 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3423#else
3424 eflags_mask |= IOPL_MASK;
3425#endif
3426 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3427 if (cpl <= iopl)
3428 eflags_mask |= IF_MASK;
3429 if (shift == 0)
3430 eflags_mask &= 0xffff;
3431 load_eflags(new_eflags, eflags_mask);
3432 }
3433 return;
3434
3435 return_to_vm86:
3436 POPL(ssp, sp, sp_mask, new_esp);
3437 POPL(ssp, sp, sp_mask, new_ss);
3438 POPL(ssp, sp, sp_mask, new_es);
3439 POPL(ssp, sp, sp_mask, new_ds);
3440 POPL(ssp, sp, sp_mask, new_fs);
3441 POPL(ssp, sp, sp_mask, new_gs);
3442
3443 /* modify processor state */
3444 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3445 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3446 load_seg_vm(R_CS, new_cs & 0xffff);
3447 cpu_x86_set_cpl(env, 3);
3448 load_seg_vm(R_SS, new_ss & 0xffff);
3449 load_seg_vm(R_ES, new_es & 0xffff);
3450 load_seg_vm(R_DS, new_ds & 0xffff);
3451 load_seg_vm(R_FS, new_fs & 0xffff);
3452 load_seg_vm(R_GS, new_gs & 0xffff);
3453
3454 env->eip = new_eip & 0xffff;
3455 ESP = new_esp;
3456}
3457
3458void helper_iret_protected(int shift, int next_eip)
3459{
3460 int tss_selector, type;
3461 uint32_t e1, e2;
3462
3463#ifdef VBOX
3464 e1 = e2 = 0;
3465 remR3TrapClear(env->pVM);
3466#endif
3467
3468 /* specific case for TSS */
3469 if (env->eflags & NT_MASK) {
3470#ifdef TARGET_X86_64
3471 if (env->hflags & HF_LMA_MASK)
3472 raise_exception_err(EXCP0D_GPF, 0);
3473#endif
3474 tss_selector = lduw_kernel(env->tr.base + 0);
3475 if (tss_selector & 4)
3476 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3477 if (load_segment(&e1, &e2, tss_selector) != 0)
3478 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3479 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3480 /* NOTE: we check both segment and busy TSS */
3481 if (type != 3)
3482 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3483 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3484 } else {
3485 helper_ret_protected(shift, 1, 0);
3486 }
3487 env->hflags2 &= ~HF2_NMI_MASK;
3488#ifdef USE_KQEMU
3489 if (kqemu_is_ok(env)) {
3490 CC_OP = CC_OP_EFLAGS;
3491 env->exception_index = -1;
3492 cpu_loop_exit();
3493 }
3494#endif
3495}
3496
3497void helper_lret_protected(int shift, int addend)
3498{
3499 helper_ret_protected(shift, 0, addend);
3500#ifdef USE_KQEMU
3501 if (kqemu_is_ok(env)) {
3502 env->exception_index = -1;
3503 cpu_loop_exit();
3504 }
3505#endif
3506}
3507
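/* SYSENTER: #GP if IA32_SYSENTER_CS is zero, otherwise switch to CPL 0 with
 * flat CS/SS segments derived from sysenter_cs (SS = CS + 8) and load
 * ESP/EIP from the SYSENTER_ESP/EIP MSRs; VM, IF and RF are cleared. */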
3508void helper_sysenter(void)
3509{
3510 if (env->sysenter_cs == 0) {
3511 raise_exception_err(EXCP0D_GPF, 0);
3512 }
3513 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3514 cpu_x86_set_cpl(env, 0);
3515
3516#ifdef TARGET_X86_64
3517 if (env->hflags & HF_LMA_MASK) {
3518 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3519 0, 0xffffffff,
3520 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3521 DESC_S_MASK |
3522 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3523 } else
3524#endif
3525 {
3526 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3527 0, 0xffffffff,
3528 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3529 DESC_S_MASK |
3530 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3531 }
3532 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3533 0, 0xffffffff,
3534 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3535 DESC_S_MASK |
3536 DESC_W_MASK | DESC_A_MASK);
3537 ESP = env->sysenter_esp;
3538 EIP = env->sysenter_eip;
3539}
3540
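/* SYSEXIT: the return path back to CPL 3.  CS/SS are flat segments at
 * sysenter_cs + 16/24 (or + 32/40 for a 64-bit return), ESP is taken from
 * ECX and EIP from EDX; executing it outside CPL 0 or with sysenter_cs
 * unset raises #GP. */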
3541void helper_sysexit(int dflag)
3542{
3543 int cpl;
3544
3545 cpl = env->hflags & HF_CPL_MASK;
3546 if (env->sysenter_cs == 0 || cpl != 0) {
3547 raise_exception_err(EXCP0D_GPF, 0);
3548 }
3549 cpu_x86_set_cpl(env, 3);
3550#ifdef TARGET_X86_64
3551 if (dflag == 2) {
3552 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3553 0, 0xffffffff,
3554 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3555 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3556 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3557 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3558 0, 0xffffffff,
3559 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3560 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3561 DESC_W_MASK | DESC_A_MASK);
3562 } else
3563#endif
3564 {
3565 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3566 0, 0xffffffff,
3567 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3568 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3569 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3570 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3571 0, 0xffffffff,
3572 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3573 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3574 DESC_W_MASK | DESC_A_MASK);
3575 }
3576 ESP = ECX;
3577 EIP = EDX;
3578#ifdef USE_KQEMU
3579 if (kqemu_is_ok(env)) {
3580 env->exception_index = -1;
3581 cpu_loop_exit();
3582 }
3583#endif
3584}
3585
3586#if defined(CONFIG_USER_ONLY)
3587target_ulong helper_read_crN(int reg)
3588{
3589 return 0;
3590}
3591
3592void helper_write_crN(int reg, target_ulong t0)
3593{
3594}
3595#else
3596target_ulong helper_read_crN(int reg)
3597{
3598 target_ulong val;
3599
3600 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3601 switch(reg) {
3602 default:
3603 val = env->cr[reg];
3604 break;
3605 case 8:
3606 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3607 val = cpu_get_apic_tpr(env);
3608 } else {
3609 val = env->v_tpr;
3610 }
3611 break;
3612 }
3613 return val;
3614}
3615
3616void helper_write_crN(int reg, target_ulong t0)
3617{
3618 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3619 switch(reg) {
3620 case 0:
3621 cpu_x86_update_cr0(env, t0);
3622 break;
3623 case 3:
3624 cpu_x86_update_cr3(env, t0);
3625 break;
3626 case 4:
3627 cpu_x86_update_cr4(env, t0);
3628 break;
3629 case 8:
3630 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3631 cpu_set_apic_tpr(env, t0);
3632 }
3633 env->v_tpr = t0 & 0x0f;
3634 break;
3635 default:
3636 env->cr[reg] = t0;
3637 break;
3638 }
3639}
3640#endif
3641
3642void helper_lmsw(target_ulong t0)
3643{
3644 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3645 if already set to one. */
3646 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3647 helper_write_crN(0, t0);
3648}
3649
3650void helper_clts(void)
3651{
3652 env->cr[0] &= ~CR0_TS_MASK;
3653 env->hflags &= ~HF_TS_MASK;
3654}
3655
3656/* XXX: do more */
3657void helper_movl_drN_T0(int reg, target_ulong t0)
3658{
3659 env->dr[reg] = t0;
3660}
3661
3662void helper_invlpg(target_ulong addr)
3663{
3664 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3665 tlb_flush_page(env, addr);
3666}
3667
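/* RDTSC: #GP when CR4.TSD is set and CPL != 0; otherwise return
 * cpu_get_tsc() plus env->tsc_offset, split across EDX:EAX. */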
3668void helper_rdtsc(void)
3669{
3670 uint64_t val;
3671
3672 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3673 raise_exception(EXCP0D_GPF);
3674 }
3675 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3676
3677 val = cpu_get_tsc(env) + env->tsc_offset;
3678 EAX = (uint32_t)(val);
3679 EDX = (uint32_t)(val >> 32);
3680}
3681
3682#ifdef VBOX
3683void helper_rdtscp(void)
3684{
3685 uint64_t val;
3686 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3687 raise_exception(EXCP0D_GPF);
3688 }
3689
3690 val = cpu_get_tsc(env);
3691 EAX = (uint32_t)(val);
3692 EDX = (uint32_t)(val >> 32);
3693 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3694 ECX = (uint32_t)(val);
3695 else
3696 ECX = 0;
3697}
3698#endif
3699
3700void helper_rdpmc(void)
3701{
3702#ifdef VBOX
3703 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3704 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3705 raise_exception(EXCP0D_GPF);
3706 }
3707 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3708 EAX = 0;
3709 EDX = 0;
3710#else
3711 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3712 raise_exception(EXCP0D_GPF);
3713 }
3714 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3715
3716 /* currently unimplemented */
3717 raise_exception_err(EXCP06_ILLOP, 0);
3718#endif
3719}
3720
3721#if defined(CONFIG_USER_ONLY)
3722void helper_wrmsr(void)
3723{
3724}
3725
3726void helper_rdmsr(void)
3727{
3728}
3729#else
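/* WRMSR/RDMSR: ECX selects the MSR and the value travels in EDX:EAX.  The
 * VBox build additionally pushes every write through to CPUM via cpu_wrmsr()
 * and lets reads of unhandled MSRs fall back to cpu_rdmsr(); unknown MSRs
 * currently do not raise #GP (see the XXX / @todo comments below). */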
3730void helper_wrmsr(void)
3731{
3732 uint64_t val;
3733
3734 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3735
3736 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3737
3738 switch((uint32_t)ECX) {
3739 case MSR_IA32_SYSENTER_CS:
3740 env->sysenter_cs = val & 0xffff;
3741 break;
3742 case MSR_IA32_SYSENTER_ESP:
3743 env->sysenter_esp = val;
3744 break;
3745 case MSR_IA32_SYSENTER_EIP:
3746 env->sysenter_eip = val;
3747 break;
3748 case MSR_IA32_APICBASE:
3749#ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3750 cpu_set_apic_base(env, val);
3751#endif
3752 break;
3753 case MSR_EFER:
3754 {
3755 uint64_t update_mask;
3756 update_mask = 0;
3757 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3758 update_mask |= MSR_EFER_SCE;
3759 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3760 update_mask |= MSR_EFER_LME;
3761 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3762 update_mask |= MSR_EFER_FFXSR;
3763 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3764 update_mask |= MSR_EFER_NXE;
3765 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3766 update_mask |= MSR_EFER_SVME;
3767 cpu_load_efer(env, (env->efer & ~update_mask) |
3768 (val & update_mask));
3769 }
3770 break;
3771 case MSR_STAR:
3772 env->star = val;
3773 break;
3774 case MSR_PAT:
3775 env->pat = val;
3776 break;
3777 case MSR_VM_HSAVE_PA:
3778 env->vm_hsave = val;
3779 break;
3780#ifdef TARGET_X86_64
3781 case MSR_LSTAR:
3782 env->lstar = val;
3783 break;
3784 case MSR_CSTAR:
3785 env->cstar = val;
3786 break;
3787 case MSR_FMASK:
3788 env->fmask = val;
3789 break;
3790 case MSR_FSBASE:
3791 env->segs[R_FS].base = val;
3792 break;
3793 case MSR_GSBASE:
3794 env->segs[R_GS].base = val;
3795 break;
3796 case MSR_KERNELGSBASE:
3797 env->kernelgsbase = val;
3798 break;
3799#endif
3800 default:
3801#ifndef VBOX
3802 /* XXX: exception ? */
3803#endif
3804 break;
3805 }
3806
3807#ifdef VBOX
3808 /* call CPUM. */
3809 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3810 {
3811 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3812 }
3813#endif
3814}
3815
3816void helper_rdmsr(void)
3817{
3818 uint64_t val;
3819 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3820
3821 switch((uint32_t)ECX) {
3822 case MSR_IA32_SYSENTER_CS:
3823 val = env->sysenter_cs;
3824 break;
3825 case MSR_IA32_SYSENTER_ESP:
3826 val = env->sysenter_esp;
3827 break;
3828 case MSR_IA32_SYSENTER_EIP:
3829 val = env->sysenter_eip;
3830 break;
3831 case MSR_IA32_APICBASE:
3832 val = cpu_get_apic_base(env);
3833 break;
3834 case MSR_EFER:
3835 val = env->efer;
3836 break;
3837 case MSR_STAR:
3838 val = env->star;
3839 break;
3840 case MSR_PAT:
3841 val = env->pat;
3842 break;
3843 case MSR_VM_HSAVE_PA:
3844 val = env->vm_hsave;
3845 break;
3846#ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3847 case MSR_IA32_PERF_STATUS:
3848 /* tsc_increment_by_tick */
3849 val = 1000ULL;
3850 /* CPU multiplier */
3851 val |= ((uint64_t)4ULL << 40);
3852 break;
3853#endif
3854#ifdef TARGET_X86_64
3855 case MSR_LSTAR:
3856 val = env->lstar;
3857 break;
3858 case MSR_CSTAR:
3859 val = env->cstar;
3860 break;
3861 case MSR_FMASK:
3862 val = env->fmask;
3863 break;
3864 case MSR_FSBASE:
3865 val = env->segs[R_FS].base;
3866 break;
3867 case MSR_GSBASE:
3868 val = env->segs[R_GS].base;
3869 break;
3870 case MSR_KERNELGSBASE:
3871 val = env->kernelgsbase;
3872 break;
3873#endif
3874#ifdef USE_KQEMU
3875 case MSR_QPI_COMMBASE:
3876 if (env->kqemu_enabled) {
3877 val = kqemu_comm_base;
3878 } else {
3879 val = 0;
3880 }
3881 break;
3882#endif
3883 default:
3884#ifndef VBOX
3885 /* XXX: exception ? */
3886 val = 0;
3887#else /* VBOX */
3888 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3889 {
3890 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3891 val = 0;
3892 }
3893#endif
3894 break;
3895 }
3896 EAX = (uint32_t)(val);
3897 EDX = (uint32_t)(val >> 32);
3898
3899#ifdef VBOX_STRICT
3900 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3901 val = 0;
3902 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3903#endif
3904}
3905#endif
3906
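/* LSL/LAR/VERR/VERW share one pattern: load the descriptor for the selector,
 * apply the type and DPL/RPL/CPL visibility checks, and report success
 * through ZF (via CC_SRC) instead of faulting.  LSL returns the expanded
 * segment limit, LAR the access-rights bytes masked with 0x00f0ff00. */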
3907target_ulong helper_lsl(target_ulong selector1)
3908{
3909 unsigned int limit;
3910 uint32_t e1, e2, eflags, selector;
3911 int rpl, dpl, cpl, type;
3912
3913 selector = selector1 & 0xffff;
3914 eflags = cc_table[CC_OP].compute_all();
3915 if (load_segment(&e1, &e2, selector) != 0)
3916 goto fail;
3917 rpl = selector & 3;
3918 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3919 cpl = env->hflags & HF_CPL_MASK;
3920 if (e2 & DESC_S_MASK) {
3921 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3922 /* conforming */
3923 } else {
3924 if (dpl < cpl || dpl < rpl)
3925 goto fail;
3926 }
3927 } else {
3928 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3929 switch(type) {
3930 case 1:
3931 case 2:
3932 case 3:
3933 case 9:
3934 case 11:
3935 break;
3936 default:
3937 goto fail;
3938 }
3939 if (dpl < cpl || dpl < rpl) {
3940 fail:
3941 CC_SRC = eflags & ~CC_Z;
3942 return 0;
3943 }
3944 }
3945 limit = get_seg_limit(e1, e2);
3946 CC_SRC = eflags | CC_Z;
3947 return limit;
3948}
3949
3950target_ulong helper_lar(target_ulong selector1)
3951{
3952 uint32_t e1, e2, eflags, selector;
3953 int rpl, dpl, cpl, type;
3954
3955 selector = selector1 & 0xffff;
3956 eflags = cc_table[CC_OP].compute_all();
3957 if ((selector & 0xfffc) == 0)
3958 goto fail;
3959 if (load_segment(&e1, &e2, selector) != 0)
3960 goto fail;
3961 rpl = selector & 3;
3962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3963 cpl = env->hflags & HF_CPL_MASK;
3964 if (e2 & DESC_S_MASK) {
3965 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3966 /* conforming */
3967 } else {
3968 if (dpl < cpl || dpl < rpl)
3969 goto fail;
3970 }
3971 } else {
3972 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
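        /* system descriptors: LAR additionally accepts call gates (4, 12)
           and the task gate (5), since gates carry access rights too;
           interrupt/trap gates still fail */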
3973 switch(type) {
3974 case 1:
3975 case 2:
3976 case 3:
3977 case 4:
3978 case 5:
3979 case 9:
3980 case 11:
3981 case 12:
3982 break;
3983 default:
3984 goto fail;
3985 }
3986 if (dpl < cpl || dpl < rpl) {
3987 fail:
3988 CC_SRC = eflags & ~CC_Z;
3989 return 0;
3990 }
3991 }
3992 CC_SRC = eflags | CC_Z;
3993 return e2 & 0x00f0ff00;
3994}
3995
3996void helper_verr(target_ulong selector1)
3997{
3998 uint32_t e1, e2, eflags, selector;
3999 int rpl, dpl, cpl;
4000
4001 selector = selector1 & 0xffff;
4002 eflags = cc_table[CC_OP].compute_all();
4003 if ((selector & 0xfffc) == 0)
4004 goto fail;
4005 if (load_segment(&e1, &e2, selector) != 0)
4006 goto fail;
4007 if (!(e2 & DESC_S_MASK))
4008 goto fail;
4009 rpl = selector & 3;
4010 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4011 cpl = env->hflags & HF_CPL_MASK;
4012 if (e2 & DESC_CS_MASK) {
4013 if (!(e2 & DESC_R_MASK))
4014 goto fail;
4015 if (!(e2 & DESC_C_MASK)) {
4016 if (dpl < cpl || dpl < rpl)
4017 goto fail;
4018 }
4019 } else {
4020 if (dpl < cpl || dpl < rpl) {
4021 fail:
4022 CC_SRC = eflags & ~CC_Z;
4023 return;
4024 }
4025 }
4026 CC_SRC = eflags | CC_Z;
4027}
4028
4029void helper_verw(target_ulong selector1)
4030{
4031 uint32_t e1, e2, eflags, selector;
4032 int rpl, dpl, cpl;
4033
4034 selector = selector1 & 0xffff;
4035 eflags = cc_table[CC_OP].compute_all();
4036 if ((selector & 0xfffc) == 0)
4037 goto fail;
4038 if (load_segment(&e1, &e2, selector) != 0)
4039 goto fail;
4040 if (!(e2 & DESC_S_MASK))
4041 goto fail;
4042 rpl = selector & 3;
4043 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4044 cpl = env->hflags & HF_CPL_MASK;
4045 if (e2 & DESC_CS_MASK) {
4046 goto fail;
4047 } else {
4048 if (dpl < cpl || dpl < rpl)
4049 goto fail;
4050 if (!(e2 & DESC_W_MASK)) {
4051 fail:
4052 CC_SRC = eflags & ~CC_Z;
4053 return;
4054 }
4055 }
4056 CC_SRC = eflags | CC_Z;
4057}
4058
4059/* x87 FPU helpers */
4060
4061static void fpu_set_exception(int mask)
4062{
4063 env->fpus |= mask;
4064 if (env->fpus & (~env->fpuc & FPUC_EM))
4065 env->fpus |= FPUS_SE | FPUS_B;
4066}
4067
4068#ifndef VBOX
4069static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4070#else /* VBOX */
4071DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4072#endif /* VBOX */
4073{
4074 if (b == 0.0)
4075 fpu_set_exception(FPUS_ZE);
4076 return a / b;
4077}
4078
4079void fpu_raise_exception(void)
4080{
4081 if (env->cr[0] & CR0_NE_MASK) {
4082 raise_exception(EXCP10_COPR);
4083 }
4084#if !defined(CONFIG_USER_ONLY)
4085 else {
4086 cpu_set_ferr(env);
4087 }
4088#endif
4089}
4090
4091void helper_flds_FT0(uint32_t val)
4092{
4093 union {
4094 float32 f;
4095 uint32_t i;
4096 } u;
4097 u.i = val;
4098 FT0 = float32_to_floatx(u.f, &env->fp_status);
4099}
4100
4101void helper_fldl_FT0(uint64_t val)
4102{
4103 union {
4104 float64 f;
4105 uint64_t i;
4106 } u;
4107 u.i = val;
4108 FT0 = float64_to_floatx(u.f, &env->fp_status);
4109}
4110
4111void helper_fildl_FT0(int32_t val)
4112{
4113 FT0 = int32_to_floatx(val, &env->fp_status);
4114}
4115
4116void helper_flds_ST0(uint32_t val)
4117{
4118 int new_fpstt;
4119 union {
4120 float32 f;
4121 uint32_t i;
4122 } u;
4123 new_fpstt = (env->fpstt - 1) & 7;
4124 u.i = val;
4125 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4126 env->fpstt = new_fpstt;
4127 env->fptags[new_fpstt] = 0; /* validate stack entry */
4128}
4129
4130void helper_fldl_ST0(uint64_t val)
4131{
4132 int new_fpstt;
4133 union {
4134 float64 f;
4135 uint64_t i;
4136 } u;
4137 new_fpstt = (env->fpstt - 1) & 7;
4138 u.i = val;
4139 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4140 env->fpstt = new_fpstt;
4141 env->fptags[new_fpstt] = 0; /* validate stack entry */
4142}
4143
4144void helper_fildl_ST0(int32_t val)
4145{
4146 int new_fpstt;
4147 new_fpstt = (env->fpstt - 1) & 7;
4148 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4149 env->fpstt = new_fpstt;
4150 env->fptags[new_fpstt] = 0; /* validate stack entry */
4151}
4152
4153void helper_fildll_ST0(int64_t val)
4154{
4155 int new_fpstt;
4156 new_fpstt = (env->fpstt - 1) & 7;
4157 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4158 env->fpstt = new_fpstt;
4159 env->fptags[new_fpstt] = 0; /* validate stack entry */
4160}
4161
4162#ifndef VBOX
4163uint32_t helper_fsts_ST0(void)
4164#else
4165RTCCUINTREG helper_fsts_ST0(void)
4166#endif
4167{
4168 union {
4169 float32 f;
4170 uint32_t i;
4171 } u;
4172 u.f = floatx_to_float32(ST0, &env->fp_status);
4173 return u.i;
4174}
4175
4176uint64_t helper_fstl_ST0(void)
4177{
4178 union {
4179 float64 f;
4180 uint64_t i;
4181 } u;
4182 u.f = floatx_to_float64(ST0, &env->fp_status);
4183 return u.i;
4184}
4185#ifndef VBOX
4186int32_t helper_fist_ST0(void)
4187#else
4188RTCCINTREG helper_fist_ST0(void)
4189#endif
4190{
4191 int32_t val;
4192 val = floatx_to_int32(ST0, &env->fp_status);
4193 if (val != (int16_t)val)
4194 val = -32768;
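            /* out of range for a 16-bit store: return the integer indefinite (0x8000) */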
4195 return val;
4196}
4197
4198#ifndef VBOX
4199int32_t helper_fistl_ST0(void)
4200#else
4201RTCCINTREG helper_fistl_ST0(void)
4202#endif
4203{
4204 int32_t val;
4205 val = floatx_to_int32(ST0, &env->fp_status);
4206 return val;
4207}
4208
4209int64_t helper_fistll_ST0(void)
4210{
4211 int64_t val;
4212 val = floatx_to_int64(ST0, &env->fp_status);
4213 return val;
4214}
4215
4216#ifndef VBOX
4217int32_t helper_fistt_ST0(void)
4218#else
4219RTCCINTREG helper_fistt_ST0(void)
4220#endif
4221{
4222 int32_t val;
4223 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4224 if (val != (int16_t)val)
4225 val = -32768;
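            /* out of range for a 16-bit store: return the integer indefinite (0x8000) */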
4226 return val;
4227}
4228
4229#ifndef VBOX
4230int32_t helper_fisttl_ST0(void)
4231#else
4232RTCCINTREG helper_fisttl_ST0(void)
4233#endif
4234{
4235 int32_t val;
4236 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4237 return val;
4238}
4239
4240int64_t helper_fisttll_ST0(void)
4241{
4242 int64_t val;
4243 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4244 return val;
4245}
4246
4247void helper_fldt_ST0(target_ulong ptr)
4248{
4249 int new_fpstt;
4250 new_fpstt = (env->fpstt - 1) & 7;
4251 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4252 env->fpstt = new_fpstt;
4253 env->fptags[new_fpstt] = 0; /* validate stack entry */
4254}
4255
4256void helper_fstt_ST0(target_ulong ptr)
4257{
4258 helper_fstt(ST0, ptr);
4259}
4260
4261void helper_fpush(void)
4262{
4263 fpush();
4264}
4265
4266void helper_fpop(void)
4267{
4268 fpop();
4269}
4270
4271void helper_fdecstp(void)
4272{
4273 env->fpstt = (env->fpstt - 1) & 7;
4274 env->fpus &= (~0x4700);
4275}
4276
4277void helper_fincstp(void)
4278{
4279 env->fpstt = (env->fpstt + 1) & 7;
4280 env->fpus &= (~0x4700);
4281}
4282
4283/* FPU move */
4284
4285void helper_ffree_STN(int st_index)
4286{
4287 env->fptags[(env->fpstt + st_index) & 7] = 1;
4288}
4289
4290void helper_fmov_ST0_FT0(void)
4291{
4292 ST0 = FT0;
4293}
4294
4295void helper_fmov_FT0_STN(int st_index)
4296{
4297 FT0 = ST(st_index);
4298}
4299
4300void helper_fmov_ST0_STN(int st_index)
4301{
4302 ST0 = ST(st_index);
4303}
4304
4305void helper_fmov_STN_ST0(int st_index)
4306{
4307 ST(st_index) = ST0;
4308}
4309
4310void helper_fxchg_ST0_STN(int st_index)
4311{
4312 CPU86_LDouble tmp;
4313 tmp = ST(st_index);
4314 ST(st_index) = ST0;
4315 ST0 = tmp;
4316}
4317
4318/* FPU operations */
4319
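/* FPU status word condition code patterns for FCOM-style compares, indexed by
   floatx_compare() result + 1: less -> C0, equal -> C3, greater -> none,
   unordered -> C3|C2|C0. */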
4320static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4321
4322void helper_fcom_ST0_FT0(void)
4323{
4324 int ret;
4325
4326 ret = floatx_compare(ST0, FT0, &env->fp_status);
4327 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4328 FORCE_RET();
4329}
4330
4331void helper_fucom_ST0_FT0(void)
4332{
4333 int ret;
4334
4335 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4336 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4337 FORCE_RET();
4338}
4339
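/* EFLAGS patterns for FCOMI-style compares, indexed the same way:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */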
4340static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4341
4342void helper_fcomi_ST0_FT0(void)
4343{
4344 int eflags;
4345 int ret;
4346
4347 ret = floatx_compare(ST0, FT0, &env->fp_status);
4348 eflags = cc_table[CC_OP].compute_all();
4349 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4350 CC_SRC = eflags;
4351 FORCE_RET();
4352}
4353
4354void helper_fucomi_ST0_FT0(void)
4355{
4356 int eflags;
4357 int ret;
4358
4359 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4360 eflags = cc_table[CC_OP].compute_all();
4361 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4362 CC_SRC = eflags;
4363 FORCE_RET();
4364}
4365
4366void helper_fadd_ST0_FT0(void)
4367{
4368 ST0 += FT0;
4369}
4370
4371void helper_fmul_ST0_FT0(void)
4372{
4373 ST0 *= FT0;
4374}
4375
4376void helper_fsub_ST0_FT0(void)
4377{
4378 ST0 -= FT0;
4379}
4380
4381void helper_fsubr_ST0_FT0(void)
4382{
4383 ST0 = FT0 - ST0;
4384}
4385
4386void helper_fdiv_ST0_FT0(void)
4387{
4388 ST0 = helper_fdiv(ST0, FT0);
4389}
4390
4391void helper_fdivr_ST0_FT0(void)
4392{
4393 ST0 = helper_fdiv(FT0, ST0);
4394}
4395
4396/* fp operations between STN and ST0 */
4397
4398void helper_fadd_STN_ST0(int st_index)
4399{
4400 ST(st_index) += ST0;
4401}
4402
4403void helper_fmul_STN_ST0(int st_index)
4404{
4405 ST(st_index) *= ST0;
4406}
4407
4408void helper_fsub_STN_ST0(int st_index)
4409{
4410 ST(st_index) -= ST0;
4411}
4412
4413void helper_fsubr_STN_ST0(int st_index)
4414{
4415 CPU86_LDouble *p;
4416 p = &ST(st_index);
4417 *p = ST0 - *p;
4418}
4419
4420void helper_fdiv_STN_ST0(int st_index)
4421{
4422 CPU86_LDouble *p;
4423 p = &ST(st_index);
4424 *p = helper_fdiv(*p, ST0);
4425}
4426
4427void helper_fdivr_STN_ST0(int st_index)
4428{
4429 CPU86_LDouble *p;
4430 p = &ST(st_index);
4431 *p = helper_fdiv(ST0, *p);
4432}
4433
4434/* misc FPU operations */
4435void helper_fchs_ST0(void)
4436{
4437 ST0 = floatx_chs(ST0);
4438}
4439
4440void helper_fabs_ST0(void)
4441{
4442 ST0 = floatx_abs(ST0);
4443}
4444
4445void helper_fld1_ST0(void)
4446{
4447 ST0 = f15rk[1];
4448}
4449
4450void helper_fldl2t_ST0(void)
4451{
4452 ST0 = f15rk[6];
4453}
4454
4455void helper_fldl2e_ST0(void)
4456{
4457 ST0 = f15rk[5];
4458}
4459
4460void helper_fldpi_ST0(void)
4461{
4462 ST0 = f15rk[2];
4463}
4464
4465void helper_fldlg2_ST0(void)
4466{
4467 ST0 = f15rk[3];
4468}
4469
4470void helper_fldln2_ST0(void)
4471{
4472 ST0 = f15rk[4];
4473}
4474
4475void helper_fldz_ST0(void)
4476{
4477 ST0 = f15rk[0];
4478}
4479
4480void helper_fldz_FT0(void)
4481{
4482 FT0 = f15rk[0];
4483}
4484
4485#ifndef VBOX
4486uint32_t helper_fnstsw(void)
4487#else
4488RTCCUINTREG helper_fnstsw(void)
4489#endif
4490{
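    /* merge the current top-of-stack index into bits 11..13 of the status word */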
4491 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4492}
4493
4494#ifndef VBOX
4495uint32_t helper_fnstcw(void)
4496#else
4497RTCCUINTREG helper_fnstcw(void)
4498#endif
4499{
4500 return env->fpuc;
4501}
4502
4503static void update_fp_status(void)
4504{
4505 int rnd_type;
4506
4507 /* set rounding mode */
4508 switch(env->fpuc & RC_MASK) {
4509 default:
4510 case RC_NEAR:
4511 rnd_type = float_round_nearest_even;
4512 break;
4513 case RC_DOWN:
4514 rnd_type = float_round_down;
4515 break;
4516 case RC_UP:
4517 rnd_type = float_round_up;
4518 break;
4519 case RC_CHOP:
4520 rnd_type = float_round_to_zero;
4521 break;
4522 }
4523 set_float_rounding_mode(rnd_type, &env->fp_status);
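    /* precision control (FPUC bits 8..9): 0 -> 32-bit, 2 -> 64-bit,
       3 (and the reserved value 1) -> full 80-bit significand */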
4524#ifdef FLOATX80
4525 switch((env->fpuc >> 8) & 3) {
4526 case 0:
4527 rnd_type = 32;
4528 break;
4529 case 2:
4530 rnd_type = 64;
4531 break;
4532 case 3:
4533 default:
4534 rnd_type = 80;
4535 break;
4536 }
4537 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4538#endif
4539}
4540
4541void helper_fldcw(uint32_t val)
4542{
4543 env->fpuc = val;
4544 update_fp_status();
4545}
4546
4547void helper_fclex(void)
4548{
4549 env->fpus &= 0x7f00;
4550}
4551
4552void helper_fwait(void)
4553{
4554 if (env->fpus & FPUS_SE)
4555 fpu_raise_exception();
4556 FORCE_RET();
4557}
4558
4559void helper_fninit(void)
4560{
4561 env->fpus = 0;
4562 env->fpstt = 0;
4563 env->fpuc = 0x37f;
4564 env->fptags[0] = 1;
4565 env->fptags[1] = 1;
4566 env->fptags[2] = 1;
4567 env->fptags[3] = 1;
4568 env->fptags[4] = 1;
4569 env->fptags[5] = 1;
4570 env->fptags[6] = 1;
4571 env->fptags[7] = 1;
4572}
4573
4574/* BCD ops */
4575
4576void helper_fbld_ST0(target_ulong ptr)
4577{
4578 CPU86_LDouble tmp;
4579 uint64_t val;
4580 unsigned int v;
4581 int i;
4582
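    /* packed BCD: bytes 0..8 hold 18 decimal digits (two per byte, high
       nibble = tens digit), bit 7 of byte 9 is the sign */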
4583 val = 0;
4584 for(i = 8; i >= 0; i--) {
4585 v = ldub(ptr + i);
4586 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4587 }
4588 tmp = val;
4589 if (ldub(ptr + 9) & 0x80)
4590 tmp = -tmp;
4591 fpush();
4592 ST0 = tmp;
4593}
4594
4595void helper_fbst_ST0(target_ulong ptr)
4596{
4597 int v;
4598 target_ulong mem_ref, mem_end;
4599 int64_t val;
4600
4601 val = floatx_to_int64(ST0, &env->fp_status);
4602 mem_ref = ptr;
4603 mem_end = mem_ref + 9;
4604 if (val < 0) {
4605 stb(mem_end, 0x80);
4606 val = -val;
4607 } else {
4608 stb(mem_end, 0x00);
4609 }
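    /* emit the magnitude as packed BCD, least significant digit pair first,
       then zero-fill the remaining bytes */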
4610 while (mem_ref < mem_end) {
4611 if (val == 0)
4612 break;
4613 v = val % 100;
4614 val = val / 100;
4615 v = ((v / 10) << 4) | (v % 10);
4616 stb(mem_ref++, v);
4617 }
4618 while (mem_ref < mem_end) {
4619 stb(mem_ref++, 0);
4620 }
4621}
4622
4623void helper_f2xm1(void)
4624{
4625 ST0 = pow(2.0,ST0) - 1.0;
4626}
4627
4628void helper_fyl2x(void)
4629{
4630 CPU86_LDouble fptemp;
4631
4632 fptemp = ST0;
4633 if (fptemp>0.0){
4634 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4635 ST1 *= fptemp;
4636 fpop();
4637 } else {
4638 env->fpus &= (~0x4700);
4639 env->fpus |= 0x400;
4640 }
4641}
4642
4643void helper_fptan(void)
4644{
4645 CPU86_LDouble fptemp;
4646
4647 fptemp = ST0;
4648 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4649 env->fpus |= 0x400;
4650 } else {
4651 ST0 = tan(fptemp);
4652 fpush();
4653 ST0 = 1.0;
4654 env->fpus &= (~0x400); /* C2 <-- 0 */
4655 /* the above code is for |arg| < 2**52 only */
4656 }
4657}
4658
4659void helper_fpatan(void)
4660{
4661 CPU86_LDouble fptemp, fpsrcop;
4662
4663 fpsrcop = ST1;
4664 fptemp = ST0;
4665 ST1 = atan2(fpsrcop,fptemp);
4666 fpop();
4667}
4668
4669void helper_fxtract(void)
4670{
4671 CPU86_LDoubleU temp;
4672 unsigned int expdif;
4673
4674 temp.d = ST0;
4675 expdif = EXPD(temp) - EXPBIAS;
4676 /* DP exponent bias */
4677 ST0 = expdif;
4678 fpush();
4679 BIASEXPONENT(temp);
4680 ST0 = temp.d;
4681}
4682
4683#ifdef VBOX
4684#ifdef _MSC_VER
4685/* MSC cannot divide by zero */
4686extern double _Nan;
4687#define NaN _Nan
4688#else
4689#define NaN (0.0 / 0.0)
4690#endif
4691#endif /* VBOX */
4692
4693void helper_fprem1(void)
4694{
4695 CPU86_LDouble dblq, fpsrcop, fptemp;
4696 CPU86_LDoubleU fpsrcop1, fptemp1;
4697 int expdif;
4698 signed long long int q;
4699
4700#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4701 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4702#else
4703 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4704#endif
4705 ST0 = 0.0 / 0.0; /* NaN */
4706 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4707 return;
4708 }
4709
4710 fpsrcop = ST0;
4711 fptemp = ST1;
4712 fpsrcop1.d = fpsrcop;
4713 fptemp1.d = fptemp;
4714 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4715
4716 if (expdif < 0) {
4717 /* optimisation? taken from the AMD docs */
4718 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4719 /* ST0 is unchanged */
4720 return;
4721 }
4722
4723 if (expdif < 53) {
4724 dblq = fpsrcop / fptemp;
4725 /* round dblq towards nearest integer */
4726 dblq = rint(dblq);
4727 ST0 = fpsrcop - fptemp * dblq;
4728
4729 /* convert dblq to q by truncating towards zero */
4730 if (dblq < 0.0)
4731 q = (signed long long int)(-dblq);
4732 else
4733 q = (signed long long int)dblq;
4734
4735 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4736 /* (C0,C3,C1) <-- (q2,q1,q0) */
4737 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4738 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4739 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4740 } else {
4741 env->fpus |= 0x400; /* C2 <-- 1 */
4742 fptemp = pow(2.0, expdif - 50);
4743 fpsrcop = (ST0 / ST1) / fptemp;
4744 /* fpsrcop = integer obtained by chopping */
4745 fpsrcop = (fpsrcop < 0.0) ?
4746 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4747 ST0 -= (ST1 * fpsrcop * fptemp);
4748 }
4749}
4750
4751void helper_fprem(void)
4752{
4753 CPU86_LDouble dblq, fpsrcop, fptemp;
4754 CPU86_LDoubleU fpsrcop1, fptemp1;
4755 int expdif;
4756 signed long long int q;
4757
4758#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4759 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4760#else
4761 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4762#endif
4763 ST0 = 0.0 / 0.0; /* NaN */
4764 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4765 return;
4766 }
4767
4768 fpsrcop = (CPU86_LDouble)ST0;
4769 fptemp = (CPU86_LDouble)ST1;
4770 fpsrcop1.d = fpsrcop;
4771 fptemp1.d = fptemp;
4772 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4773
4774 if (expdif < 0) {
4775 /* optimisation? taken from the AMD docs */
4776 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4777 /* ST0 is unchanged */
4778 return;
4779 }
4780
4781 if ( expdif < 53 ) {
4782 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4783 /* round dblq towards zero */
4784 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4785 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4786
4787 /* convert dblq to q by truncating towards zero */
4788 if (dblq < 0.0)
4789 q = (signed long long int)(-dblq);
4790 else
4791 q = (signed long long int)dblq;
4792
4793 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4794 /* (C0,C3,C1) <-- (q2,q1,q0) */
4795 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4796 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4797 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4798 } else {
4799 int N = 32 + (expdif % 32); /* as per AMD docs */
4800 env->fpus |= 0x400; /* C2 <-- 1 */
4801 fptemp = pow(2.0, (double)(expdif - N));
4802 fpsrcop = (ST0 / ST1) / fptemp;
4803 /* fpsrcop = integer obtained by chopping */
4804 fpsrcop = (fpsrcop < 0.0) ?
4805 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4806 ST0 -= (ST1 * fpsrcop * fptemp);
4807 }
4808}
4809
4810void helper_fyl2xp1(void)
4811{
4812 CPU86_LDouble fptemp;
4813
4814 fptemp = ST0;
4815 if ((fptemp+1.0)>0.0) {
4816 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4817 ST1 *= fptemp;
4818 fpop();
4819 } else {
4820 env->fpus &= (~0x4700);
4821 env->fpus |= 0x400;
4822 }
4823}
4824
4825void helper_fsqrt(void)
4826{
4827 CPU86_LDouble fptemp;
4828
4829 fptemp = ST0;
4830 if (fptemp<0.0) {
4831 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4832 env->fpus |= 0x400;
4833 }
4834 ST0 = sqrt(fptemp);
4835}
4836
4837void helper_fsincos(void)
4838{
4839 CPU86_LDouble fptemp;
4840
4841 fptemp = ST0;
4842 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4843 env->fpus |= 0x400;
4844 } else {
4845 ST0 = sin(fptemp);
4846 fpush();
4847 ST0 = cos(fptemp);
4848 env->fpus &= (~0x400); /* C2 <-- 0 */
4849 /* the above code is for |arg| < 2**63 only */
4850 }
4851}
4852
4853void helper_frndint(void)
4854{
4855 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4856}
4857
4858void helper_fscale(void)
4859{
4860 ST0 = ldexp (ST0, (int)(ST1));
4861}
4862
4863void helper_fsin(void)
4864{
4865 CPU86_LDouble fptemp;
4866
4867 fptemp = ST0;
4868 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4869 env->fpus |= 0x400;
4870 } else {
4871 ST0 = sin(fptemp);
4872 env->fpus &= (~0x400); /* C2 <-- 0 */
4873 /* the above code is for |arg| < 2**53 only */
4874 }
4875}
4876
4877void helper_fcos(void)
4878{
4879 CPU86_LDouble fptemp;
4880
4881 fptemp = ST0;
4882 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4883 env->fpus |= 0x400;
4884 } else {
4885 ST0 = cos(fptemp);
4886 env->fpus &= (~0x400); /* C2 <-- 0 */
4887 /* the above code is for |arg| < 2**63 only */
4888 }
4889}
4890
4891void helper_fxam_ST0(void)
4892{
4893 CPU86_LDoubleU temp;
4894 int expdif;
4895
4896 temp.d = ST0;
4897
4898 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4899 if (SIGND(temp))
4900 env->fpus |= 0x200; /* C1 <-- 1 */
4901
4902 /* XXX: test fptags too */
4903 expdif = EXPD(temp);
4904 if (expdif == MAXEXPD) {
4905#ifdef USE_X86LDOUBLE
4906 if (MANTD(temp) == 0x8000000000000000ULL)
4907#else
4908 if (MANTD(temp) == 0)
4909#endif
4910 env->fpus |= 0x500 /*Infinity*/;
4911 else
4912 env->fpus |= 0x100 /*NaN*/;
4913 } else if (expdif == 0) {
4914 if (MANTD(temp) == 0)
4915 env->fpus |= 0x4000 /*Zero*/;
4916 else
4917 env->fpus |= 0x4400 /*Denormal*/;
4918 } else {
4919 env->fpus |= 0x400;
4920 }
4921}
4922
4923void helper_fstenv(target_ulong ptr, int data32)
4924{
4925 int fpus, fptag, exp, i;
4926 uint64_t mant;
4927 CPU86_LDoubleU tmp;
4928
4929 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4930 fptag = 0;
4931 for (i=7; i>=0; i--) {
4932 fptag <<= 2;
4933 if (env->fptags[i]) {
4934 fptag |= 3;
4935 } else {
4936 tmp.d = env->fpregs[i].d;
4937 exp = EXPD(tmp);
4938 mant = MANTD(tmp);
4939 if (exp == 0 && mant == 0) {
4940 /* zero */
4941 fptag |= 1;
4942 } else if (exp == 0 || exp == MAXEXPD
4943#ifdef USE_X86LDOUBLE
4944 || (mant & (1LL << 63)) == 0
4945#endif
4946 ) {
4947 /* NaNs, infinity, denormal */
4948 fptag |= 2;
4949 }
4950 }
4951 }
4952 if (data32) {
4953 /* 32 bit */
4954 stl(ptr, env->fpuc);
4955 stl(ptr + 4, fpus);
4956 stl(ptr + 8, fptag);
4957 stl(ptr + 12, 0); /* fpip */
4958 stl(ptr + 16, 0); /* fpcs */
4959 stl(ptr + 20, 0); /* fpoo */
4960 stl(ptr + 24, 0); /* fpos */
4961 } else {
4962 /* 16 bit */
4963 stw(ptr, env->fpuc);
4964 stw(ptr + 2, fpus);
4965 stw(ptr + 4, fptag);
4966 stw(ptr + 6, 0);
4967 stw(ptr + 8, 0);
4968 stw(ptr + 10, 0);
4969 stw(ptr + 12, 0);
4970 }
4971}
4972
4973void helper_fldenv(target_ulong ptr, int data32)
4974{
4975 int i, fpus, fptag;
4976
4977 if (data32) {
4978 env->fpuc = lduw(ptr);
4979 fpus = lduw(ptr + 4);
4980 fptag = lduw(ptr + 8);
4981 }
4982 else {
4983 env->fpuc = lduw(ptr);
4984 fpus = lduw(ptr + 2);
4985 fptag = lduw(ptr + 4);
4986 }
4987 env->fpstt = (fpus >> 11) & 7;
4988 env->fpus = fpus & ~0x3800;
4989 for(i = 0;i < 8; i++) {
4990 env->fptags[i] = ((fptag & 3) == 3);
4991 fptag >>= 2;
4992 }
4993}
4994
4995void helper_fsave(target_ulong ptr, int data32)
4996{
4997 CPU86_LDouble tmp;
4998 int i;
4999
5000 helper_fstenv(ptr, data32);
5001
5002 ptr += (14 << data32);
5003 for(i = 0;i < 8; i++) {
5004 tmp = ST(i);
5005 helper_fstt(tmp, ptr);
5006 ptr += 10;
5007 }
5008
5009 /* fninit */
5010 env->fpus = 0;
5011 env->fpstt = 0;
5012 env->fpuc = 0x37f;
5013 env->fptags[0] = 1;
5014 env->fptags[1] = 1;
5015 env->fptags[2] = 1;
5016 env->fptags[3] = 1;
5017 env->fptags[4] = 1;
5018 env->fptags[5] = 1;
5019 env->fptags[6] = 1;
5020 env->fptags[7] = 1;
5021}
5022
5023void helper_frstor(target_ulong ptr, int data32)
5024{
5025 CPU86_LDouble tmp;
5026 int i;
5027
5028 helper_fldenv(ptr, data32);
5029 ptr += (14 << data32);
5030
5031 for(i = 0;i < 8; i++) {
5032 tmp = helper_fldt(ptr);
5033 ST(i) = tmp;
5034 ptr += 10;
5035 }
5036}
5037
5038void helper_fxsave(target_ulong ptr, int data64)
5039{
5040 int fpus, fptag, i, nb_xmm_regs;
5041 CPU86_LDouble tmp;
5042 target_ulong addr;
5043
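    /* FXSAVE image layout used below: +0x00 FCW, +0x02 FSW, +0x04 abridged
       tag word, +0x08 IP/DP info, +0x18 MXCSR, +0x20 eight 16-byte ST(i)
       slots, +0xa0 the XMM registers */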
5044 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5045 fptag = 0;
5046 for(i = 0; i < 8; i++) {
5047 fptag |= (env->fptags[i] << i);
5048 }
5049 stw(ptr, env->fpuc);
5050 stw(ptr + 2, fpus);
5051 stw(ptr + 4, fptag ^ 0xff);
5052#ifdef TARGET_X86_64
5053 if (data64) {
5054 stq(ptr + 0x08, 0); /* rip */
5055 stq(ptr + 0x10, 0); /* rdp */
5056 } else
5057#endif
5058 {
5059 stl(ptr + 0x08, 0); /* eip */
5060 stl(ptr + 0x0c, 0); /* sel */
5061 stl(ptr + 0x10, 0); /* dp */
5062 stl(ptr + 0x14, 0); /* sel */
5063 }
5064
5065 addr = ptr + 0x20;
5066 for(i = 0;i < 8; i++) {
5067 tmp = ST(i);
5068 helper_fstt(tmp, addr);
5069 addr += 16;
5070 }
5071
5072 if (env->cr[4] & CR4_OSFXSR_MASK) {
5073 /* XXX: finish it */
5074 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5075 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5076 if (env->hflags & HF_CS64_MASK)
5077 nb_xmm_regs = 16;
5078 else
5079 nb_xmm_regs = 8;
5080 addr = ptr + 0xa0;
5081 for(i = 0; i < nb_xmm_regs; i++) {
5082 stq(addr, env->xmm_regs[i].XMM_Q(0));
5083 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5084 addr += 16;
5085 }
5086 }
5087}
5088
5089void helper_fxrstor(target_ulong ptr, int data64)
5090{
5091 int i, fpus, fptag, nb_xmm_regs;
5092 CPU86_LDouble tmp;
5093 target_ulong addr;
5094
5095 env->fpuc = lduw(ptr);
5096 fpus = lduw(ptr + 2);
5097 fptag = lduw(ptr + 4);
5098 env->fpstt = (fpus >> 11) & 7;
5099 env->fpus = fpus & ~0x3800;
5100 fptag ^= 0xff;
5101 for(i = 0;i < 8; i++) {
5102 env->fptags[i] = ((fptag >> i) & 1);
5103 }
5104
5105 addr = ptr + 0x20;
5106 for(i = 0;i < 8; i++) {
5107 tmp = helper_fldt(addr);
5108 ST(i) = tmp;
5109 addr += 16;
5110 }
5111
5112 if (env->cr[4] & CR4_OSFXSR_MASK) {
5113 /* XXX: finish it */
5114 env->mxcsr = ldl(ptr + 0x18);
5115 //ldl(ptr + 0x1c);
5116 if (env->hflags & HF_CS64_MASK)
5117 nb_xmm_regs = 16;
5118 else
5119 nb_xmm_regs = 8;
5120 addr = ptr + 0xa0;
5121 for(i = 0; i < nb_xmm_regs; i++) {
5122#if !defined(VBOX) || __GNUC__ < 4
5123 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5124 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5125#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5126# if 1
5127 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5128 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5129 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5130 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5131# else
5132 /* this works fine on Mac OS X, gcc 4.0.1 */
5133 uint64_t u64 = ldq(addr);
5134 env->xmm_regs[i].XMM_Q(0) = u64;
5135 u64 = ldq(addr + 8);
5136 env->xmm_regs[i].XMM_Q(1) = u64;
5137# endif
5138#endif
5139 addr += 16;
5140 }
5141 }
5142}
5143
5144#ifndef USE_X86LDOUBLE
5145
5146void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5147{
5148 CPU86_LDoubleU temp;
5149 int e;
5150
5151 temp.d = f;
5152 /* mantissa */
5153 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5154 /* exponent + sign */
5155 e = EXPD(temp) - EXPBIAS + 16383;
5156 e |= SIGND(temp) >> 16;
5157 *pexp = e;
5158}
5159
5160CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5161{
5162 CPU86_LDoubleU temp;
5163 int e;
5164 uint64_t ll;
5165
5166 /* XXX: handle overflow ? */
5167 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5168 e |= (upper >> 4) & 0x800; /* sign */
5169 ll = (mant >> 11) & ((1LL << 52) - 1);
5170#ifdef __arm__
5171 temp.l.upper = (e << 20) | (ll >> 32);
5172 temp.l.lower = ll;
5173#else
5174 temp.ll = ll | ((uint64_t)e << 52);
5175#endif
5176 return temp.d;
5177}
5178
5179#else
5180
5181void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5182{
5183 CPU86_LDoubleU temp;
5184
5185 temp.d = f;
5186 *pmant = temp.l.lower;
5187 *pexp = temp.l.upper;
5188}
5189
5190CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5191{
5192 CPU86_LDoubleU temp;
5193
5194 temp.l.upper = upper;
5195 temp.l.lower = mant;
5196 return temp.d;
5197}
5198#endif
5199
5200#ifdef TARGET_X86_64
5201
5202//#define DEBUG_MULDIV
5203
5204static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5205{
5206 *plow += a;
5207 /* carry test */
5208 if (*plow < a)
5209 (*phigh)++;
5210 *phigh += b;
5211}
5212
5213static void neg128(uint64_t *plow, uint64_t *phigh)
5214{
5215 *plow = ~ *plow;
5216 *phigh = ~ *phigh;
5217 add128(plow, phigh, 1, 0);
5218}
5219
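/* unsigned 128-by-64 division: *phigh:*plow is the dividend on entry;
   on exit *plow holds the quotient and *phigh the remainder */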
5220/* return TRUE if overflow */
5221static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5222{
5223 uint64_t q, r, a1, a0;
5224 int i, qb, ab;
5225
5226 a0 = *plow;
5227 a1 = *phigh;
5228 if (a1 == 0) {
5229 q = a0 / b;
5230 r = a0 % b;
5231 *plow = q;
5232 *phigh = r;
5233 } else {
5234 if (a1 >= b)
5235 return 1;
5236 /* XXX: use a better algorithm */
5237 for(i = 0; i < 64; i++) {
5238 ab = a1 >> 63;
5239 a1 = (a1 << 1) | (a0 >> 63);
5240 if (ab || a1 >= b) {
5241 a1 -= b;
5242 qb = 1;
5243 } else {
5244 qb = 0;
5245 }
5246 a0 = (a0 << 1) | qb;
5247 }
5248#if defined(DEBUG_MULDIV)
5249 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5250 *phigh, *plow, b, a0, a1);
5251#endif
5252 *plow = a0;
5253 *phigh = a1;
5254 }
5255 return 0;
5256}
5257
5258/* return TRUE if overflow */
5259static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5260{
5261 int sa, sb;
5262 sa = ((int64_t)*phigh < 0);
5263 if (sa)
5264 neg128(plow, phigh);
5265 sb = (b < 0);
5266 if (sb)
5267 b = -b;
5268 if (div64(plow, phigh, b) != 0)
5269 return 1;
5270 if (sa ^ sb) {
5271 if (*plow > (1ULL << 63))
5272 return 1;
5273 *plow = - *plow;
5274 } else {
5275 if (*plow >= (1ULL << 63))
5276 return 1;
5277 }
5278 if (sa)
5279 *phigh = - *phigh;
5280 return 0;
5281}
5282
5283void helper_mulq_EAX_T0(target_ulong t0)
5284{
5285 uint64_t r0, r1;
5286
5287 mulu64(&r0, &r1, EAX, t0);
5288 EAX = r0;
5289 EDX = r1;
5290 CC_DST = r0;
5291 CC_SRC = r1;
5292}
5293
5294void helper_imulq_EAX_T0(target_ulong t0)
5295{
5296 uint64_t r0, r1;
5297
5298 muls64(&r0, &r1, EAX, t0);
5299 EAX = r0;
5300 EDX = r1;
5301 CC_DST = r0;
5302 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5303}
5304
5305target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5306{
5307 uint64_t r0, r1;
5308
5309 muls64(&r0, &r1, t0, t1);
5310 CC_DST = r0;
5311 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5312 return r0;
5313}
5314
5315void helper_divq_EAX(target_ulong t0)
5316{
5317 uint64_t r0, r1;
5318 if (t0 == 0) {
5319 raise_exception(EXCP00_DIVZ);
5320 }
5321 r0 = EAX;
5322 r1 = EDX;
5323 if (div64(&r0, &r1, t0))
5324 raise_exception(EXCP00_DIVZ);
5325 EAX = r0;
5326 EDX = r1;
5327}
5328
5329void helper_idivq_EAX(target_ulong t0)
5330{
5331 uint64_t r0, r1;
5332 if (t0 == 0) {
5333 raise_exception(EXCP00_DIVZ);
5334 }
5335 r0 = EAX;
5336 r1 = EDX;
5337 if (idiv64(&r0, &r1, t0))
5338 raise_exception(EXCP00_DIVZ);
5339 EAX = r0;
5340 EDX = r1;
5341}
5342#endif
5343
5344static void do_hlt(void)
5345{
5346 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5347 env->halted = 1;
5348 env->exception_index = EXCP_HLT;
5349 cpu_loop_exit();
5350}
5351
5352void helper_hlt(int next_eip_addend)
5353{
5354 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5355 EIP += next_eip_addend;
5356
5357 do_hlt();
5358}
5359
5360void helper_monitor(target_ulong ptr)
5361{
5362#ifdef VBOX
5363 if ((uint32_t)ECX > 1)
5364 raise_exception(EXCP0D_GPF);
5365#else
5366 if ((uint32_t)ECX != 0)
5367 raise_exception(EXCP0D_GPF);
5368#endif
5369 /* XXX: store address ? */
5370 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5371}
5372
5373void helper_mwait(int next_eip_addend)
5374{
5375 if ((uint32_t)ECX != 0)
5376 raise_exception(EXCP0D_GPF);
5377#ifdef VBOX
5378 helper_hlt(next_eip_addend);
5379#else
5380 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5381 EIP += next_eip_addend;
5382
5383 /* XXX: not complete but not completely erroneous */
5384 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5385 /* more than one CPU: do not sleep because another CPU may
5386 wake this one */
5387 } else {
5388 do_hlt();
5389 }
5390#endif
5391}
5392
5393void helper_debug(void)
5394{
5395 env->exception_index = EXCP_DEBUG;
5396 cpu_loop_exit();
5397}
5398
5399void helper_raise_interrupt(int intno, int next_eip_addend)
5400{
5401 raise_interrupt(intno, 1, 0, next_eip_addend);
5402}
5403
5404void helper_raise_exception(int exception_index)
5405{
5406 raise_exception(exception_index);
5407}
5408
5409void helper_cli(void)
5410{
5411 env->eflags &= ~IF_MASK;
5412}
5413
5414void helper_sti(void)
5415{
5416 env->eflags |= IF_MASK;
5417}
5418
5419#ifdef VBOX
5420void helper_cli_vme(void)
5421{
5422 env->eflags &= ~VIF_MASK;
5423}
5424
5425void helper_sti_vme(void)
5426{
5427 /* First check, then change eflags according to the AMD manual */
5428 if (env->eflags & VIP_MASK) {
5429 raise_exception(EXCP0D_GPF);
5430 }
5431 env->eflags |= VIF_MASK;
5432}
5433#endif
5434
5435#if 0
5436/* vm86plus instructions */
5437void helper_cli_vm(void)
5438{
5439 env->eflags &= ~VIF_MASK;
5440}
5441
5442void helper_sti_vm(void)
5443{
5444 env->eflags |= VIF_MASK;
5445 if (env->eflags & VIP_MASK) {
5446 raise_exception(EXCP0D_GPF);
5447 }
5448}
5449#endif
5450
5451void helper_set_inhibit_irq(void)
5452{
5453 env->hflags |= HF_INHIBIT_IRQ_MASK;
5454}
5455
5456void helper_reset_inhibit_irq(void)
5457{
5458 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5459}
5460
5461void helper_boundw(target_ulong a0, int v)
5462{
5463 int low, high;
5464 low = ldsw(a0);
5465 high = ldsw(a0 + 2);
5466 v = (int16_t)v;
5467 if (v < low || v > high) {
5468 raise_exception(EXCP05_BOUND);
5469 }
5470 FORCE_RET();
5471}
5472
5473void helper_boundl(target_ulong a0, int v)
5474{
5475 int low, high;
5476 low = ldl(a0);
5477 high = ldl(a0 + 4);
5478 if (v < low || v > high) {
5479 raise_exception(EXCP05_BOUND);
5480 }
5481 FORCE_RET();
5482}
5483
5484static float approx_rsqrt(float a)
5485{
5486 return 1.0 / sqrt(a);
5487}
5488
5489static float approx_rcp(float a)
5490{
5491 return 1.0 / a;
5492}
5493
5494#if !defined(CONFIG_USER_ONLY)
5495
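/* instantiate the softmmu load/store helpers for 1-, 2-, 4- and 8-byte accesses */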
5496#define MMUSUFFIX _mmu
5497
5498#define SHIFT 0
5499#include "softmmu_template.h"
5500
5501#define SHIFT 1
5502#include "softmmu_template.h"
5503
5504#define SHIFT 2
5505#include "softmmu_template.h"
5506
5507#define SHIFT 3
5508#include "softmmu_template.h"
5509
5510#endif
5511
5512#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5513/* This code assumes a real physical address always fits into a host CPU register,
5514 which is wrong in general, but true for our current use cases. */
5515RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5516{
5517 return remR3PhysReadS8(addr);
5518}
5519RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5520{
5521 return remR3PhysReadU8(addr);
5522}
5523void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5524{
5525 remR3PhysWriteU8(addr, val);
5526}
5527RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5528{
5529 return remR3PhysReadS16(addr);
5530}
5531RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5532{
5533 return remR3PhysReadU16(addr);
5534}
5535void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5536{
5537 remR3PhysWriteU16(addr, val);
5538}
5539RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5540{
5541 return remR3PhysReadS32(addr);
5542}
5543RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5544{
5545 return remR3PhysReadU32(addr);
5546}
5547void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5548{
5549 remR3PhysWriteU32(addr, val);
5550}
5551uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5552{
5553 return remR3PhysReadU64(addr);
5554}
5555void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5556{
5557 remR3PhysWriteU64(addr, val);
5558}
5559#endif
5560
5561/* try to fill the TLB and return an exception if error. If retaddr is
5562 NULL, it means that the function was called in C code (i.e. not
5563 from generated code or from helper.c) */
5564/* XXX: fix it to restore all registers */
5565void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5566{
5567 TranslationBlock *tb;
5568 int ret;
5569 unsigned long pc;
5570 CPUX86State *saved_env;
5571
5572 /* XXX: hack to restore env in all cases, even if not called from
5573 generated code */
5574 saved_env = env;
5575 env = cpu_single_env;
5576
5577 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5578 if (ret) {
5579 if (retaddr) {
5580 /* now we have a real cpu fault */
5581 pc = (unsigned long)retaddr;
5582 tb = tb_find_pc(pc);
5583 if (tb) {
5584 /* the PC is inside the translated code. It means that we have
5585 a virtual CPU fault */
5586 cpu_restore_state(tb, env, pc, NULL);
5587 }
5588 }
5589 raise_exception_err(env->exception_index, env->error_code);
5590 }
5591 env = saved_env;
5592}
5593
5594#ifdef VBOX
5595
5596/**
5597 * Correctly computes the eflags.
5598 * @returns eflags.
5599 * @param env1 CPU environment.
5600 */
5601uint32_t raw_compute_eflags(CPUX86State *env1)
5602{
5603 CPUX86State *savedenv = env;
5604 uint32_t efl;
5605 env = env1;
5606 efl = compute_eflags();
5607 env = savedenv;
5608 return efl;
5609}
5610
5611/**
5612 * Reads a byte from a virtual address in the guest memory area.
5613 * XXX: does this work for all addresses? what about swapped-out pages?
5614 * @returns read data byte.
5615 * @param env1 CPU environment.
5616 * @param addr GC Virtual address.
5617 */
5618uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5619{
5620 CPUX86State *savedenv = env;
5621 uint8_t u8;
5622 env = env1;
5623 u8 = ldub_kernel(addr);
5624 env = savedenv;
5625 return u8;
5626}
5627
5628/**
5629 * Reads a 16-bit word from a virtual address in the guest memory area.
5630 * XXX: does this work for all addresses? what about swapped-out pages?
5631 * @returns read data word.
5632 * @param env1 CPU environment.
5633 * @param addr GC Virtual address.
5634 */
5635uint16_t read_word(CPUX86State *env1, target_ulong addr)
5636{
5637 CPUX86State *savedenv = env;
5638 uint16_t u16;
5639 env = env1;
5640 u16 = lduw_kernel(addr);
5641 env = savedenv;
5642 return u16;
5643}
5644
5645/**
5646 * Reads a 32-bit dword from a virtual address in the guest memory area.
5647 * XXX: does this work for all addresses? what about swapped-out pages?
5648 * @returns read data dword.
5649 * @param env1 CPU environment.
5650 * @param addr GC Virtual address.
5651 */
5652uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5653{
5654 CPUX86State *savedenv = env;
5655 uint32_t u32;
5656 env = env1;
5657 u32 = ldl_kernel(addr);
5658 env = savedenv;
5659 return u32;
5660}
5661
5662/**
5663 * Writes a byte to a virtual address in the guest memory area.
5664 * XXX: does this work for all addresses? what about swapped-out pages?
5666 * @param env1 CPU environment.
5667 * @param addr GC Virtual address.
5668 * @param val byte value.
5669 */
5670void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5671{
5672 CPUX86State *savedenv = env;
5673 env = env1;
5674 stb(addr, val);
5675 env = savedenv;
5676}
5677
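/**
 * Writes a 16-bit word to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC Virtual address.
 * @param val word value.
 */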
5678void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5679{
5680 CPUX86State *savedenv = env;
5681 env = env1;
5682 stw(addr, val);
5683 env = savedenv;
5684}
5685
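/**
 * Writes a 32-bit dword to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC Virtual address.
 * @param val dword value.
 */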
5686void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5687{
5688 CPUX86State *savedenv = env;
5689 env = env1;
5690 stl(addr, val);
5691 env = savedenv;
5692}
5693
5694/**
5695 * Correctly loads selector into segment register with updating internal
5696 * qemu data/caches.
5697 * @param env1 CPU environment.
5698 * @param seg_reg Segment register.
5699 * @param selector Selector to load.
5700 */
5701void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5702{
5703 CPUX86State *savedenv = env;
5704#ifdef FORCE_SEGMENT_SYNC
5705 jmp_buf old_buf;
5706#endif
5707
5708 env = env1;
5709
5710 if ( env->eflags & X86_EFL_VM
5711 || !(env->cr[0] & X86_CR0_PE))
5712 {
5713 load_seg_vm(seg_reg, selector);
5714
5715 env = savedenv;
5716
5717 /* Successful sync. */
5718 env1->segs[seg_reg].newselector = 0;
5719 }
5720 else
5721 {
5722 /* For some reason it works even without saving/restoring the jump buffer, and since this
5723 code is time critical we don't do that. */
5724#ifdef FORCE_SEGMENT_SYNC
5725 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5726#endif
5727 if (setjmp(env1->jmp_env) == 0)
5728 {
5729 if (seg_reg == R_CS)
5730 {
5731 uint32_t e1, e2;
5732 e1 = e2 = 0;
5733 load_segment(&e1, &e2, selector);
5734 cpu_x86_load_seg_cache(env, R_CS, selector,
5735 get_seg_base(e1, e2),
5736 get_seg_limit(e1, e2),
5737 e2);
5738 }
5739 else
5740 helper_load_seg(seg_reg, selector);
5741 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5742 loading 0 selectors, which in turn led to subtle problems like #3588 */
5743
5744 env = savedenv;
5745
5746 /* Successful sync. */
5747 env1->segs[seg_reg].newselector = 0;
5748 }
5749 else
5750 {
5751 env = savedenv;
5752
5753 /* Postpone sync until the guest uses the selector. */
5754 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5755 env1->segs[seg_reg].newselector = selector;
5756 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5757 env1->exception_index = -1;
5758 env1->error_code = 0;
5759 env1->old_exception = -1;
5760 }
5761#ifdef FORCE_SEGMENT_SYNC
5762 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5763#endif
5764 }
5765
5766}
5767
5768DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5769{
5770 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5771}
5772
5773
5774int emulate_single_instr(CPUX86State *env1)
5775{
5776 TranslationBlock *tb;
5777 TranslationBlock *current;
5778 int flags;
5779 uint8_t *tc_ptr;
5780 target_ulong old_eip;
5781
5782 /* ensures env is loaded! */
5783 CPUX86State *savedenv = env;
5784 env = env1;
5785
5786 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5787
5788 current = env->current_tb;
5789 env->current_tb = NULL;
5790 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5791
5792 /*
5793 * Translate only one instruction.
5794 */
5795 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5796 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5797 env->segs[R_CS].base, flags, 0);
5798
5799 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5800
5801
5802 /* tb_link_phys: */
5803 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5804 tb->jmp_next[0] = NULL;
5805 tb->jmp_next[1] = NULL;
5806 Assert(tb->jmp_next[0] == NULL);
5807 Assert(tb->jmp_next[1] == NULL);
5808 if (tb->tb_next_offset[0] != 0xffff)
5809 tb_reset_jump(tb, 0);
5810 if (tb->tb_next_offset[1] != 0xffff)
5811 tb_reset_jump(tb, 1);
5812
5813 /*
5814 * Execute it using emulation
5815 */
5816 old_eip = env->eip;
5817 env->current_tb = tb;
5818
5819 /*
5820 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code
5821 * perhaps not a very safe hack
5822 */
5823 while(old_eip == env->eip)
5824 {
5825 tc_ptr = tb->tc_ptr;
5826
5827#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5828 int fake_ret;
5829 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5830#else
5831 tcg_qemu_tb_exec(tc_ptr);
5832#endif
5833 /*
5834 * Exit once we detect an external interrupt and interrupts are enabled
5835 */
5836 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5837 ( (env->eflags & IF_MASK) &&
5838 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5839 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5840 {
5841 break;
5842 }
5843 }
5844 env->current_tb = current;
5845
5846 tb_phys_invalidate(tb, -1);
5847 tb_free(tb);
5848/*
5849 Assert(tb->tb_next_offset[0] == 0xffff);
5850 Assert(tb->tb_next_offset[1] == 0xffff);
5851 Assert(tb->tb_next[0] == 0xffff);
5852 Assert(tb->tb_next[1] == 0xffff);
5853 Assert(tb->jmp_next[0] == NULL);
5854 Assert(tb->jmp_next[1] == NULL);
5855 Assert(tb->jmp_first == NULL); */
5856
5857 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5858
5859 /*
5860 * Execute the next instruction when we encounter instruction fusing.
5861 */
5862 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5863 {
5864 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5865 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5866 emulate_single_instr(env);
5867 }
5868
5869 env = savedenv;
5870 return 0;
5871}
5872
5873/**
5874 * Correctly loads a new ldtr selector.
5875 *
5876 * @param env1 CPU environment.
5877 * @param selector Selector to load.
5878 */
5879void sync_ldtr(CPUX86State *env1, int selector)
5880{
5881 CPUX86State *saved_env = env;
5882 if (setjmp(env1->jmp_env) == 0)
5883 {
5884 env = env1;
5885 helper_lldt(selector);
5886 env = saved_env;
5887 }
5888 else
5889 {
5890 env = saved_env;
5891#ifdef VBOX_STRICT
5892 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5893#endif
5894 }
5895}
5896
5897int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5898 uint32_t *esp_ptr, int dpl)
5899{
5900 int type, index, shift;
5901
5902 CPUX86State *savedenv = env;
5903 env = env1;
5904
5905 if (!(env->tr.flags & DESC_P_MASK))
5906 cpu_abort(env, "invalid tss");
5907 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5908 if ((type & 7) != 1)
5909 cpu_abort(env, "invalid tss type %d", type);
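    /* type 1 is an available 16-bit TSS, type 9 a 32-bit TSS: bit 3 of the
       type selects between 2- and 4-byte stack entries in the TSS */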
5910 shift = type >> 3;
5911 index = (dpl * 4 + 2) << shift;
5912 if (index + (4 << shift) - 1 > env->tr.limit)
5913 {
5914 env = savedenv;
5915 return 0;
5916 }
5917 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5918
5919 if (shift == 0) {
5920 *esp_ptr = lduw_kernel(env->tr.base + index);
5921 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5922 } else {
5923 *esp_ptr = ldl_kernel(env->tr.base + index);
5924 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5925 }
5926
5927 env = savedenv;
5928 return 1;
5929}
5930
5931//*****************************************************************************
5932// Needs to be at the bottom of the file (overriding macros)
5933
5934#ifndef VBOX
5935static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5936#else /* VBOX */
5937DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5938#endif /* VBOX */
5939{
5940 return *(CPU86_LDouble *)ptr;
5941}
5942
5943#ifndef VBOX
5944static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5945#else /* VBOX */
5946DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5947#endif /* VBOX */
5948{
5949 *(CPU86_LDouble *)ptr = f;
5950}
5951
5952#undef stw
5953#undef stl
5954#undef stq
5955#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5956#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5957#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5958
5959//*****************************************************************************
5960void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5961{
5962 int fpus, fptag, i, nb_xmm_regs;
5963 CPU86_LDouble tmp;
5964 uint8_t *addr;
5965 int data64 = !!(env->hflags & HF_LMA_MASK);
5966
5967 if (env->cpuid_features & CPUID_FXSR)
5968 {
5969 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5970 fptag = 0;
5971 for(i = 0; i < 8; i++) {
5972 fptag |= (env->fptags[i] << i);
5973 }
5974 stw(ptr, env->fpuc);
5975 stw(ptr + 2, fpus);
5976 stw(ptr + 4, fptag ^ 0xff);
5977
5978 addr = ptr + 0x20;
5979 for(i = 0;i < 8; i++) {
5980 tmp = ST(i);
5981 helper_fstt_raw(tmp, addr);
5982 addr += 16;
5983 }
5984
5985 if (env->cr[4] & CR4_OSFXSR_MASK) {
5986 /* XXX: finish it */
5987 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5988 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5989 nb_xmm_regs = 8 << data64;
5990 addr = ptr + 0xa0;
5991 for(i = 0; i < nb_xmm_regs; i++) {
5992#if __GNUC__ < 4
5993 stq(addr, env->xmm_regs[i].XMM_Q(0));
5994 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5995#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5996 stl(addr, env->xmm_regs[i].XMM_L(0));
5997 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5998 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5999 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6000#endif
6001 addr += 16;
6002 }
6003 }
6004 }
6005 else
6006 {
6007 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6008 int fptag;
6009
6010 fp->FCW = env->fpuc;
6011 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6012 fptag = 0;
6013 for (i=7; i>=0; i--) {
6014 fptag <<= 2;
6015 if (env->fptags[i]) {
6016 fptag |= 3;
6017 } else {
6018 /* the FPU automatically computes it */
6019 }
6020 }
6021 fp->FTW = fptag;
6022
6023 for(i = 0;i < 8; i++) {
6024 tmp = ST(i);
6025 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6026 }
6027 }
6028}
6029
6030//*****************************************************************************
6031#undef lduw
6032#undef ldl
6033#undef ldq
6034#define lduw(a) *(uint16_t *)(a)
6035#define ldl(a) *(uint32_t *)(a)
6036#define ldq(a) *(uint64_t *)(a)
6037//*****************************************************************************
6038void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6039{
6040 int i, fpus, fptag, nb_xmm_regs;
6041 CPU86_LDouble tmp;
6042 uint8_t *addr;
6043 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6044
6045 if (env->cpuid_features & CPUID_FXSR)
6046 {
6047 env->fpuc = lduw(ptr);
6048 fpus = lduw(ptr + 2);
6049 fptag = lduw(ptr + 4);
6050 env->fpstt = (fpus >> 11) & 7;
6051 env->fpus = fpus & ~0x3800;
6052 fptag ^= 0xff;
6053 for(i = 0;i < 8; i++) {
6054 env->fptags[i] = ((fptag >> i) & 1);
6055 }
6056
6057 addr = ptr + 0x20;
6058 for(i = 0;i < 8; i++) {
6059 tmp = helper_fldt_raw(addr);
6060 ST(i) = tmp;
6061 addr += 16;
6062 }
6063
6064 if (env->cr[4] & CR4_OSFXSR_MASK) {
6065 /* XXX: finish it, endianness */
6066 env->mxcsr = ldl(ptr + 0x18);
6067 //ldl(ptr + 0x1c);
6068 nb_xmm_regs = 8 << data64;
6069 addr = ptr + 0xa0;
6070 for(i = 0; i < nb_xmm_regs; i++) {
6071#if HC_ARCH_BITS == 32
6072 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6073 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6074 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6075 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6076 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6077#else
6078 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6079 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6080#endif
6081 addr += 16;
6082 }
6083 }
6084 }
6085 else
6086 {
6087 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6088 int fptag, j;
6089
6090 env->fpuc = fp->FCW;
6091 env->fpstt = (fp->FSW >> 11) & 7;
6092 env->fpus = fp->FSW & ~0x3800;
6093 fptag = fp->FTW;
6094 for(i = 0;i < 8; i++) {
6095 env->fptags[i] = ((fptag & 3) == 3);
6096 fptag >>= 2;
6097 }
6098 j = env->fpstt;
6099 for(i = 0;i < 8; i++) {
6100 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6101 ST(i) = tmp;
6102 }
6103 }
6104}
6105//*****************************************************************************
6106//*****************************************************************************
6107
6108#endif /* VBOX */
6109
6110/* Secure Virtual Machine helpers */
6111
6112#if defined(CONFIG_USER_ONLY)
6113
6114void helper_vmrun(int aflag, int next_eip_addend)
6115{
6116}
6117void helper_vmmcall(void)
6118{
6119}
6120void helper_vmload(int aflag)
6121{
6122}
6123void helper_vmsave(int aflag)
6124{
6125}
6126void helper_stgi(void)
6127{
6128}
6129void helper_clgi(void)
6130{
6131}
6132void helper_skinit(void)
6133{
6134}
6135void helper_invlpga(int aflag)
6136{
6137}
6138void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6139{
6140}
6141void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6142{
6143}
6144
6145void helper_svm_check_io(uint32_t port, uint32_t param,
6146 uint32_t next_eip_addend)
6147{
6148}
6149#else
6150
6151#ifndef VBOX
6152static inline void svm_save_seg(target_phys_addr_t addr,
6153#else /* VBOX */
6154DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6155#endif /* VBOX */
6156 const SegmentCache *sc)
6157{
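    /* pack the hidden descriptor flags into the VMCB attrib format: the low
       byte comes from flags bits 8..15, attrib bits 8..11 from flags
       bits 20..23 (AVL/L/D/G) */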
6158 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6159 sc->selector);
6160 stq_phys(addr + offsetof(struct vmcb_seg, base),
6161 sc->base);
6162 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6163 sc->limit);
6164 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6165 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6166}
6167
6168#ifndef VBOX
6169static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6170#else /* VBOX */
6171DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6172#endif /* VBOX */
6173{
6174 unsigned int flags;
6175
6176 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6177 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6178 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6179 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6180 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6181}
6182
6183#ifndef VBOX
6184static inline void svm_load_seg_cache(target_phys_addr_t addr,
6185#else /* VBOX */
6186DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6187#endif /* VBOX */
6188 CPUState *env, int seg_reg)
6189{
6190 SegmentCache sc1, *sc = &sc1;
6191 svm_load_seg(addr, sc);
6192 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6193 sc->base, sc->limit, sc->flags);
6194}
6195
6196void helper_vmrun(int aflag, int next_eip_addend)
6197{
6198 target_ulong addr;
6199 uint32_t event_inj;
6200 uint32_t int_ctl;
6201
6202 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6203
6204 if (aflag == 2)
6205 addr = EAX;
6206 else
6207 addr = (uint32_t)EAX;
6208
6209 if (loglevel & CPU_LOG_TB_IN_ASM)
6210 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6211
6212 env->vm_vmcb = addr;
6213
6214 /* save the current CPU state in the hsave page */
6215 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6216 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6217
6218 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6219 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6220
6221 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6222 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6223 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6224 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6225 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6226 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6227
6228 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6229 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6230
6231 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6232 &env->segs[R_ES]);
6233 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6234 &env->segs[R_CS]);
6235 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6236 &env->segs[R_SS]);
6237 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6238 &env->segs[R_DS]);
6239
6240 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6241 EIP + next_eip_addend);
6242 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6243 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6244
6245 /* load the interception bitmaps so we do not need to access the
6246 vmcb in svm mode */
6247 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6248 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6249 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6250 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6251 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6252 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6253
6254 /* enable intercepts */
6255 env->hflags |= HF_SVMI_MASK;
6256
6257 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6258
6259 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6260 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6261
6262 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6263 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6264
6265 /* clear exit_info_2 so we behave like the real hardware */
6266 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6267
6268 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6269 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6270 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6271 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6272 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6273 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6274 if (int_ctl & V_INTR_MASKING_MASK) {
6275 env->v_tpr = int_ctl & V_TPR_MASK;
6276 env->hflags2 |= HF2_VINTR_MASK;
6277 if (env->eflags & IF_MASK)
6278 env->hflags2 |= HF2_HIF_MASK;
6279 }
6280
6281 cpu_load_efer(env,
6282 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6283 env->eflags = 0;
6284 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6285 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6286 CC_OP = CC_OP_EFLAGS;
6287
6288 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6289 env, R_ES);
6290 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6291 env, R_CS);
6292 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6293 env, R_SS);
6294 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6295 env, R_DS);
6296
6297 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6298 env->eip = EIP;
6299 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6300 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6301 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6302 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6303 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6304
6305 /* FIXME: guest state consistency checks */
6306
6307 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6308 case TLB_CONTROL_DO_NOTHING:
6309 break;
6310 case TLB_CONTROL_FLUSH_ALL_ASID:
6311 /* FIXME: this is not 100% correct but should work for now */
6312 tlb_flush(env, 1);
6313 break;
6314 }
6315
6316 env->hflags2 |= HF2_GIF_MASK;
6317
6318 if (int_ctl & V_IRQ_MASK) {
6319 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6320 }
6321
6322 /* maybe we need to inject an event */
6323 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6324 if (event_inj & SVM_EVTINJ_VALID) {
6325 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6326 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6327 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6328 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6329
6330 if (loglevel & CPU_LOG_TB_IN_ASM)
6331 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6332 /* FIXME: need to implement valid_err */
6333 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6334 case SVM_EVTINJ_TYPE_INTR:
6335 env->exception_index = vector;
6336 env->error_code = event_inj_err;
6337 env->exception_is_int = 0;
6338 env->exception_next_eip = -1;
6339 if (loglevel & CPU_LOG_TB_IN_ASM)
6340 fprintf(logfile, "INTR");
6341            /* XXX: is it always correct? */
6342 do_interrupt(vector, 0, 0, 0, 1);
6343 break;
6344 case SVM_EVTINJ_TYPE_NMI:
6345 env->exception_index = EXCP02_NMI;
6346 env->error_code = event_inj_err;
6347 env->exception_is_int = 0;
6348 env->exception_next_eip = EIP;
6349 if (loglevel & CPU_LOG_TB_IN_ASM)
6350 fprintf(logfile, "NMI");
6351 cpu_loop_exit();
6352 break;
6353 case SVM_EVTINJ_TYPE_EXEPT:
6354 env->exception_index = vector;
6355 env->error_code = event_inj_err;
6356 env->exception_is_int = 0;
6357 env->exception_next_eip = -1;
6358 if (loglevel & CPU_LOG_TB_IN_ASM)
6359 fprintf(logfile, "EXEPT");
6360 cpu_loop_exit();
6361 break;
6362 case SVM_EVTINJ_TYPE_SOFT:
6363 env->exception_index = vector;
6364 env->error_code = event_inj_err;
6365 env->exception_is_int = 1;
6366 env->exception_next_eip = EIP;
6367 if (loglevel & CPU_LOG_TB_IN_ASM)
6368 fprintf(logfile, "SOFT");
6369 cpu_loop_exit();
6370 break;
6371 }
6372 if (loglevel & CPU_LOG_TB_IN_ASM)
6373 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6374 }
6375}
6376
6377void helper_vmmcall(void)
6378{
6379 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6380 raise_exception(EXCP06_ILLOP);
6381}
6382
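/* VMLOAD/VMSAVE transfer the processor state that VMRUN and #VMEXIT do not
   switch automatically (FS, GS, TR and LDTR including their hidden parts,
   plus KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs) between
   the CPU and the VMCB addressed by rAX. */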
6383void helper_vmload(int aflag)
6384{
6385 target_ulong addr;
6386 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6387
6388 if (aflag == 2)
6389 addr = EAX;
6390 else
6391 addr = (uint32_t)EAX;
6392
6393 if (loglevel & CPU_LOG_TB_IN_ASM)
6394 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6395 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6396 env->segs[R_FS].base);
6397
6398 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6399 env, R_FS);
6400 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6401 env, R_GS);
6402 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6403 &env->tr);
6404 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6405 &env->ldt);
6406
6407#ifdef TARGET_X86_64
6408 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6409 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6410 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6411 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6412#endif
6413 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6414 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6415 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6416 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6417}
6418
6419void helper_vmsave(int aflag)
6420{
6421 target_ulong addr;
6422 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6423
6424 if (aflag == 2)
6425 addr = EAX;
6426 else
6427 addr = (uint32_t)EAX;
6428
6429 if (loglevel & CPU_LOG_TB_IN_ASM)
6430 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6431 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6432 env->segs[R_FS].base);
6433
6434 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6435 &env->segs[R_FS]);
6436 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6437 &env->segs[R_GS]);
6438 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6439 &env->tr);
6440 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6441 &env->ldt);
6442
6443#ifdef TARGET_X86_64
6444 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6445 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6446 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6447 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6448#endif
6449 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6450 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6451 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6452 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6453}
6454
6455void helper_stgi(void)
6456{
6457 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6458 env->hflags2 |= HF2_GIF_MASK;
6459}
6460
6461void helper_clgi(void)
6462{
6463 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6464 env->hflags2 &= ~HF2_GIF_MASK;
6465}
6466
6467void helper_skinit(void)
6468{
6469 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6470 /* XXX: not implemented */
6471 raise_exception(EXCP06_ILLOP);
6472}
6473
6474void helper_invlpga(int aflag)
6475{
6476 target_ulong addr;
6477 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6478
6479 if (aflag == 2)
6480 addr = EAX;
6481 else
6482 addr = (uint32_t)EAX;
6483
6484    /* XXX: could use the ASID to decide whether the flush is
6485       needed at all */
6486 tlb_flush_page(env, addr);
6487}
6488
6489void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6490{
6491 if (likely(!(env->hflags & HF_SVMI_MASK)))
6492 return;
6493#ifndef VBOX
6494 switch(type) {
6495#ifndef VBOX
6496 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6497#else
6498 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6499 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6500 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6501#endif
6502 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6503 helper_vmexit(type, param);
6504 }
6505 break;
6506#ifndef VBOX
6507 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6508#else
6509 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6510 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6511 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6512#endif
6513 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6514 helper_vmexit(type, param);
6515 }
6516 break;
6517 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6518 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6519 helper_vmexit(type, param);
6520 }
6521 break;
6522 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6523 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6524 helper_vmexit(type, param);
6525 }
6526 break;
6527 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6528 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6529 helper_vmexit(type, param);
6530 }
6531 break;
6532 case SVM_EXIT_MSR:
6533 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6534 /* FIXME: this should be read in at vmrun (faster this way?) */
6535 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6536 uint32_t t0, t1;
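            /* MSR permission map: two bits per MSR (read and write intercept)
               in three 2K regions covering MSRs 0000h-1FFFh, C0000000h-C0001FFFh
               and C0010000h-C0011FFFh.  t1 is the byte offset into the map and
               t0 the bit offset within that byte; param selects the read (0)
               or write (1) intercept bit. */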
6537 switch((uint32_t)ECX) {
6538 case 0 ... 0x1fff:
6539 t0 = (ECX * 2) % 8;
6540 t1 = ECX / 8;
6541 break;
6542 case 0xc0000000 ... 0xc0001fff:
6543 t0 = (8192 + ECX - 0xc0000000) * 2;
6544 t1 = (t0 / 8);
6545 t0 %= 8;
6546 break;
6547 case 0xc0010000 ... 0xc0011fff:
6548 t0 = (16384 + ECX - 0xc0010000) * 2;
6549 t1 = (t0 / 8);
6550 t0 %= 8;
6551 break;
6552 default:
6553 helper_vmexit(type, param);
6554 t0 = 0;
6555 t1 = 0;
6556 break;
6557 }
6558 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6559 helper_vmexit(type, param);
6560 }
6561 break;
6562 default:
6563 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6564 helper_vmexit(type, param);
6565 }
6566 break;
6567 }
6568#else
6569 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6570#endif
6571}
6572
6573void helper_svm_check_io(uint32_t port, uint32_t param,
6574 uint32_t next_eip_addend)
6575{
6576 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6577 /* FIXME: this should be read in at vmrun (faster this way?) */
6578 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
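        /* IOPM: one intercept bit per I/O port.  Bits 4-6 of param encode the
           access size in bytes, so the mask covers every port touched by a
           multi-byte access. */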
6579 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6580 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6581 /* next EIP */
6582 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6583 env->eip + next_eip_addend);
6584 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6585 }
6586 }
6587}
6588
6589/* Note: currently only 32 bits of exit_code are used */
6590void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6591{
6592 uint32_t int_ctl;
6593
6594 if (loglevel & CPU_LOG_TB_IN_ASM)
6595 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6596 exit_code, exit_info_1,
6597 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6598 EIP);
6599
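    /* Record whether the guest is still in an interrupt shadow (e.g. right
       after STI or a MOV to SS) so the hypervisor can see it in int_state. */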
6600 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6601 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6602 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6603 } else {
6604 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6605 }
6606
6607 /* Save the VM state in the vmcb */
6608 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6609 &env->segs[R_ES]);
6610 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6611 &env->segs[R_CS]);
6612 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6613 &env->segs[R_SS]);
6614 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6615 &env->segs[R_DS]);
6616
6617 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6618 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6619
6620 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6621 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6622
6623 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6624 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6625 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6626 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6627 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6628
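    /* Propagate the current virtual TPR and any still-pending virtual
       interrupt back into the VMCB int_ctl field. */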
6629 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6630 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6631 int_ctl |= env->v_tpr & V_TPR_MASK;
6632 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6633 int_ctl |= V_IRQ_MASK;
6634 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6635
6636 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6637 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6638 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6639 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6640 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6641 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6642 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6643
6644 /* Reload the host state from vm_hsave */
6645 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6646 env->hflags &= ~HF_SVMI_MASK;
6647 env->intercept = 0;
6648 env->intercept_exceptions = 0;
6649 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6650 env->tsc_offset = 0;
6651
6652 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6653 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6654
6655 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6656 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6657
6658 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6659 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6660 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6661 /* we need to set the efer after the crs so the hidden flags get
6662 set properly */
6663 cpu_load_efer(env,
6664 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6665 env->eflags = 0;
6666 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6667 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6668 CC_OP = CC_OP_EFLAGS;
6669
6670 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6671 env, R_ES);
6672 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6673 env, R_CS);
6674 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6675 env, R_SS);
6676 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6677 env, R_DS);
6678
6679 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6680 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6681 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6682
6683 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6684 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6685
6686 /* other setups */
6687 cpu_x86_set_cpl(env, 0);
6688 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6689 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6690
6691 env->hflags2 &= ~HF2_GIF_MASK;
6692 /* FIXME: Resets the current ASID register to zero (host ASID). */
6693
6694 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6695
6696 /* Clears the TSC_OFFSET inside the processor. */
6697
6698 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6699       from the page table indicated by the host's CR3. If the PDPEs contain
6700 illegal state, the processor causes a shutdown. */
6701
6702 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6703 env->cr[0] |= CR0_PE_MASK;
6704 env->eflags &= ~VM_MASK;
6705
6706 /* Disables all breakpoints in the host DR7 register. */
6707
6708 /* Checks the reloaded host state for consistency. */
6709
6710 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6711 host's code segment or non-canonical (in the case of long mode), a
6712       #GP fault is delivered inside the host. */
6713
6714 /* remove any pending exception */
6715 env->exception_index = -1;
6716 env->error_code = 0;
6717 env->old_exception = -1;
6718
6719 cpu_loop_exit();
6720}
6721
6722#endif
6723
6724/* MMX/SSE */
6725/* XXX: optimize by storing fptt and fptags in the static cpu state */
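/* fptags[] holds one byte per x87 register: 0 = valid, 1 = empty.  Entering
   MMX mode marks all registers valid and resets the stack top; EMMS marks
   them all empty again. */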
6726void helper_enter_mmx(void)
6727{
6728 env->fpstt = 0;
6729 *(uint32_t *)(env->fptags) = 0;
6730 *(uint32_t *)(env->fptags + 4) = 0;
6731}
6732
6733void helper_emms(void)
6734{
6735 /* set to empty state */
6736 *(uint32_t *)(env->fptags) = 0x01010101;
6737 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6738}
6739
6740/* XXX: suppress */
6741void helper_movq(uint64_t *d, uint64_t *s)
6742{
6743 *d = *s;
6744}
6745
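/* ops_sse.h is expanded twice: SHIFT 0 produces the 64-bit MMX helpers,
   SHIFT 1 the 128-bit SSE helpers.  helper_template.h below is expanded once
   per operand size (8, 16, 32 and, on 64-bit targets, 64 bits). */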
6746#define SHIFT 0
6747#include "ops_sse.h"
6748
6749#define SHIFT 1
6750#include "ops_sse.h"
6751
6752#define SHIFT 0
6753#include "helper_template.h"
6754#undef SHIFT
6755
6756#define SHIFT 1
6757#include "helper_template.h"
6758#undef SHIFT
6759
6760#define SHIFT 2
6761#include "helper_template.h"
6762#undef SHIFT
6763
6764#ifdef TARGET_X86_64
6765
6766#define SHIFT 3
6767#include "helper_template.h"
6768#undef SHIFT
6769
6770#endif
6771
6772/* bit operations */
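/* Note: these helpers assume t0 != 0; the translated code is expected to
   test the source operand for zero (setting ZF) before calling them,
   otherwise the loops below would not terminate. */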
6773target_ulong helper_bsf(target_ulong t0)
6774{
6775 int count;
6776 target_ulong res;
6777
6778 res = t0;
6779 count = 0;
6780 while ((res & 1) == 0) {
6781 count++;
6782 res >>= 1;
6783 }
6784 return count;
6785}
6786
6787target_ulong helper_bsr(target_ulong t0)
6788{
6789 int count;
6790 target_ulong res, mask;
6791
6792 res = t0;
6793 count = TARGET_LONG_BITS - 1;
6794 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6795 while ((res & mask) == 0) {
6796 count--;
6797 res <<= 1;
6798 }
6799 return count;
6800}
6801
6802
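/* Lazy EFLAGS evaluation: CC_OP records which operation last set the flags;
   cc_table maps it to a pair of functions that reconstruct either the whole
   flags register (compute_all_*) or just CF (compute_c_*) from CC_SRC/CC_DST. */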
6803static int compute_all_eflags(void)
6804{
6805 return CC_SRC;
6806}
6807
6808static int compute_c_eflags(void)
6809{
6810 return CC_SRC & CC_C;
6811}
6812
6813#ifndef VBOX
6814CCTable cc_table[CC_OP_NB] = {
6815 [CC_OP_DYNAMIC] = { /* should never happen */ },
6816
6817 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6818
6819 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6820 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6821 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6822
6823 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6824 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6825 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6826
6827 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6828 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6829 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6830
6831 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6832 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6833 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6834
6835 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6836 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6837 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6838
6839 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6840 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6841 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6842
6843 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6844 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6845 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6846
6847 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6848 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6849 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6850
6851 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6852 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6853 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6854
6855 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6856 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6857 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6858
6859#ifdef TARGET_X86_64
6860 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6861
6862 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6863
6864 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6865
6866 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6867
6868 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6869
6870 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6871
6872 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6873
6874 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6875
6876 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6877
6878 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6879#endif
6880};
6881#else /* VBOX */
6882/* Sync carefully with cpu.h */
6883CCTable cc_table[CC_OP_NB] = {
6884 /* CC_OP_DYNAMIC */ { 0, 0 },
6885
6886 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6887
6888 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6889 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6890 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6891#ifdef TARGET_X86_64
6892 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6893#else
6894 /* CC_OP_MULQ */ { 0, 0 },
6895#endif
6896
6897 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6898 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6899 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6900#ifdef TARGET_X86_64
6901 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6902#else
6903 /* CC_OP_ADDQ */ { 0, 0 },
6904#endif
6905
6906 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6907 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6908 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6909#ifdef TARGET_X86_64
6910 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6911#else
6912 /* CC_OP_ADCQ */ { 0, 0 },
6913#endif
6914
6915 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6916 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6917 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6918#ifdef TARGET_X86_64
6919 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6920#else
6921 /* CC_OP_SUBQ */ { 0, 0 },
6922#endif
6923
6924 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6925 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6926 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6927#ifdef TARGET_X86_64
6928 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6929#else
6930 /* CC_OP_SBBQ */ { 0, 0 },
6931#endif
6932
6933 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6934 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6935 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6936#ifdef TARGET_X86_64
6937 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6938#else
6939 /* CC_OP_LOGICQ */ { 0, 0 },
6940#endif
6941
6942 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6943 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6944 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6945#ifdef TARGET_X86_64
6946 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6947#else
6948 /* CC_OP_INCQ */ { 0, 0 },
6949#endif
6950
6951 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6952 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6953 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6954#ifdef TARGET_X86_64
6955 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6956#else
6957 /* CC_OP_DECQ */ { 0, 0 },
6958#endif
6959
6960 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6961 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6962 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6963#ifdef TARGET_X86_64
6964 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6965#else
6966 /* CC_OP_SHLQ */ { 0, 0 },
6967#endif
6968
6969 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6970 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6971 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6972#ifdef TARGET_X86_64
6973 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6974#else
6975 /* CC_OP_SARQ */ { 0, 0 },
6976#endif
6977};
6978#endif /* VBOX */