VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 32050

Last change on this file since 32050 was 31532, checked in by vboxsync, 14 years ago

REM: logging; No doxygen @note in code.

1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34#include "qemu-common.h"
35#include <math.h>
36#include "tcg.h"
37#endif
38//#define DEBUG_PCALL
39
40#if 0
41#define raise_exception_err(a, b)\
42do {\
43 if (logfile)\
44 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
45 (raise_exception_err)(a, b);\
46} while (0)
47#endif
48
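/* Parity lookup: parity_table[b] is CC_P when the byte value b contains an
   even number of set bits, matching the x86 definition of PF for the low
   byte of a result. */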
49const uint8_t parity_table[256] = {
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82};
83
84/* modulo 17 table */
85const uint8_t rclw_table[32] = {
86 0, 1, 2, 3, 4, 5, 6, 7,
87 8, 9,10,11,12,13,14,15,
88 16, 0, 1, 2, 3, 4, 5, 6,
89 7, 8, 9,10,11,12,13,14,
90};
91
92/* modulo 9 table */
93const uint8_t rclb_table[32] = {
94 0, 1, 2, 3, 4, 5, 6, 7,
95 8, 0, 1, 2, 3, 4, 5, 6,
96 7, 8, 0, 1, 2, 3, 4, 5,
97 6, 7, 8, 0, 1, 2, 3, 4,
98};
99
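/* x87 constants in extended precision: zero, one, pi, log10(2), ln(2),
   log2(e) and log2(10), used by the FLD constant-load helpers
   (FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T). */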
100const CPU86_LDouble f15rk[7] =
101{
102 0.00000000000000000000L,
103 1.00000000000000000000L,
104 3.14159265358979323851L, /*pi*/
105 0.30102999566398119523L, /*lg2*/
106 0.69314718055994530943L, /*ln2*/
107 1.44269504088896340739L, /*l2e*/
108 3.32192809488736234781L, /*l2t*/
109};
110
111/* broken thread support */
112
113spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
114
115void helper_lock(void)
116{
117 spin_lock(&global_cpu_lock);
118}
119
120void helper_unlock(void)
121{
122 spin_unlock(&global_cpu_lock);
123}
124
125void helper_write_eflags(target_ulong t0, uint32_t update_mask)
126{
127 load_eflags(t0, update_mask);
128}
129
130target_ulong helper_read_eflags(void)
131{
132 uint32_t eflags;
133 eflags = cc_table[CC_OP].compute_all();
134 eflags |= (DF & DF_MASK);
135 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
136 return eflags;
137}
138
139#ifdef VBOX
140void helper_write_eflags_vme(target_ulong t0)
141{
142 unsigned int new_eflags = t0;
143
144 assert(env->eflags & (1<<VM_SHIFT));
145
146 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
147 /* if TF will be set -> #GP */
148 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
149 || (new_eflags & TF_MASK)) {
150 raise_exception(EXCP0D_GPF);
151 } else {
152 load_eflags(new_eflags,
153 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
154
155 if (new_eflags & IF_MASK) {
156 env->eflags |= VIF_MASK;
157 } else {
158 env->eflags &= ~VIF_MASK;
159 }
160 }
161}
162
163target_ulong helper_read_eflags_vme(void)
164{
165 uint32_t eflags;
166 eflags = cc_table[CC_OP].compute_all();
167 eflags |= (DF & DF_MASK);
168 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
169 if (env->eflags & VIF_MASK)
170 eflags |= IF_MASK;
171 else
172 eflags &= ~IF_MASK;
173
174 /* According to AMD manual, should be read with IOPL == 3 */
175 eflags |= (3 << IOPL_SHIFT);
176
 177 /* We only use helper_read_eflags_vme() in 16-bit mode */
178 return eflags & 0xffff;
179}
180
181void helper_dump_state()
182{
183 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
184 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
185 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
186 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
187 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
188 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
189 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
190}
191#endif
192
193/* return non zero if error */
194#ifndef VBOX
195static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
196#else /* VBOX */
197DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
198#endif /* VBOX */
199 int selector)
200{
201 SegmentCache *dt;
202 int index;
203 target_ulong ptr;
204
205#ifdef VBOX
206 /* Trying to load a selector with CPL=1? */
207 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
208 {
209 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
210 selector = selector & 0xfffc;
211 }
212#endif
213
214 if (selector & 0x4)
215 dt = &env->ldt;
216 else
217 dt = &env->gdt;
218 index = selector & ~7;
219 if ((index + 7) > dt->limit)
220 return -1;
221 ptr = dt->base + index;
222 *e1_ptr = ldl_kernel(ptr);
223 *e2_ptr = ldl_kernel(ptr + 4);
224 return 0;
225}
226
227#ifndef VBOX
228static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
229#else /* VBOX */
230DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
231#endif /* VBOX */
232{
233 unsigned int limit;
234 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
235 if (e2 & DESC_G_MASK)
236 limit = (limit << 12) | 0xfff;
237 return limit;
238}
239
240#ifndef VBOX
241static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
242#else /* VBOX */
243DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
244#endif /* VBOX */
245{
246 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
247}
248
249#ifndef VBOX
250static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
251#else /* VBOX */
252DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
253#endif /* VBOX */
254{
255 sc->base = get_seg_base(e1, e2);
256 sc->limit = get_seg_limit(e1, e2);
257 sc->flags = e2;
258}
259
260/* init the segment cache in vm86 mode. */
261#ifndef VBOX
262static inline void load_seg_vm(int seg, int selector)
263#else /* VBOX */
264DECLINLINE(void) load_seg_vm(int seg, int selector)
265#endif /* VBOX */
266{
267 selector &= 0xffff;
268#ifdef VBOX
269 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
270 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
271 flags |= (3 << DESC_DPL_SHIFT);
272
273 cpu_x86_load_seg_cache(env, seg, selector,
274 (selector << 4), 0xffff, flags);
275#else
276 cpu_x86_load_seg_cache(env, seg, selector,
277 (selector << 4), 0xffff, 0);
278#endif
279}
280
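/* Fetch the ring-'dpl' stack pointer (SS:ESP) from the current TSS. The TSS
   type selects the 16-bit or 32-bit layout; 'shift' scales the entry size
   accordingly. */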
281#ifndef VBOX
282static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
283#else /* VBOX */
284DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
285#endif /* VBOX */
286 uint32_t *esp_ptr, int dpl)
287{
288#ifndef VBOX
289 int type, index, shift;
290#else
291 unsigned int type, index, shift;
292#endif
293
294#if 0
295 {
296 int i;
297 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
298 for(i=0;i<env->tr.limit;i++) {
299 printf("%02x ", env->tr.base[i]);
300 if ((i & 7) == 7) printf("\n");
301 }
302 printf("\n");
303 }
304#endif
305
306 if (!(env->tr.flags & DESC_P_MASK))
307 cpu_abort(env, "invalid tss");
308 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
309 if ((type & 7) != 1)
310 cpu_abort(env, "invalid tss type");
311 shift = type >> 3;
312 index = (dpl * 4 + 2) << shift;
313 if (index + (4 << shift) - 1 > env->tr.limit)
314 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
315 if (shift == 0) {
316 *esp_ptr = lduw_kernel(env->tr.base + index);
317 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
318 } else {
319 *esp_ptr = ldl_kernel(env->tr.base + index);
320 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
321 }
322}
323
324/* XXX: merge with load_seg() */
325static void tss_load_seg(int seg_reg, int selector)
326{
327 uint32_t e1, e2;
328 int rpl, dpl, cpl;
329
330#ifdef VBOX
331 e1 = e2 = 0;
332 cpl = env->hflags & HF_CPL_MASK;
333 /* Trying to load a selector with CPL=1? */
334 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
335 {
336 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
337 selector = selector & 0xfffc;
338 }
339#endif
340
341 if ((selector & 0xfffc) != 0) {
342 if (load_segment(&e1, &e2, selector) != 0)
343 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
344 if (!(e2 & DESC_S_MASK))
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 rpl = selector & 3;
347 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
348 cpl = env->hflags & HF_CPL_MASK;
349 if (seg_reg == R_CS) {
350 if (!(e2 & DESC_CS_MASK))
351 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
352 /* XXX: is it correct ? */
353 if (dpl != rpl)
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if ((e2 & DESC_C_MASK) && dpl > rpl)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 } else if (seg_reg == R_SS) {
358 /* SS must be writable data */
359 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 if (dpl != cpl || dpl != rpl)
362 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
363 } else {
364 /* not readable code */
365 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
 367 /* if data or non-conforming code, check the rights */
368 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
369 if (dpl < cpl || dpl < rpl)
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 }
372 }
373 if (!(e2 & DESC_P_MASK))
374 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
375 cpu_x86_load_seg_cache(env, seg_reg, selector,
376 get_seg_base(e1, e2),
377 get_seg_limit(e1, e2),
378 e2);
379 } else {
380 if (seg_reg == R_SS || seg_reg == R_CS)
381 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
382#ifdef VBOX
383#if 0
 384 /** @todo: for now we ignore loading null (0) selectors; need to check what the correct behaviour is */
385 cpu_x86_load_seg_cache(env, seg_reg, selector,
386 0, 0, 0);
387#endif
388#endif
389 }
390}
391
392#define SWITCH_TSS_JMP 0
393#define SWITCH_TSS_IRET 1
394#define SWITCH_TSS_CALL 2
395
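/* Hardware task switch. Validates the target TSS (following a task gate if
   necessary), saves the outgoing register state into the current TSS, then
   loads EIP/EFLAGS/general registers, the LDT and the segment registers from
   the new TSS. 'source' selects JMP/IRET/CALL semantics, which differ in how
   the TSS busy bit and the NT flag / back link are handled. */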
396/* XXX: restore CPU state in registers (PowerPC case) */
397static void switch_tss(int tss_selector,
398 uint32_t e1, uint32_t e2, int source,
399 uint32_t next_eip)
400{
401 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
402 target_ulong tss_base;
403 uint32_t new_regs[8], new_segs[6];
404 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
405 uint32_t old_eflags, eflags_mask;
406 SegmentCache *dt;
407#ifndef VBOX
408 int index;
409#else
410 unsigned int index;
411#endif
412 target_ulong ptr;
413
414 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
415#ifdef DEBUG_PCALL
416 if (loglevel & CPU_LOG_PCALL)
417 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
418#endif
419
420#if defined(VBOX) && defined(DEBUG)
421 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
422#endif
423
424 /* if task gate, we read the TSS segment and we load it */
425 if (type == 5) {
426 if (!(e2 & DESC_P_MASK))
427 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
428 tss_selector = e1 >> 16;
429 if (tss_selector & 4)
430 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
431 if (load_segment(&e1, &e2, tss_selector) != 0)
432 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
433 if (e2 & DESC_S_MASK)
434 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
435 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
436 if ((type & 7) != 1)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 }
439
440 if (!(e2 & DESC_P_MASK))
441 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
442
443 if (type & 8)
444 tss_limit_max = 103;
445 else
446 tss_limit_max = 43;
447 tss_limit = get_seg_limit(e1, e2);
448 tss_base = get_seg_base(e1, e2);
449 if ((tss_selector & 4) != 0 ||
450 tss_limit < tss_limit_max)
451 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
452 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
453 if (old_type & 8)
454 old_tss_limit_max = 103;
455 else
456 old_tss_limit_max = 43;
457
458 /* read all the registers from the new TSS */
459 if (type & 8) {
460 /* 32 bit */
461 new_cr3 = ldl_kernel(tss_base + 0x1c);
462 new_eip = ldl_kernel(tss_base + 0x20);
463 new_eflags = ldl_kernel(tss_base + 0x24);
464 for(i = 0; i < 8; i++)
465 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
466 for(i = 0; i < 6; i++)
467 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
468 new_ldt = lduw_kernel(tss_base + 0x60);
469 new_trap = ldl_kernel(tss_base + 0x64);
470 } else {
471 /* 16 bit */
472 new_cr3 = 0;
473 new_eip = lduw_kernel(tss_base + 0x0e);
474 new_eflags = lduw_kernel(tss_base + 0x10);
475 for(i = 0; i < 8; i++)
476 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
477 for(i = 0; i < 4; i++)
478 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
479 new_ldt = lduw_kernel(tss_base + 0x2a);
480 new_segs[R_FS] = 0;
481 new_segs[R_GS] = 0;
482 new_trap = 0;
483 }
484
485 /* NOTE: we must avoid memory exceptions during the task switch,
486 so we make dummy accesses before */
487 /* XXX: it can still fail in some cases, so a bigger hack is
 488 necessary to validate the TLB after having done the accesses */
489
490 v1 = ldub_kernel(env->tr.base);
491 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
492 stb_kernel(env->tr.base, v1);
493 stb_kernel(env->tr.base + old_tss_limit_max, v2);
494
495 /* clear busy bit (it is restartable) */
496 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
497 target_ulong ptr;
498 uint32_t e2;
499 ptr = env->gdt.base + (env->tr.selector & ~7);
500 e2 = ldl_kernel(ptr + 4);
501 e2 &= ~DESC_TSS_BUSY_MASK;
502 stl_kernel(ptr + 4, e2);
503 }
504 old_eflags = compute_eflags();
505 if (source == SWITCH_TSS_IRET)
506 old_eflags &= ~NT_MASK;
507
508 /* save the current state in the old TSS */
509 if (type & 8) {
510 /* 32 bit */
511 stl_kernel(env->tr.base + 0x20, next_eip);
512 stl_kernel(env->tr.base + 0x24, old_eflags);
513 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
514 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
515 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
516 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
517 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
518 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
519 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
520 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
521 for(i = 0; i < 6; i++)
522 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
523#ifdef VBOX
524 /* Must store the ldt as it gets reloaded and might have been changed. */
525 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
526#endif
527#if defined(VBOX) && defined(DEBUG)
528 printf("TSS 32 bits switch\n");
529 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
530#endif
531 } else {
532 /* 16 bit */
533 stw_kernel(env->tr.base + 0x0e, next_eip);
534 stw_kernel(env->tr.base + 0x10, old_eflags);
535 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
536 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
537 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
538 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
539 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
540 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
541 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
542 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
543 for(i = 0; i < 4; i++)
544 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
545#ifdef VBOX
546 /* Must store the ldt as it gets reloaded and might have been changed. */
547 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
548#endif
549 }
550
 551 /* now if an exception occurs, it will occur in the next task
552 context */
553
554 if (source == SWITCH_TSS_CALL) {
555 stw_kernel(tss_base, env->tr.selector);
556 new_eflags |= NT_MASK;
557 }
558
559 /* set busy bit */
560 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
561 target_ulong ptr;
562 uint32_t e2;
563 ptr = env->gdt.base + (tss_selector & ~7);
564 e2 = ldl_kernel(ptr + 4);
565 e2 |= DESC_TSS_BUSY_MASK;
566 stl_kernel(ptr + 4, e2);
567 }
568
569 /* set the new CPU state */
570 /* from this point, any exception which occurs can give problems */
571 env->cr[0] |= CR0_TS_MASK;
572 env->hflags |= HF_TS_MASK;
573 env->tr.selector = tss_selector;
574 env->tr.base = tss_base;
575 env->tr.limit = tss_limit;
576 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
577
578 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
579 cpu_x86_update_cr3(env, new_cr3);
580 }
581
582 /* load all registers without an exception, then reload them with
583 possible exception */
584 env->eip = new_eip;
585 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
586 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
587 if (!(type & 8))
588 eflags_mask &= 0xffff;
589 load_eflags(new_eflags, eflags_mask);
590 /* XXX: what to do in 16 bit case ? */
591 EAX = new_regs[0];
592 ECX = new_regs[1];
593 EDX = new_regs[2];
594 EBX = new_regs[3];
595 ESP = new_regs[4];
596 EBP = new_regs[5];
597 ESI = new_regs[6];
598 EDI = new_regs[7];
599 if (new_eflags & VM_MASK) {
600 for(i = 0; i < 6; i++)
601 load_seg_vm(i, new_segs[i]);
602 /* in vm86, CPL is always 3 */
603 cpu_x86_set_cpl(env, 3);
604 } else {
 605 /* CPL is set to the RPL of CS */
606 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
607 /* first just selectors as the rest may trigger exceptions */
608 for(i = 0; i < 6; i++)
609 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
610 }
611
612 env->ldt.selector = new_ldt & ~4;
613 env->ldt.base = 0;
614 env->ldt.limit = 0;
615 env->ldt.flags = 0;
616
617 /* load the LDT */
618 if (new_ldt & 4)
619 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
620
621 if ((new_ldt & 0xfffc) != 0) {
622 dt = &env->gdt;
623 index = new_ldt & ~7;
624 if ((index + 7) > dt->limit)
625 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
626 ptr = dt->base + index;
627 e1 = ldl_kernel(ptr);
628 e2 = ldl_kernel(ptr + 4);
629 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
630 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
631 if (!(e2 & DESC_P_MASK))
632 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
633 load_seg_cache_raw_dt(&env->ldt, e1, e2);
634 }
635
636 /* load the segments */
637 if (!(new_eflags & VM_MASK)) {
638 tss_load_seg(R_CS, new_segs[R_CS]);
639 tss_load_seg(R_SS, new_segs[R_SS]);
640 tss_load_seg(R_ES, new_segs[R_ES]);
641 tss_load_seg(R_DS, new_segs[R_DS]);
642 tss_load_seg(R_FS, new_segs[R_FS]);
643 tss_load_seg(R_GS, new_segs[R_GS]);
644 }
645
646 /* check that EIP is in the CS segment limits */
647 if (new_eip > env->segs[R_CS].limit) {
648 /* XXX: different exception if CALL ? */
649 raise_exception_err(EXCP0D_GPF, 0);
650 }
651}
652
653/* check if Port I/O is allowed in TSS */
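/* The I/O permission bitmap starts at the 16-bit offset stored at byte 0x66
   of the 32-bit TSS; an access of 'size' bytes is allowed only if all 'size'
   bits starting at bit 'addr' are clear. E.g. check_io(0x3f8, 1) tests bit
   (0x3f8 & 7) of the byte at io_offset + (0x3f8 >> 3). */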
654#ifndef VBOX
655static inline void check_io(int addr, int size)
656{
657 int io_offset, val, mask;
658
659#else /* VBOX */
660DECLINLINE(void) check_io(int addr, int size)
661{
662 int val, mask;
663 unsigned int io_offset;
664#endif /* VBOX */
665 /* TSS must be a valid 32 bit one */
666 if (!(env->tr.flags & DESC_P_MASK) ||
667 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
668 env->tr.limit < 103)
669 goto fail;
670 io_offset = lduw_kernel(env->tr.base + 0x66);
671 io_offset += (addr >> 3);
672 /* Note: the check needs two bytes */
673 if ((io_offset + 1) > env->tr.limit)
674 goto fail;
675 val = lduw_kernel(env->tr.base + io_offset);
676 val >>= (addr & 7);
677 mask = (1 << size) - 1;
678 /* all bits must be zero to allow the I/O */
679 if ((val & mask) != 0) {
680 fail:
681 raise_exception_err(EXCP0D_GPF, 0);
682 }
683}
684
685#ifdef VBOX
686/* Keep in sync with gen_check_external_event() */
687void helper_check_external_event()
688{
689 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
690 | CPU_INTERRUPT_EXTERNAL_TIMER
691 | CPU_INTERRUPT_EXTERNAL_DMA))
692 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
693 && (env->eflags & IF_MASK)
694 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
695 {
696 helper_external_event();
697 }
698
699}
700
701void helper_sync_seg(uint32_t reg)
702{
703 if (env->segs[reg].newselector)
704 sync_seg(env, reg, env->segs[reg].newselector);
705}
706#endif
707
708void helper_check_iob(uint32_t t0)
709{
710 check_io(t0, 1);
711}
712
713void helper_check_iow(uint32_t t0)
714{
715 check_io(t0, 2);
716}
717
718void helper_check_iol(uint32_t t0)
719{
720 check_io(t0, 4);
721}
722
723void helper_outb(uint32_t port, uint32_t data)
724{
725 cpu_outb(env, port, data & 0xff);
726}
727
728target_ulong helper_inb(uint32_t port)
729{
730 return cpu_inb(env, port);
731}
732
733void helper_outw(uint32_t port, uint32_t data)
734{
735 cpu_outw(env, port, data & 0xffff);
736}
737
738target_ulong helper_inw(uint32_t port)
739{
740 return cpu_inw(env, port);
741}
742
743void helper_outl(uint32_t port, uint32_t data)
744{
745 cpu_outl(env, port, data);
746}
747
748target_ulong helper_inl(uint32_t port)
749{
750 return cpu_inl(env, port);
751}
752
753#ifndef VBOX
754static inline unsigned int get_sp_mask(unsigned int e2)
755#else /* VBOX */
756DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
757#endif /* VBOX */
758{
759 if (e2 & DESC_B_MASK)
760 return 0xffffffff;
761 else
762 return 0xffff;
763}
764
765#ifdef TARGET_X86_64
766#define SET_ESP(val, sp_mask)\
767do {\
768 if ((sp_mask) == 0xffff)\
769 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
770 else if ((sp_mask) == 0xffffffffLL)\
771 ESP = (uint32_t)(val);\
772 else\
773 ESP = (val);\
774} while (0)
775#else
776#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
777#endif
778
779/* in 64-bit machines, this can overflow. So this segment addition macro
780 * can be used to trim the value to 32-bit whenever needed */
781#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
782
783/* XXX: add a is_user flag to have proper security support */
784#define PUSHW(ssp, sp, sp_mask, val)\
785{\
786 sp -= 2;\
787 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
788}
789
790#define PUSHL(ssp, sp, sp_mask, val)\
791{\
792 sp -= 4;\
793 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
794}
795
796#define POPW(ssp, sp, sp_mask, val)\
797{\
798 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
799 sp += 2;\
800}
801
802#define POPL(ssp, sp, sp_mask, val)\
803{\
804 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
805 sp += 4;\
806}
807
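/* Protected-mode interrupt/exception dispatch: looks up the vector in the
   IDT, handles task gates via switch_tss(), checks gate and code-segment
   privileges, optionally switches to the inner-privilege stack taken from
   the TSS, and pushes (SS:ESP,) EFLAGS, CS:EIP and any error code before
   jumping to the handler. */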
808/* protected mode interrupt */
809static void do_interrupt_protected(int intno, int is_int, int error_code,
810 unsigned int next_eip, int is_hw)
811{
812 SegmentCache *dt;
813 target_ulong ptr, ssp;
814 int type, dpl, selector, ss_dpl, cpl;
815 int has_error_code, new_stack, shift;
816 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
817 uint32_t old_eip, sp_mask;
818
819#ifdef VBOX
820 ss = ss_e1 = ss_e2 = 0;
821 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
822 cpu_loop_exit();
823#endif
824
825 has_error_code = 0;
826 if (!is_int && !is_hw) {
827 switch(intno) {
828 case 8:
829 case 10:
830 case 11:
831 case 12:
832 case 13:
833 case 14:
834 case 17:
835 has_error_code = 1;
836 break;
837 }
838 }
839 if (is_int)
840 old_eip = next_eip;
841 else
842 old_eip = env->eip;
843
844 dt = &env->idt;
845#ifndef VBOX
846 if (intno * 8 + 7 > dt->limit)
847#else
848 if ((unsigned)intno * 8 + 7 > dt->limit)
849#endif
850 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
851 ptr = dt->base + intno * 8;
852 e1 = ldl_kernel(ptr);
853 e2 = ldl_kernel(ptr + 4);
854 /* check gate type */
855 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
856 switch(type) {
857 case 5: /* task gate */
858 /* must do that check here to return the correct error code */
859 if (!(e2 & DESC_P_MASK))
860 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
861 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
862 if (has_error_code) {
863 int type;
864 uint32_t mask;
865 /* push the error code */
866 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
867 shift = type >> 3;
868 if (env->segs[R_SS].flags & DESC_B_MASK)
869 mask = 0xffffffff;
870 else
871 mask = 0xffff;
872 esp = (ESP - (2 << shift)) & mask;
873 ssp = env->segs[R_SS].base + esp;
874 if (shift)
875 stl_kernel(ssp, error_code);
876 else
877 stw_kernel(ssp, error_code);
878 SET_ESP(esp, mask);
879 }
880 return;
881 case 6: /* 286 interrupt gate */
882 case 7: /* 286 trap gate */
883 case 14: /* 386 interrupt gate */
884 case 15: /* 386 trap gate */
885 break;
886 default:
887 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
888 break;
889 }
890 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
891 cpl = env->hflags & HF_CPL_MASK;
892 /* check privilege if software int */
893 if (is_int && dpl < cpl)
894 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
895 /* check valid bit */
896 if (!(e2 & DESC_P_MASK))
897 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
898 selector = e1 >> 16;
899 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
900 if ((selector & 0xfffc) == 0)
901 raise_exception_err(EXCP0D_GPF, 0);
902
903 if (load_segment(&e1, &e2, selector) != 0)
904 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
905 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
906 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
907 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
908 if (dpl > cpl)
909 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
910 if (!(e2 & DESC_P_MASK))
911 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
912 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
913 /* to inner privilege */
914 get_ss_esp_from_tss(&ss, &esp, dpl);
915 if ((ss & 0xfffc) == 0)
916 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
917 if ((ss & 3) != dpl)
918 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
919 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
922 if (ss_dpl != dpl)
923 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
924 if (!(ss_e2 & DESC_S_MASK) ||
925 (ss_e2 & DESC_CS_MASK) ||
926 !(ss_e2 & DESC_W_MASK))
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if (!(ss_e2 & DESC_P_MASK))
929#ifdef VBOX /* See page 3-477 of 253666.pdf */
930 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
931#else
932 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
933#endif
934 new_stack = 1;
935 sp_mask = get_sp_mask(ss_e2);
936 ssp = get_seg_base(ss_e1, ss_e2);
937#if defined(VBOX) && defined(DEBUG)
938 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
939#endif
940 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
941 /* to same privilege */
942 if (env->eflags & VM_MASK)
943 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
944 new_stack = 0;
945 sp_mask = get_sp_mask(env->segs[R_SS].flags);
946 ssp = env->segs[R_SS].base;
947 esp = ESP;
948 dpl = cpl;
949 } else {
950 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
951 new_stack = 0; /* avoid warning */
952 sp_mask = 0; /* avoid warning */
953 ssp = 0; /* avoid warning */
954 esp = 0; /* avoid warning */
955 }
956
957 shift = type >> 3;
958
959#if 0
960 /* XXX: check that enough room is available */
961 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
962 if (env->eflags & VM_MASK)
963 push_size += 8;
964 push_size <<= shift;
965#endif
966 if (shift == 1) {
967 if (new_stack) {
968 if (env->eflags & VM_MASK) {
969 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
970 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
971 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
972 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
973 }
974 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
975 PUSHL(ssp, esp, sp_mask, ESP);
976 }
977 PUSHL(ssp, esp, sp_mask, compute_eflags());
978 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
979 PUSHL(ssp, esp, sp_mask, old_eip);
980 if (has_error_code) {
981 PUSHL(ssp, esp, sp_mask, error_code);
982 }
983 } else {
984 if (new_stack) {
985 if (env->eflags & VM_MASK) {
986 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
987 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
988 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
989 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
990 }
991 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
992 PUSHW(ssp, esp, sp_mask, ESP);
993 }
994 PUSHW(ssp, esp, sp_mask, compute_eflags());
995 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
996 PUSHW(ssp, esp, sp_mask, old_eip);
997 if (has_error_code) {
998 PUSHW(ssp, esp, sp_mask, error_code);
999 }
1000 }
1001
1002 if (new_stack) {
1003 if (env->eflags & VM_MASK) {
1004 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1005 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1006 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1007 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1008 }
1009 ss = (ss & ~3) | dpl;
1010 cpu_x86_load_seg_cache(env, R_SS, ss,
1011 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1012 }
1013 SET_ESP(esp, sp_mask);
1014
1015 selector = (selector & ~3) | dpl;
1016 cpu_x86_load_seg_cache(env, R_CS, selector,
1017 get_seg_base(e1, e2),
1018 get_seg_limit(e1, e2),
1019 e2);
1020 cpu_x86_set_cpl(env, dpl);
1021 env->eip = offset;
1022
1023 /* interrupt gate clear IF mask */
1024 if ((type & 1) == 0) {
1025 env->eflags &= ~IF_MASK;
1026 }
1027#ifndef VBOX
1028 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1029#else
1030 /*
1031 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1032 * gets confused by seemingly changed EFLAGS. See #3491 and
1033 * public bug #2341.
1034 */
1035 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1036#endif
1037}
1038#ifdef VBOX
1039
1040/* check if VME interrupt redirection is enabled in TSS */
1041DECLINLINE(bool) is_vme_irq_redirected(int intno)
1042{
1043 unsigned int io_offset, intredir_offset;
1044 unsigned char val, mask;
1045
1046 /* TSS must be a valid 32 bit one */
1047 if (!(env->tr.flags & DESC_P_MASK) ||
1048 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1049 env->tr.limit < 103)
1050 goto fail;
1051 io_offset = lduw_kernel(env->tr.base + 0x66);
1052 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1053 if (io_offset < 0x68 + 0x20)
1054 io_offset = 0x68 + 0x20;
1055 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1056 intredir_offset = io_offset - 0x20;
1057
1058 intredir_offset += (intno >> 3);
1059 if ((intredir_offset) > env->tr.limit)
1060 goto fail;
1061
1062 val = ldub_kernel(env->tr.base + intredir_offset);
1063 mask = 1 << (unsigned char)(intno & 7);
1064
1065 /* bit set means no redirection. */
1066 if ((val & mask) != 0) {
1067 return false;
1068 }
1069 return true;
1070
1071fail:
1072 raise_exception_err(EXCP0D_GPF, 0);
1073 return true;
1074}
1075
1076/* V86 mode software interrupt with CR4.VME=1 */
1077static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1078{
1079 target_ulong ptr, ssp;
1080 int selector;
1081 uint32_t offset, esp;
1082 uint32_t old_cs, old_eflags;
1083 uint32_t iopl;
1084
1085 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1086
1087 if (!is_vme_irq_redirected(intno))
1088 {
1089 if (iopl == 3)
1090 {
1091 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1092 return;
1093 }
1094 else
1095 raise_exception_err(EXCP0D_GPF, 0);
1096 }
1097
1098 /* virtual mode idt is at linear address 0 */
1099 ptr = 0 + intno * 4;
1100 offset = lduw_kernel(ptr);
1101 selector = lduw_kernel(ptr + 2);
1102 esp = ESP;
1103 ssp = env->segs[R_SS].base;
1104 old_cs = env->segs[R_CS].selector;
1105
1106 old_eflags = compute_eflags();
1107 if (iopl < 3)
1108 {
1109 /* copy VIF into IF and set IOPL to 3 */
1110 if (env->eflags & VIF_MASK)
1111 old_eflags |= IF_MASK;
1112 else
1113 old_eflags &= ~IF_MASK;
1114
1115 old_eflags |= (3 << IOPL_SHIFT);
1116 }
1117
1118 /* XXX: use SS segment size ? */
1119 PUSHW(ssp, esp, 0xffff, old_eflags);
1120 PUSHW(ssp, esp, 0xffff, old_cs);
1121 PUSHW(ssp, esp, 0xffff, next_eip);
1122
1123 /* update processor state */
1124 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1125 env->eip = offset;
1126 env->segs[R_CS].selector = selector;
1127 env->segs[R_CS].base = (selector << 4);
1128 env->eflags &= ~(TF_MASK | RF_MASK);
1129
1130 if (iopl < 3)
1131 env->eflags &= ~VIF_MASK;
1132 else
1133 env->eflags &= ~IF_MASK;
1134}
1135#endif /* VBOX */
1136
1137#ifdef TARGET_X86_64
1138
1139#define PUSHQ(sp, val)\
1140{\
1141 sp -= 8;\
1142 stq_kernel(sp, (val));\
1143}
1144
1145#define POPQ(sp, val)\
1146{\
1147 val = ldq_kernel(sp);\
1148 sp += 8;\
1149}
1150
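/* Read a stack pointer from the 64-bit TSS: levels 0-2 return RSP0-RSP2 and
   levels 4-10 return IST1-IST7 (callers pass ist + 3); each entry is a
   64-bit value at offset 8 * level + 4. */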
1151#ifndef VBOX
1152static inline target_ulong get_rsp_from_tss(int level)
1153#else /* VBOX */
1154DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1155#endif /* VBOX */
1156{
1157 int index;
1158
1159#if 0
1160 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1161 env->tr.base, env->tr.limit);
1162#endif
1163
1164 if (!(env->tr.flags & DESC_P_MASK))
1165 cpu_abort(env, "invalid tss");
1166 index = 8 * level + 4;
1167 if ((index + 7) > env->tr.limit)
1168 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1169 return ldq_kernel(env->tr.base + index);
1170}
1171
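/* Long-mode interrupt dispatch: IDT entries are 16 bytes and only 64-bit
   interrupt/trap gates are valid. The handler runs on a 16-byte aligned
   stack taken from the IST or from the TSS RSP for the target privilege
   level, and SS:RSP, RFLAGS, CS:RIP and any error code are pushed as
   64-bit values. */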
1172/* 64 bit interrupt */
1173static void do_interrupt64(int intno, int is_int, int error_code,
1174 target_ulong next_eip, int is_hw)
1175{
1176 SegmentCache *dt;
1177 target_ulong ptr;
1178 int type, dpl, selector, cpl, ist;
1179 int has_error_code, new_stack;
1180 uint32_t e1, e2, e3, ss;
1181 target_ulong old_eip, esp, offset;
1182
1183#ifdef VBOX
1184 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1185 cpu_loop_exit();
1186#endif
1187
1188 has_error_code = 0;
1189 if (!is_int && !is_hw) {
1190 switch(intno) {
1191 case 8:
1192 case 10:
1193 case 11:
1194 case 12:
1195 case 13:
1196 case 14:
1197 case 17:
1198 has_error_code = 1;
1199 break;
1200 }
1201 }
1202 if (is_int)
1203 old_eip = next_eip;
1204 else
1205 old_eip = env->eip;
1206
1207 dt = &env->idt;
1208 if (intno * 16 + 15 > dt->limit)
1209 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1210 ptr = dt->base + intno * 16;
1211 e1 = ldl_kernel(ptr);
1212 e2 = ldl_kernel(ptr + 4);
1213 e3 = ldl_kernel(ptr + 8);
1214 /* check gate type */
1215 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1216 switch(type) {
1217 case 14: /* 386 interrupt gate */
1218 case 15: /* 386 trap gate */
1219 break;
1220 default:
1221 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1222 break;
1223 }
1224 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1225 cpl = env->hflags & HF_CPL_MASK;
1226 /* check privilege if software int */
1227 if (is_int && dpl < cpl)
1228 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1229 /* check valid bit */
1230 if (!(e2 & DESC_P_MASK))
1231 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1232 selector = e1 >> 16;
1233 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1234 ist = e2 & 7;
1235 if ((selector & 0xfffc) == 0)
1236 raise_exception_err(EXCP0D_GPF, 0);
1237
1238 if (load_segment(&e1, &e2, selector) != 0)
1239 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1240 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1241 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1242 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1243 if (dpl > cpl)
1244 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1245 if (!(e2 & DESC_P_MASK))
1246 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1247 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1248 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1249 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1250 /* to inner privilege */
1251 if (ist != 0)
1252 esp = get_rsp_from_tss(ist + 3);
1253 else
1254 esp = get_rsp_from_tss(dpl);
1255 esp &= ~0xfLL; /* align stack */
1256 ss = 0;
1257 new_stack = 1;
1258 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1259 /* to same privilege */
1260 if (env->eflags & VM_MASK)
1261 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1262 new_stack = 0;
1263 if (ist != 0)
1264 esp = get_rsp_from_tss(ist + 3);
1265 else
1266 esp = ESP;
1267 esp &= ~0xfLL; /* align stack */
1268 dpl = cpl;
1269 } else {
1270 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1271 new_stack = 0; /* avoid warning */
1272 esp = 0; /* avoid warning */
1273 }
1274
1275 PUSHQ(esp, env->segs[R_SS].selector);
1276 PUSHQ(esp, ESP);
1277 PUSHQ(esp, compute_eflags());
1278 PUSHQ(esp, env->segs[R_CS].selector);
1279 PUSHQ(esp, old_eip);
1280 if (has_error_code) {
1281 PUSHQ(esp, error_code);
1282 }
1283
1284 if (new_stack) {
1285 ss = 0 | dpl;
1286 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1287 }
1288 ESP = esp;
1289
1290 selector = (selector & ~3) | dpl;
1291 cpu_x86_load_seg_cache(env, R_CS, selector,
1292 get_seg_base(e1, e2),
1293 get_seg_limit(e1, e2),
1294 e2);
1295 cpu_x86_set_cpl(env, dpl);
1296 env->eip = offset;
1297
1298 /* interrupt gate clear IF mask */
1299 if ((type & 1) == 0) {
1300 env->eflags &= ~IF_MASK;
1301 }
1302
1303#ifndef VBOX
1304 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1305#else
1306 /*
1307 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1308 * gets confused by seemingly changed EFLAGS. See #3491 and
1309 * public bug #2341.
1310 */
1311 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1312#endif
1313}
1314#endif
1315
1316#if defined(CONFIG_USER_ONLY)
1317void helper_syscall(int next_eip_addend)
1318{
1319 env->exception_index = EXCP_SYSCALL;
1320 env->exception_next_eip = env->eip + next_eip_addend;
1321 cpu_loop_exit();
1322}
1323#else
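/* SYSCALL: the new CS/SS selectors come from MSR_STAR[47:32] and the return
   EIP/RIP is saved in (E/R)CX. In long mode RFLAGS is additionally saved in
   R11 and masked with MSR_SFMASK (env->fmask), and RIP is loaded from LSTAR
   (64-bit caller) or CSTAR (compatibility mode); in legacy mode EIP comes
   from STAR[31:0] and only IF/RF/VM are cleared. */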
1324void helper_syscall(int next_eip_addend)
1325{
1326 int selector;
1327
1328 if (!(env->efer & MSR_EFER_SCE)) {
1329 raise_exception_err(EXCP06_ILLOP, 0);
1330 }
1331 selector = (env->star >> 32) & 0xffff;
1332#ifdef TARGET_X86_64
1333 if (env->hflags & HF_LMA_MASK) {
1334 int code64;
1335
1336 ECX = env->eip + next_eip_addend;
1337 env->regs[11] = compute_eflags();
1338
1339 code64 = env->hflags & HF_CS64_MASK;
1340
1341 cpu_x86_set_cpl(env, 0);
1342 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1343 0, 0xffffffff,
1344 DESC_G_MASK | DESC_P_MASK |
1345 DESC_S_MASK |
1346 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1347 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1348 0, 0xffffffff,
1349 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1350 DESC_S_MASK |
1351 DESC_W_MASK | DESC_A_MASK);
1352 env->eflags &= ~env->fmask;
1353 load_eflags(env->eflags, 0);
1354 if (code64)
1355 env->eip = env->lstar;
1356 else
1357 env->eip = env->cstar;
1358 } else
1359#endif
1360 {
1361 ECX = (uint32_t)(env->eip + next_eip_addend);
1362
1363 cpu_x86_set_cpl(env, 0);
1364 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1365 0, 0xffffffff,
1366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1367 DESC_S_MASK |
1368 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1369 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1370 0, 0xffffffff,
1371 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1372 DESC_S_MASK |
1373 DESC_W_MASK | DESC_A_MASK);
1374 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1375 env->eip = (uint32_t)env->star;
1376 }
1377}
1378#endif
1379
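/* SYSRET: returns to user mode (CPL 3) with selectors derived from
   MSR_STAR[63:48]. In long mode RFLAGS is restored from R11 and RIP from
   RCX (truncated to 32 bits unless dflag == 2 selects a 64-bit code
   segment); in legacy mode execution continues at ECX with IF forced on. */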
1380void helper_sysret(int dflag)
1381{
1382 int cpl, selector;
1383
1384 if (!(env->efer & MSR_EFER_SCE)) {
1385 raise_exception_err(EXCP06_ILLOP, 0);
1386 }
1387 cpl = env->hflags & HF_CPL_MASK;
1388 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1389 raise_exception_err(EXCP0D_GPF, 0);
1390 }
1391 selector = (env->star >> 48) & 0xffff;
1392#ifdef TARGET_X86_64
1393 if (env->hflags & HF_LMA_MASK) {
1394 if (dflag == 2) {
1395 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1396 0, 0xffffffff,
1397 DESC_G_MASK | DESC_P_MASK |
1398 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1399 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1400 DESC_L_MASK);
1401 env->eip = ECX;
1402 } else {
1403 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1404 0, 0xffffffff,
1405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1406 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1407 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1408 env->eip = (uint32_t)ECX;
1409 }
1410 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1411 0, 0xffffffff,
1412 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1413 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1414 DESC_W_MASK | DESC_A_MASK);
1415 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1416 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1417 cpu_x86_set_cpl(env, 3);
1418 } else
1419#endif
1420 {
1421 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1422 0, 0xffffffff,
1423 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1424 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1425 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1426 env->eip = (uint32_t)ECX;
1427 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1428 0, 0xffffffff,
1429 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1430 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1431 DESC_W_MASK | DESC_A_MASK);
1432 env->eflags |= IF_MASK;
1433 cpu_x86_set_cpl(env, 3);
1434 }
1435#ifdef USE_KQEMU
1436 if (kqemu_is_ok(env)) {
1437 if (env->hflags & HF_LMA_MASK)
1438 CC_OP = CC_OP_EFLAGS;
1439 env->exception_index = -1;
1440 cpu_loop_exit();
1441 }
1442#endif
1443}
1444
1445#ifdef VBOX
1446/**
1447 * Checks and processes external VMM events.
1448 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1449 */
1450void helper_external_event(void)
1451{
1452#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1453 uintptr_t uSP;
1454# ifdef RT_ARCH_AMD64
1455 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1456# else
1457 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1458# endif
1459 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1460#endif
1461 /* Keep in sync with flags checked by gen_check_external_event() */
1462 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1463 {
1464 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1465 ~CPU_INTERRUPT_EXTERNAL_HARD);
1466 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1467 }
1468 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1469 {
1470 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1471 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1472 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1473 }
1474 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1475 {
1476 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1477 ~CPU_INTERRUPT_EXTERNAL_DMA);
1478 remR3DmaRun(env);
1479 }
1480 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1481 {
1482 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1483 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1484 remR3TimersRun(env);
1485 }
1486}
1487/* helper for recording call instruction addresses for later scanning */
1488void helper_record_call()
1489{
1490 if ( !(env->state & CPU_RAW_RING0)
1491 && (env->cr[0] & CR0_PG_MASK)
1492 && !(env->eflags & X86_EFL_IF))
1493 remR3RecordCall(env);
1494}
1495#endif /* VBOX */
1496
1497/* real mode interrupt */
1498static void do_interrupt_real(int intno, int is_int, int error_code,
1499 unsigned int next_eip)
1500{
1501 SegmentCache *dt;
1502 target_ulong ptr, ssp;
1503 int selector;
1504 uint32_t offset, esp;
1505 uint32_t old_cs, old_eip;
1506
1507 /* real mode (simpler !) */
1508 dt = &env->idt;
1509#ifndef VBOX
1510 if (intno * 4 + 3 > dt->limit)
1511#else
1512 if ((unsigned)intno * 4 + 3 > dt->limit)
1513#endif
1514 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1515 ptr = dt->base + intno * 4;
1516 offset = lduw_kernel(ptr);
1517 selector = lduw_kernel(ptr + 2);
1518 esp = ESP;
1519 ssp = env->segs[R_SS].base;
1520 if (is_int)
1521 old_eip = next_eip;
1522 else
1523 old_eip = env->eip;
1524 old_cs = env->segs[R_CS].selector;
1525 /* XXX: use SS segment size ? */
1526 PUSHW(ssp, esp, 0xffff, compute_eflags());
1527 PUSHW(ssp, esp, 0xffff, old_cs);
1528 PUSHW(ssp, esp, 0xffff, old_eip);
1529
1530 /* update processor state */
1531 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1532 env->eip = offset;
1533 env->segs[R_CS].selector = selector;
1534 env->segs[R_CS].base = (selector << 4);
1535 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1536}
1537
1538/* fake user mode interrupt */
1539void do_interrupt_user(int intno, int is_int, int error_code,
1540 target_ulong next_eip)
1541{
1542 SegmentCache *dt;
1543 target_ulong ptr;
1544 int dpl, cpl, shift;
1545 uint32_t e2;
1546
1547 dt = &env->idt;
1548 if (env->hflags & HF_LMA_MASK) {
1549 shift = 4;
1550 } else {
1551 shift = 3;
1552 }
1553 ptr = dt->base + (intno << shift);
1554 e2 = ldl_kernel(ptr + 4);
1555
1556 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1557 cpl = env->hflags & HF_CPL_MASK;
1558 /* check privilege if software int */
1559 if (is_int && dpl < cpl)
1560 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1561
1562 /* Since we emulate only user space, we cannot do more than
1563 exiting the emulation with the suitable exception and error
1564 code */
1565 if (is_int)
1566 EIP = next_eip;
1567}
1568
1569/*
1570 * Begin execution of an interruption. is_int is TRUE if coming from
1571 * the int instruction. next_eip is the EIP value AFTER the interrupt
1572 * instruction. It is only relevant if is_int is TRUE.
1573 */
1574void do_interrupt(int intno, int is_int, int error_code,
1575 target_ulong next_eip, int is_hw)
1576{
1577 if (loglevel & CPU_LOG_INT) {
1578 if ((env->cr[0] & CR0_PE_MASK)) {
1579 static int count;
1580 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1581 count, intno, error_code, is_int,
1582 env->hflags & HF_CPL_MASK,
1583 env->segs[R_CS].selector, EIP,
1584 (int)env->segs[R_CS].base + EIP,
1585 env->segs[R_SS].selector, ESP);
1586 if (intno == 0x0e) {
1587 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1588 } else {
1589 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1590 }
1591 fprintf(logfile, "\n");
1592 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1593#if 0
1594 {
1595 int i;
1596 uint8_t *ptr;
1597 fprintf(logfile, " code=");
1598 ptr = env->segs[R_CS].base + env->eip;
1599 for(i = 0; i < 16; i++) {
1600 fprintf(logfile, " %02x", ldub(ptr + i));
1601 }
1602 fprintf(logfile, "\n");
1603 }
1604#endif
1605 count++;
1606 }
1607 }
1608 if (env->cr[0] & CR0_PE_MASK) {
1609#ifdef TARGET_X86_64
1610 if (env->hflags & HF_LMA_MASK) {
1611 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1612 } else
1613#endif
1614 {
1615#ifdef VBOX
1616 /* int xx *, v86 code and VME enabled? */
1617 if ( (env->eflags & VM_MASK)
1618 && (env->cr[4] & CR4_VME_MASK)
1619 && is_int
1620 && !is_hw
1621 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1622 )
1623 do_soft_interrupt_vme(intno, error_code, next_eip);
1624 else
1625#endif /* VBOX */
1626 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1627 }
1628 } else {
1629 do_interrupt_real(intno, is_int, error_code, next_eip);
1630 }
1631}
1632
1633/*
1634 * Check nested exceptions and change to double or triple fault if
1635 * needed. It should only be called, if this is not an interrupt.
1636 * Returns the new exception number.
1637 */
1638static int check_exception(int intno, int *error_code)
1639{
1640 int first_contributory = env->old_exception == 0 ||
1641 (env->old_exception >= 10 &&
1642 env->old_exception <= 13);
1643 int second_contributory = intno == 0 ||
1644 (intno >= 10 && intno <= 13);
1645
1646 if (loglevel & CPU_LOG_INT)
1647 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1648 env->old_exception, intno);
1649
1650 if (env->old_exception == EXCP08_DBLE)
1651 cpu_abort(env, "triple fault");
1652
1653 if ((first_contributory && second_contributory)
1654 || (env->old_exception == EXCP0E_PAGE &&
1655 (second_contributory || (intno == EXCP0E_PAGE)))) {
1656 intno = EXCP08_DBLE;
1657 *error_code = 0;
1658 }
1659
1660 if (second_contributory || (intno == EXCP0E_PAGE) ||
1661 (intno == EXCP08_DBLE))
1662 env->old_exception = intno;
1663
1664 return intno;
1665}
1666
1667/*
1668 * Signal an interruption. It is executed in the main CPU loop.
1669 * is_int is TRUE if coming from the int instruction. next_eip is the
1670 * EIP value AFTER the interrupt instruction. It is only relevant if
1671 * is_int is TRUE.
1672 */
1673void raise_interrupt(int intno, int is_int, int error_code,
1674 int next_eip_addend)
1675{
1676#if defined(VBOX) && defined(DEBUG)
1677 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1678#endif
1679 if (!is_int) {
1680 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1681 intno = check_exception(intno, &error_code);
1682 } else {
1683 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1684 }
1685
1686 env->exception_index = intno;
1687 env->error_code = error_code;
1688 env->exception_is_int = is_int;
1689 env->exception_next_eip = env->eip + next_eip_addend;
1690 cpu_loop_exit();
1691}
1692
1693/* shortcuts to generate exceptions */
1694
1695void (raise_exception_err)(int exception_index, int error_code)
1696{
1697 raise_interrupt(exception_index, 0, error_code, 0);
1698}
1699
1700void raise_exception(int exception_index)
1701{
1702 raise_interrupt(exception_index, 0, 0, 0);
1703}
1704
1705/* SMM support */
1706
1707#if defined(CONFIG_USER_ONLY)
1708
1709void do_smm_enter(void)
1710{
1711}
1712
1713void helper_rsm(void)
1714{
1715}
1716
1717#else
1718
1719#ifdef TARGET_X86_64
1720#define SMM_REVISION_ID 0x00020064
1721#else
1722#define SMM_REVISION_ID 0x00020000
1723#endif
1724
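/* Enter System Management Mode: the current CPU state is written to the
   SMRAM state-save area at smbase + 0x8000 + 0x7xxx (layout depends on
   SMM_REVISION_ID), then the CPU is switched to a real-mode-like state with
   CS base = smbase, EIP = 0x8000, and paging/protection disabled. */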
1725void do_smm_enter(void)
1726{
1727 target_ulong sm_state;
1728 SegmentCache *dt;
1729 int i, offset;
1730
1731 if (loglevel & CPU_LOG_INT) {
1732 fprintf(logfile, "SMM: enter\n");
1733 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1734 }
1735
1736 env->hflags |= HF_SMM_MASK;
1737 cpu_smm_update(env);
1738
1739 sm_state = env->smbase + 0x8000;
1740
1741#ifdef TARGET_X86_64
1742 for(i = 0; i < 6; i++) {
1743 dt = &env->segs[i];
1744 offset = 0x7e00 + i * 16;
1745 stw_phys(sm_state + offset, dt->selector);
1746 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1747 stl_phys(sm_state + offset + 4, dt->limit);
1748 stq_phys(sm_state + offset + 8, dt->base);
1749 }
1750
1751 stq_phys(sm_state + 0x7e68, env->gdt.base);
1752 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1753
1754 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1755 stq_phys(sm_state + 0x7e78, env->ldt.base);
1756 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1757 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1758
1759 stq_phys(sm_state + 0x7e88, env->idt.base);
1760 stl_phys(sm_state + 0x7e84, env->idt.limit);
1761
1762 stw_phys(sm_state + 0x7e90, env->tr.selector);
1763 stq_phys(sm_state + 0x7e98, env->tr.base);
1764 stl_phys(sm_state + 0x7e94, env->tr.limit);
1765 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1766
1767 stq_phys(sm_state + 0x7ed0, env->efer);
1768
1769 stq_phys(sm_state + 0x7ff8, EAX);
1770 stq_phys(sm_state + 0x7ff0, ECX);
1771 stq_phys(sm_state + 0x7fe8, EDX);
1772 stq_phys(sm_state + 0x7fe0, EBX);
1773 stq_phys(sm_state + 0x7fd8, ESP);
1774 stq_phys(sm_state + 0x7fd0, EBP);
1775 stq_phys(sm_state + 0x7fc8, ESI);
1776 stq_phys(sm_state + 0x7fc0, EDI);
1777 for(i = 8; i < 16; i++)
1778 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1779 stq_phys(sm_state + 0x7f78, env->eip);
1780 stl_phys(sm_state + 0x7f70, compute_eflags());
1781 stl_phys(sm_state + 0x7f68, env->dr[6]);
1782 stl_phys(sm_state + 0x7f60, env->dr[7]);
1783
1784 stl_phys(sm_state + 0x7f48, env->cr[4]);
1785 stl_phys(sm_state + 0x7f50, env->cr[3]);
1786 stl_phys(sm_state + 0x7f58, env->cr[0]);
1787
1788 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1789 stl_phys(sm_state + 0x7f00, env->smbase);
1790#else
1791 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1792 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1793 stl_phys(sm_state + 0x7ff4, compute_eflags());
1794 stl_phys(sm_state + 0x7ff0, env->eip);
1795 stl_phys(sm_state + 0x7fec, EDI);
1796 stl_phys(sm_state + 0x7fe8, ESI);
1797 stl_phys(sm_state + 0x7fe4, EBP);
1798 stl_phys(sm_state + 0x7fe0, ESP);
1799 stl_phys(sm_state + 0x7fdc, EBX);
1800 stl_phys(sm_state + 0x7fd8, EDX);
1801 stl_phys(sm_state + 0x7fd4, ECX);
1802 stl_phys(sm_state + 0x7fd0, EAX);
1803 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1804 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1805
1806 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1807 stl_phys(sm_state + 0x7f64, env->tr.base);
1808 stl_phys(sm_state + 0x7f60, env->tr.limit);
1809 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1810
1811 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1812 stl_phys(sm_state + 0x7f80, env->ldt.base);
1813 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1814 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1815
1816 stl_phys(sm_state + 0x7f74, env->gdt.base);
1817 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1818
1819 stl_phys(sm_state + 0x7f58, env->idt.base);
1820 stl_phys(sm_state + 0x7f54, env->idt.limit);
1821
1822 for(i = 0; i < 6; i++) {
1823 dt = &env->segs[i];
1824 if (i < 3)
1825 offset = 0x7f84 + i * 12;
1826 else
1827 offset = 0x7f2c + (i - 3) * 12;
1828 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1829 stl_phys(sm_state + offset + 8, dt->base);
1830 stl_phys(sm_state + offset + 4, dt->limit);
1831 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1832 }
1833 stl_phys(sm_state + 0x7f14, env->cr[4]);
1834
1835 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1836 stl_phys(sm_state + 0x7ef8, env->smbase);
1837#endif
1838 /* init SMM cpu state */
1839
1840#ifdef TARGET_X86_64
1841 cpu_load_efer(env, 0);
1842#endif
1843 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1844 env->eip = 0x00008000;
1845 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1846 0xffffffff, 0);
1847 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1848 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1849 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1850 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1851 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1852
1853 cpu_x86_update_cr0(env,
1854 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1855 cpu_x86_update_cr4(env, 0);
1856 env->dr[7] = 0x00000400;
1857 CC_OP = CC_OP_EFLAGS;
1858}
1859
1860void helper_rsm(void)
1861{
1862#ifdef VBOX
1863 cpu_abort(env, "helper_rsm");
1864#else /* !VBOX */
1867 target_ulong sm_state;
1868 int i, offset;
1869 uint32_t val;
1870
1871 sm_state = env->smbase + 0x8000;
1872#ifdef TARGET_X86_64
1873 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1874
1875 for(i = 0; i < 6; i++) {
1876 offset = 0x7e00 + i * 16;
1877 cpu_x86_load_seg_cache(env, i,
1878 lduw_phys(sm_state + offset),
1879 ldq_phys(sm_state + offset + 8),
1880 ldl_phys(sm_state + offset + 4),
1881 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1882 }
1883
1884 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1885 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1886
1887 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1888 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1889 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1890 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1891
1892 env->idt.base = ldq_phys(sm_state + 0x7e88);
1893 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1894
1895 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1896 env->tr.base = ldq_phys(sm_state + 0x7e98);
1897 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1898 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1899
1900 EAX = ldq_phys(sm_state + 0x7ff8);
1901 ECX = ldq_phys(sm_state + 0x7ff0);
1902 EDX = ldq_phys(sm_state + 0x7fe8);
1903 EBX = ldq_phys(sm_state + 0x7fe0);
1904 ESP = ldq_phys(sm_state + 0x7fd8);
1905 EBP = ldq_phys(sm_state + 0x7fd0);
1906 ESI = ldq_phys(sm_state + 0x7fc8);
1907 EDI = ldq_phys(sm_state + 0x7fc0);
1908 for(i = 8; i < 16; i++)
1909 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1910 env->eip = ldq_phys(sm_state + 0x7f78);
1911 load_eflags(ldl_phys(sm_state + 0x7f70),
1912 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1913 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1914 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1915
1916 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1917 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1918 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1919
1920 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1921 if (val & 0x20000) {
1922 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1923 }
1924#else
1925 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1926 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1927 load_eflags(ldl_phys(sm_state + 0x7ff4),
1928 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1929 env->eip = ldl_phys(sm_state + 0x7ff0);
1930 EDI = ldl_phys(sm_state + 0x7fec);
1931 ESI = ldl_phys(sm_state + 0x7fe8);
1932 EBP = ldl_phys(sm_state + 0x7fe4);
1933 ESP = ldl_phys(sm_state + 0x7fe0);
1934 EBX = ldl_phys(sm_state + 0x7fdc);
1935 EDX = ldl_phys(sm_state + 0x7fd8);
1936 ECX = ldl_phys(sm_state + 0x7fd4);
1937 EAX = ldl_phys(sm_state + 0x7fd0);
1938 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1939 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1940
1941 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1942 env->tr.base = ldl_phys(sm_state + 0x7f64);
1943 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1944 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1945
1946 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1947 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1948 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1949 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1950
1951 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1952 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1953
1954 env->idt.base = ldl_phys(sm_state + 0x7f58);
1955 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1956
1957 for(i = 0; i < 6; i++) {
1958 if (i < 3)
1959 offset = 0x7f84 + i * 12;
1960 else
1961 offset = 0x7f2c + (i - 3) * 12;
1962 cpu_x86_load_seg_cache(env, i,
1963 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1964 ldl_phys(sm_state + offset + 8),
1965 ldl_phys(sm_state + offset + 4),
1966 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1967 }
1968 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1969
1970 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1971 if (val & 0x20000) {
1972 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1973 }
1974#endif
1975 CC_OP = CC_OP_EFLAGS;
1976 env->hflags &= ~HF_SMM_MASK;
1977 cpu_smm_update(env);
1978
1979 if (loglevel & CPU_LOG_INT) {
1980 fprintf(logfile, "SMM: after RSM\n");
1981 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1982 }
1983#endif /* !VBOX */
1984}
1985
1986#endif /* !CONFIG_USER_ONLY */
1987
1988
1989/* division, flags are undefined */
1990
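/*
 * The DIV/IDIV helpers below all follow the same pattern: build the wide
 * dividend from the fixed register pair, divide by the operand, and raise
 * #DE (EXCP00_DIVZ) both for a zero divisor and for a quotient that does
 * not fit in the destination.  A small worked example for helper_divb_AL:
 *
 *     AX = 0x0123 (291), divisor = 0x10 (16)
 *     quotient  = 0x12 (18) -> AL
 *     remainder = 0x03 (3)  -> AH
 *     resulting AX = 0x0312
 */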
1991void helper_divb_AL(target_ulong t0)
1992{
1993 unsigned int num, den, q, r;
1994
1995 num = (EAX & 0xffff);
1996 den = (t0 & 0xff);
1997 if (den == 0) {
1998 raise_exception(EXCP00_DIVZ);
1999 }
2000 q = (num / den);
2001 if (q > 0xff)
2002 raise_exception(EXCP00_DIVZ);
2003 q &= 0xff;
2004 r = (num % den) & 0xff;
2005 EAX = (EAX & ~0xffff) | (r << 8) | q;
2006}
2007
2008void helper_idivb_AL(target_ulong t0)
2009{
2010 int num, den, q, r;
2011
2012 num = (int16_t)EAX;
2013 den = (int8_t)t0;
2014 if (den == 0) {
2015 raise_exception(EXCP00_DIVZ);
2016 }
2017 q = (num / den);
2018 if (q != (int8_t)q)
2019 raise_exception(EXCP00_DIVZ);
2020 q &= 0xff;
2021 r = (num % den) & 0xff;
2022 EAX = (EAX & ~0xffff) | (r << 8) | q;
2023}
2024
2025void helper_divw_AX(target_ulong t0)
2026{
2027 unsigned int num, den, q, r;
2028
2029 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2030 den = (t0 & 0xffff);
2031 if (den == 0) {
2032 raise_exception(EXCP00_DIVZ);
2033 }
2034 q = (num / den);
2035 if (q > 0xffff)
2036 raise_exception(EXCP00_DIVZ);
2037 q &= 0xffff;
2038 r = (num % den) & 0xffff;
2039 EAX = (EAX & ~0xffff) | q;
2040 EDX = (EDX & ~0xffff) | r;
2041}
2042
2043void helper_idivw_AX(target_ulong t0)
2044{
2045 int num, den, q, r;
2046
2047 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2048 den = (int16_t)t0;
2049 if (den == 0) {
2050 raise_exception(EXCP00_DIVZ);
2051 }
2052 q = (num / den);
2053 if (q != (int16_t)q)
2054 raise_exception(EXCP00_DIVZ);
2055 q &= 0xffff;
2056 r = (num % den) & 0xffff;
2057 EAX = (EAX & ~0xffff) | q;
2058 EDX = (EDX & ~0xffff) | r;
2059}
2060
2061void helper_divl_EAX(target_ulong t0)
2062{
2063 unsigned int den, r;
2064 uint64_t num, q;
2065
2066 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2067 den = t0;
2068 if (den == 0) {
2069 raise_exception(EXCP00_DIVZ);
2070 }
2071 q = (num / den);
2072 r = (num % den);
2073 if (q > 0xffffffff)
2074 raise_exception(EXCP00_DIVZ);
2075 EAX = (uint32_t)q;
2076 EDX = (uint32_t)r;
2077}
2078
2079void helper_idivl_EAX(target_ulong t0)
2080{
2081 int den, r;
2082 int64_t num, q;
2083
2084 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2085 den = t0;
2086 if (den == 0) {
2087 raise_exception(EXCP00_DIVZ);
2088 }
2089 q = (num / den);
2090 r = (num % den);
2091 if (q != (int32_t)q)
2092 raise_exception(EXCP00_DIVZ);
2093 EAX = (uint32_t)q;
2094 EDX = (uint32_t)r;
2095}
2096
2097/* bcd */
2098
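/*
 * The ASCII/BCD adjust helpers operate only on AL/AH and update the lazy
 * condition-code state (CC_DST/CC_SRC) rather than EFLAGS directly.  A
 * worked example for helper_aam with the usual base of 10:
 *
 *     AL = 0x3f (63) -> AH = 6, AL = 3 (low 16 bits of EAX become 0x0603)
 *
 * AAM with an immediate base of 0 should raise #DE; the XXX note below
 * presumably refers to that missing check.
 */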
2099/* XXX: exception */
2100void helper_aam(int base)
2101{
2102 int al, ah;
2103 al = EAX & 0xff;
2104 ah = al / base;
2105 al = al % base;
2106 EAX = (EAX & ~0xffff) | al | (ah << 8);
2107 CC_DST = al;
2108}
2109
2110void helper_aad(int base)
2111{
2112 int al, ah;
2113 al = EAX & 0xff;
2114 ah = (EAX >> 8) & 0xff;
2115 al = ((ah * base) + al) & 0xff;
2116 EAX = (EAX & ~0xffff) | al;
2117 CC_DST = al;
2118}
2119
2120void helper_aaa(void)
2121{
2122 int icarry;
2123 int al, ah, af;
2124 int eflags;
2125
2126 eflags = cc_table[CC_OP].compute_all();
2127 af = eflags & CC_A;
2128 al = EAX & 0xff;
2129 ah = (EAX >> 8) & 0xff;
2130
2131 icarry = (al > 0xf9);
2132 if (((al & 0x0f) > 9 ) || af) {
2133 al = (al + 6) & 0x0f;
2134 ah = (ah + 1 + icarry) & 0xff;
2135 eflags |= CC_C | CC_A;
2136 } else {
2137 eflags &= ~(CC_C | CC_A);
2138 al &= 0x0f;
2139 }
2140 EAX = (EAX & ~0xffff) | al | (ah << 8);
2141 CC_SRC = eflags;
2142 FORCE_RET();
2143}
2144
2145void helper_aas(void)
2146{
2147 int icarry;
2148 int al, ah, af;
2149 int eflags;
2150
2151 eflags = cc_table[CC_OP].compute_all();
2152 af = eflags & CC_A;
2153 al = EAX & 0xff;
2154 ah = (EAX >> 8) & 0xff;
2155
2156 icarry = (al < 6);
2157 if (((al & 0x0f) > 9 ) || af) {
2158 al = (al - 6) & 0x0f;
2159 ah = (ah - 1 - icarry) & 0xff;
2160 eflags |= CC_C | CC_A;
2161 } else {
2162 eflags &= ~(CC_C | CC_A);
2163 al &= 0x0f;
2164 }
2165 EAX = (EAX & ~0xffff) | al | (ah << 8);
2166 CC_SRC = eflags;
2167 FORCE_RET();
2168}
2169
2170void helper_daa(void)
2171{
2172 int al, af, cf;
2173 int eflags;
2174
2175 eflags = cc_table[CC_OP].compute_all();
2176 cf = eflags & CC_C;
2177 af = eflags & CC_A;
2178 al = EAX & 0xff;
2179
2180 eflags = 0;
2181 if (((al & 0x0f) > 9 ) || af) {
2182 al = (al + 6) & 0xff;
2183 eflags |= CC_A;
2184 }
2185 if ((al > 0x9f) || cf) {
2186 al = (al + 0x60) & 0xff;
2187 eflags |= CC_C;
2188 }
2189 EAX = (EAX & ~0xff) | al;
2190 /* well, speed is not an issue here, so we compute the flags by hand */
2191 eflags |= (al == 0) << 6; /* zf */
2192 eflags |= parity_table[al]; /* pf */
2193 eflags |= (al & 0x80); /* sf */
2194 CC_SRC = eflags;
2195 FORCE_RET();
2196}
2197
2198void helper_das(void)
2199{
2200 int al, al1, af, cf;
2201 int eflags;
2202
2203 eflags = cc_table[CC_OP].compute_all();
2204 cf = eflags & CC_C;
2205 af = eflags & CC_A;
2206 al = EAX & 0xff;
2207
2208 eflags = 0;
2209 al1 = al;
2210 if (((al & 0x0f) > 9 ) || af) {
2211 eflags |= CC_A;
2212 if (al < 6 || cf)
2213 eflags |= CC_C;
2214 al = (al - 6) & 0xff;
2215 }
2216 if ((al1 > 0x99) || cf) {
2217 al = (al - 0x60) & 0xff;
2218 eflags |= CC_C;
2219 }
2220 EAX = (EAX & ~0xff) | al;
2221 /* well, speed is not an issue here, so we compute the flags by hand */
2222 eflags |= (al == 0) << 6; /* zf */
2223 eflags |= parity_table[al]; /* pf */
2224 eflags |= (al & 0x80); /* sf */
2225 CC_SRC = eflags;
2226 FORCE_RET();
2227}
2228
2229void helper_into(int next_eip_addend)
2230{
2231 int eflags;
2232 eflags = cc_table[CC_OP].compute_all();
2233 if (eflags & CC_O) {
2234 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2235 }
2236}
2237
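/*
 * CMPXCHG8B in a nutshell: compare the 64-bit value at [a0] with EDX:EAX;
 * if equal, store ECX:EBX there and set ZF, otherwise load the memory
 * value into EDX:EAX and clear ZF.  The store in the "not equal" path
 * looks redundant but appears intended to make the write access (and any
 * resulting fault) happen in both cases.
 */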
2238void helper_cmpxchg8b(target_ulong a0)
2239{
2240 uint64_t d;
2241 int eflags;
2242
2243 eflags = cc_table[CC_OP].compute_all();
2244 d = ldq(a0);
2245 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2246 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2247 eflags |= CC_Z;
2248 } else {
2249 /* always do the store */
2250 stq(a0, d);
2251 EDX = (uint32_t)(d >> 32);
2252 EAX = (uint32_t)d;
2253 eflags &= ~CC_Z;
2254 }
2255 CC_SRC = eflags;
2256}
2257
2258#ifdef TARGET_X86_64
2259void helper_cmpxchg16b(target_ulong a0)
2260{
2261 uint64_t d0, d1;
2262 int eflags;
2263
2264 if ((a0 & 0xf) != 0)
2265 raise_exception(EXCP0D_GPF);
2266 eflags = cc_table[CC_OP].compute_all();
2267 d0 = ldq(a0);
2268 d1 = ldq(a0 + 8);
2269 if (d0 == EAX && d1 == EDX) {
2270 stq(a0, EBX);
2271 stq(a0 + 8, ECX);
2272 eflags |= CC_Z;
2273 } else {
2274 /* always do the store */
2275 stq(a0, d0);
2276 stq(a0 + 8, d1);
2277 EDX = d1;
2278 EAX = d0;
2279 eflags &= ~CC_Z;
2280 }
2281 CC_SRC = eflags;
2282}
2283#endif
2284
2285void helper_single_step(void)
2286{
2287 env->dr[6] |= 0x4000;
2288 raise_exception(EXCP01_SSTP);
2289}
2290
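/*
 * helper_cpuid dispatches on the leaf number in EAX.  In the VBOX build
 * the whole lookup is forwarded to remR3CpuId() so the guest sees the
 * CPUID values the VMM has configured; the open-coded table below is only
 * used by the original QEMU path.
 */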
2291void helper_cpuid(void)
2292{
2293#ifndef VBOX
2294 uint32_t index;
2295
2296 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2297
2298 index = (uint32_t)EAX;
2299 /* test if maximum index reached */
2300 if (index & 0x80000000) {
2301 if (index > env->cpuid_xlevel)
2302 index = env->cpuid_level;
2303 } else {
2304 if (index > env->cpuid_level)
2305 index = env->cpuid_level;
2306 }
2307
2308 switch(index) {
2309 case 0:
2310 EAX = env->cpuid_level;
2311 EBX = env->cpuid_vendor1;
2312 EDX = env->cpuid_vendor2;
2313 ECX = env->cpuid_vendor3;
2314 break;
2315 case 1:
2316 EAX = env->cpuid_version;
2317 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2318 ECX = env->cpuid_ext_features;
2319 EDX = env->cpuid_features;
2320 break;
2321 case 2:
2322 /* cache info: needed for Pentium Pro compatibility */
2323 EAX = 1;
2324 EBX = 0;
2325 ECX = 0;
2326 EDX = 0x2c307d;
2327 break;
2328 case 4:
2329 /* cache info: needed for Core compatibility */
2330 switch (ECX) {
2331 case 0: /* L1 dcache info */
2332 EAX = 0x0000121;
2333 EBX = 0x1c0003f;
2334 ECX = 0x000003f;
2335 EDX = 0x0000001;
2336 break;
2337 case 1: /* L1 icache info */
2338 EAX = 0x0000122;
2339 EBX = 0x1c0003f;
2340 ECX = 0x000003f;
2341 EDX = 0x0000001;
2342 break;
2343 case 2: /* L2 cache info */
2344 EAX = 0x0000143;
2345 EBX = 0x3c0003f;
2346 ECX = 0x0000fff;
2347 EDX = 0x0000001;
2348 break;
2349 default: /* end of info */
2350 EAX = 0;
2351 EBX = 0;
2352 ECX = 0;
2353 EDX = 0;
2354 break;
2355 }
2356
2357 break;
2358 case 5:
2359 /* mwait info: needed for Core compatibility */
2360 EAX = 0; /* Smallest monitor-line size in bytes */
2361 EBX = 0; /* Largest monitor-line size in bytes */
2362 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2363 EDX = 0;
2364 break;
2365 case 6:
2366 /* Thermal and Power Leaf */
2367 EAX = 0;
2368 EBX = 0;
2369 ECX = 0;
2370 EDX = 0;
2371 break;
2372 case 9:
2373 /* Direct Cache Access Information Leaf */
2374 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2375 EBX = 0;
2376 ECX = 0;
2377 EDX = 0;
2378 break;
2379 case 0xA:
2380 /* Architectural Performance Monitoring Leaf */
2381 EAX = 0;
2382 EBX = 0;
2383 ECX = 0;
2384 EDX = 0;
2385 break;
2386 case 0x80000000:
2387 EAX = env->cpuid_xlevel;
2388 EBX = env->cpuid_vendor1;
2389 EDX = env->cpuid_vendor2;
2390 ECX = env->cpuid_vendor3;
2391 break;
2392 case 0x80000001:
2393 EAX = env->cpuid_features;
2394 EBX = 0;
2395 ECX = env->cpuid_ext3_features;
2396 EDX = env->cpuid_ext2_features;
2397 break;
2398 case 0x80000002:
2399 case 0x80000003:
2400 case 0x80000004:
2401 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2402 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2403 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2404 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2405 break;
2406 case 0x80000005:
2407 /* cache info (L1 cache) */
2408 EAX = 0x01ff01ff;
2409 EBX = 0x01ff01ff;
2410 ECX = 0x40020140;
2411 EDX = 0x40020140;
2412 break;
2413 case 0x80000006:
2414 /* cache info (L2 cache) */
2415 EAX = 0;
2416 EBX = 0x42004200;
2417 ECX = 0x02008140;
2418 EDX = 0;
2419 break;
2420 case 0x80000008:
2421 /* virtual & phys address size in low 2 bytes. */
2422/* XXX: This value must match the one used in the MMU code. */
2423 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2424 /* 64 bit processor */
2425#if defined(USE_KQEMU)
2426 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2427#else
2428/* XXX: The physical address space is limited to 42 bits in exec.c. */
2429 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2430#endif
2431 } else {
2432#if defined(USE_KQEMU)
2433 EAX = 0x00000020; /* 32 bits physical */
2434#else
2435 if (env->cpuid_features & CPUID_PSE36)
2436 EAX = 0x00000024; /* 36 bits physical */
2437 else
2438 EAX = 0x00000020; /* 32 bits physical */
2439#endif
2440 }
2441 EBX = 0;
2442 ECX = 0;
2443 EDX = 0;
2444 break;
2445 case 0x8000000A:
2446 EAX = 0x00000001;
2447 EBX = 0;
2448 ECX = 0;
2449 EDX = 0;
2450 break;
2451 default:
2452 /* reserved values: zero */
2453 EAX = 0;
2454 EBX = 0;
2455 ECX = 0;
2456 EDX = 0;
2457 break;
2458 }
2459#else /* VBOX */
2460 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2461#endif /* VBOX */
2462}
2463
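/*
 * helper_enter_level implements the nesting-level part of ENTER: for a
 * nesting level > 1 it copies the chain of previous frame pointers from
 * the old frame onto the new stack and finally pushes the new frame
 * pointer value passed in t1.  The push of the caller's (E)BP itself
 * appears to be emitted by the translated code before this helper runs,
 * which is why the first stack-pointer adjustment here has no store.
 */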
2464void helper_enter_level(int level, int data32, target_ulong t1)
2465{
2466 target_ulong ssp;
2467 uint32_t esp_mask, esp, ebp;
2468
2469 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2470 ssp = env->segs[R_SS].base;
2471 ebp = EBP;
2472 esp = ESP;
2473 if (data32) {
2474 /* 32 bit */
2475 esp -= 4;
2476 while (--level) {
2477 esp -= 4;
2478 ebp -= 4;
2479 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2480 }
2481 esp -= 4;
2482 stl(ssp + (esp & esp_mask), t1);
2483 } else {
2484 /* 16 bit */
2485 esp -= 2;
2486 while (--level) {
2487 esp -= 2;
2488 ebp -= 2;
2489 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2490 }
2491 esp -= 2;
2492 stw(ssp + (esp & esp_mask), t1);
2493 }
2494}
2495
2496#ifdef TARGET_X86_64
2497void helper_enter64_level(int level, int data64, target_ulong t1)
2498{
2499 target_ulong esp, ebp;
2500 ebp = EBP;
2501 esp = ESP;
2502
2503 if (data64) {
2504 /* 64 bit */
2505 esp -= 8;
2506 while (--level) {
2507 esp -= 8;
2508 ebp -= 8;
2509 stq(esp, ldq(ebp));
2510 }
2511 esp -= 8;
2512 stq(esp, t1);
2513 } else {
2514 /* 16 bit */
2515 esp -= 2;
2516 while (--level) {
2517 esp -= 2;
2518 ebp -= 2;
2519 stw(esp, lduw(ebp));
2520 }
2521 esp -= 2;
2522 stw(esp, t1);
2523 }
2524}
2525#endif
2526
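/*
 * LLDT: a null selector simply leaves the LDT unusable (base/limit 0);
 * otherwise the selector must point into the GDT (TI bit clear), the
 * descriptor must be a present system descriptor of type 2 (LDT), and in
 * long mode the 16-byte descriptor format is used, hence the larger
 * entry_limit.
 */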
2527void helper_lldt(int selector)
2528{
2529 SegmentCache *dt;
2530 uint32_t e1, e2;
2531#ifndef VBOX
2532 int index, entry_limit;
2533#else
2534 unsigned int index, entry_limit;
2535#endif
2536 target_ulong ptr;
2537
2538#ifdef VBOX
2539 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2540 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2541#endif
2542
2543 selector &= 0xffff;
2544 if ((selector & 0xfffc) == 0) {
2545 /* XXX: NULL selector case: invalid LDT */
2546 env->ldt.base = 0;
2547 env->ldt.limit = 0;
2548 } else {
2549 if (selector & 0x4)
2550 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2551 dt = &env->gdt;
2552 index = selector & ~7;
2553#ifdef TARGET_X86_64
2554 if (env->hflags & HF_LMA_MASK)
2555 entry_limit = 15;
2556 else
2557#endif
2558 entry_limit = 7;
2559 if ((index + entry_limit) > dt->limit)
2560 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2561 ptr = dt->base + index;
2562 e1 = ldl_kernel(ptr);
2563 e2 = ldl_kernel(ptr + 4);
2564 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2565 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2566 if (!(e2 & DESC_P_MASK))
2567 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2568#ifdef TARGET_X86_64
2569 if (env->hflags & HF_LMA_MASK) {
2570 uint32_t e3;
2571 e3 = ldl_kernel(ptr + 8);
2572 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2573 env->ldt.base |= (target_ulong)e3 << 32;
2574 } else
2575#endif
2576 {
2577 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2578 }
2579 }
2580 env->ldt.selector = selector;
2581#ifdef VBOX
2582 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2583 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2584#endif
2585}
2586
2587void helper_ltr(int selector)
2588{
2589 SegmentCache *dt;
2590 uint32_t e1, e2;
2591#ifndef VBOX
2592 int index, type, entry_limit;
2593#else
2594 unsigned int index;
2595 int type, entry_limit;
2596#endif
2597 target_ulong ptr;
2598
2599#ifdef VBOX
2600 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2601 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2602 env->tr.flags, (RTSEL)(selector & 0xffff)));
2603#endif
2604 selector &= 0xffff;
2605 if ((selector & 0xfffc) == 0) {
2606 /* NULL selector case: invalid TR */
2607 env->tr.base = 0;
2608 env->tr.limit = 0;
2609 env->tr.flags = 0;
2610 } else {
2611 if (selector & 0x4)
2612 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2613 dt = &env->gdt;
2614 index = selector & ~7;
2615#ifdef TARGET_X86_64
2616 if (env->hflags & HF_LMA_MASK)
2617 entry_limit = 15;
2618 else
2619#endif
2620 entry_limit = 7;
2621 if ((index + entry_limit) > dt->limit)
2622 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2623 ptr = dt->base + index;
2624 e1 = ldl_kernel(ptr);
2625 e2 = ldl_kernel(ptr + 4);
2626 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2627 if ((e2 & DESC_S_MASK) ||
2628 (type != 1 && type != 9))
2629 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2630 if (!(e2 & DESC_P_MASK))
2631 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2632#ifdef TARGET_X86_64
2633 if (env->hflags & HF_LMA_MASK) {
2634 uint32_t e3, e4;
2635 e3 = ldl_kernel(ptr + 8);
2636 e4 = ldl_kernel(ptr + 12);
2637 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2638 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2639 load_seg_cache_raw_dt(&env->tr, e1, e2);
2640 env->tr.base |= (target_ulong)e3 << 32;
2641 } else
2642#endif
2643 {
2644 load_seg_cache_raw_dt(&env->tr, e1, e2);
2645 }
2646 e2 |= DESC_TSS_BUSY_MASK;
2647 stl_kernel(ptr + 4, e2);
2648 }
2649 env->tr.selector = selector;
2650#ifdef VBOX
2651 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2652 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2653 env->tr.flags, (RTSEL)(selector & 0xffff)));
2654#endif
2655}
2656
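/*
 * helper_load_seg performs the protected-mode checks for loading a data
 * segment register: a null selector is accepted for everything except SS
 * (with the long-mode exception that a null SS is allowed outside CPL 3),
 * SS must be a writable data segment with RPL == DPL == CPL, and the other
 * registers accept readable code or data segments subject to the usual
 * DPL >= max(CPL, RPL) rule for non-conforming segments.  The VBOX-only
 * block at the top strips RPL 1 selectors while running raw ring-0 code.
 */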
2657/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2658void helper_load_seg(int seg_reg, int selector)
2659{
2660 uint32_t e1, e2;
2661 int cpl, dpl, rpl;
2662 SegmentCache *dt;
2663#ifndef VBOX
2664 int index;
2665#else
2666 unsigned int index;
2667#endif
2668 target_ulong ptr;
2669
2670 selector &= 0xffff;
2671 cpl = env->hflags & HF_CPL_MASK;
2672
2673#ifdef VBOX
2674 /* Trying to load a selector with RPL=1 while running raw ring-0 code? */
2675 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2676 {
2677 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2678 selector = selector & 0xfffc;
2679 }
2680#endif
2681 if ((selector & 0xfffc) == 0) {
2682 /* null selector case */
2683 if (seg_reg == R_SS
2684#ifdef TARGET_X86_64
2685 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2686#endif
2687 )
2688 raise_exception_err(EXCP0D_GPF, 0);
2689 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2690 } else {
2691
2692 if (selector & 0x4)
2693 dt = &env->ldt;
2694 else
2695 dt = &env->gdt;
2696 index = selector & ~7;
2697 if ((index + 7) > dt->limit)
2698 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2699 ptr = dt->base + index;
2700 e1 = ldl_kernel(ptr);
2701 e2 = ldl_kernel(ptr + 4);
2702
2703 if (!(e2 & DESC_S_MASK))
2704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2705 rpl = selector & 3;
2706 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2707 if (seg_reg == R_SS) {
2708 /* must be writable segment */
2709 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2710 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2711 if (rpl != cpl || dpl != cpl)
2712 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2713 } else {
2714 /* must be readable segment */
2715 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2716 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2717
2718 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2719 /* if not conforming code, test rights */
2720 if (dpl < cpl || dpl < rpl)
2721 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2722 }
2723 }
2724
2725 if (!(e2 & DESC_P_MASK)) {
2726 if (seg_reg == R_SS)
2727 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2728 else
2729 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2730 }
2731
2732 /* set the access bit if not already set */
2733 if (!(e2 & DESC_A_MASK)) {
2734 e2 |= DESC_A_MASK;
2735 stl_kernel(ptr + 4, e2);
2736 }
2737
2738 cpu_x86_load_seg_cache(env, seg_reg, selector,
2739 get_seg_base(e1, e2),
2740 get_seg_limit(e1, e2),
2741 e2);
2742#if 0
2743 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2744 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2745#endif
2746 }
2747}
2748
2749/* protected mode jump */
2750void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2751 int next_eip_addend)
2752{
2753 int gate_cs, type;
2754 uint32_t e1, e2, cpl, dpl, rpl, limit;
2755 target_ulong next_eip;
2756
2757#ifdef VBOX
2758 e1 = e2 = 0;
2759#endif
2760 if ((new_cs & 0xfffc) == 0)
2761 raise_exception_err(EXCP0D_GPF, 0);
2762 if (load_segment(&e1, &e2, new_cs) != 0)
2763 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2764 cpl = env->hflags & HF_CPL_MASK;
2765 if (e2 & DESC_S_MASK) {
2766 if (!(e2 & DESC_CS_MASK))
2767 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2768 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2769 if (e2 & DESC_C_MASK) {
2770 /* conforming code segment */
2771 if (dpl > cpl)
2772 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2773 } else {
2774 /* non conforming code segment */
2775 rpl = new_cs & 3;
2776 if (rpl > cpl)
2777 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2778 if (dpl != cpl)
2779 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2780 }
2781 if (!(e2 & DESC_P_MASK))
2782 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2783 limit = get_seg_limit(e1, e2);
2784 if (new_eip > limit &&
2785 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2788 get_seg_base(e1, e2), limit, e2);
2789 EIP = new_eip;
2790 } else {
2791 /* jump to call or task gate */
2792 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2793 rpl = new_cs & 3;
2794 cpl = env->hflags & HF_CPL_MASK;
2795 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2796 switch(type) {
2797 case 1: /* 286 TSS */
2798 case 9: /* 386 TSS */
2799 case 5: /* task gate */
2800 if (dpl < cpl || dpl < rpl)
2801 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2802 next_eip = env->eip + next_eip_addend;
2803 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2804 CC_OP = CC_OP_EFLAGS;
2805 break;
2806 case 4: /* 286 call gate */
2807 case 12: /* 386 call gate */
2808 if ((dpl < cpl) || (dpl < rpl))
2809 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2810 if (!(e2 & DESC_P_MASK))
2811 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2812 gate_cs = e1 >> 16;
2813 new_eip = (e1 & 0xffff);
2814 if (type == 12)
2815 new_eip |= (e2 & 0xffff0000);
2816 if (load_segment(&e1, &e2, gate_cs) != 0)
2817 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2818 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2819 /* must be code segment */
2820 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2821 (DESC_S_MASK | DESC_CS_MASK)))
2822 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2823 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2824 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2825 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2826 if (!(e2 & DESC_P_MASK))
2827#ifdef VBOX /* See page 3-514 of 253666.pdf */
2828 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2829#else
2830 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2831#endif
2832 limit = get_seg_limit(e1, e2);
2833 if (new_eip > limit)
2834 raise_exception_err(EXCP0D_GPF, 0);
2835 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2836 get_seg_base(e1, e2), limit, e2);
2837 EIP = new_eip;
2838 break;
2839 default:
2840 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2841 break;
2842 }
2843 }
2844}
2845
2846/* real mode call */
2847void helper_lcall_real(int new_cs, target_ulong new_eip1,
2848 int shift, int next_eip)
2849{
2850 int new_eip;
2851 uint32_t esp, esp_mask;
2852 target_ulong ssp;
2853
2854 new_eip = new_eip1;
2855 esp = ESP;
2856 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2857 ssp = env->segs[R_SS].base;
2858 if (shift) {
2859 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2860 PUSHL(ssp, esp, esp_mask, next_eip);
2861 } else {
2862 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2863 PUSHW(ssp, esp, esp_mask, next_eip);
2864 }
2865
2866 SET_ESP(esp, esp_mask);
2867 env->eip = new_eip;
2868 env->segs[R_CS].selector = new_cs;
2869 env->segs[R_CS].base = (new_cs << 4);
2870}
2871
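/*
 * Protected-mode far CALL: a direct call to a code segment just pushes the
 * return CS:(E)IP and jumps, while a call through a call gate to a more
 * privileged segment switches to the inner stack taken from the TSS,
 * copies param_count parameters from the old stack and then pushes the old
 * SS:(E)SP plus the return address on the new stack.  A sketch of the
 * resulting inner stack for the 32-bit case, from high to low addresses:
 *
 *     old SS, old ESP, param[N-1] ... param[0], return CS, return EIP
 *                                                          ^ new ESP
 */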
2872/* protected mode call */
2873void helper_lcall_protected(int new_cs, target_ulong new_eip,
2874 int shift, int next_eip_addend)
2875{
2876 int new_stack, i;
2877 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2878 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2879 uint32_t val, limit, old_sp_mask;
2880 target_ulong ssp, old_ssp, next_eip;
2881
2882#ifdef VBOX
2883 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2884#endif
2885 next_eip = env->eip + next_eip_addend;
2886#ifdef DEBUG_PCALL
2887 if (loglevel & CPU_LOG_PCALL) {
2888 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2889 new_cs, (uint32_t)new_eip, shift);
2890 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2891 }
2892#endif
2893 if ((new_cs & 0xfffc) == 0)
2894 raise_exception_err(EXCP0D_GPF, 0);
2895 if (load_segment(&e1, &e2, new_cs) != 0)
2896 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2897 cpl = env->hflags & HF_CPL_MASK;
2898#ifdef DEBUG_PCALL
2899 if (loglevel & CPU_LOG_PCALL) {
2900 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2901 }
2902#endif
2903 if (e2 & DESC_S_MASK) {
2904 if (!(e2 & DESC_CS_MASK))
2905 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2906 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2907 if (e2 & DESC_C_MASK) {
2908 /* conforming code segment */
2909 if (dpl > cpl)
2910 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2911 } else {
2912 /* non conforming code segment */
2913 rpl = new_cs & 3;
2914 if (rpl > cpl)
2915 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2916 if (dpl != cpl)
2917 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2918 }
2919 if (!(e2 & DESC_P_MASK))
2920 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2921
2922#ifdef TARGET_X86_64
2923 /* XXX: check 16/32 bit cases in long mode */
2924 if (shift == 2) {
2925 target_ulong rsp;
2926 /* 64 bit case */
2927 rsp = ESP;
2928 PUSHQ(rsp, env->segs[R_CS].selector);
2929 PUSHQ(rsp, next_eip);
2930 /* from this point, not restartable */
2931 ESP = rsp;
2932 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2933 get_seg_base(e1, e2),
2934 get_seg_limit(e1, e2), e2);
2935 EIP = new_eip;
2936 } else
2937#endif
2938 {
2939 sp = ESP;
2940 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2941 ssp = env->segs[R_SS].base;
2942 if (shift) {
2943 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2944 PUSHL(ssp, sp, sp_mask, next_eip);
2945 } else {
2946 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2947 PUSHW(ssp, sp, sp_mask, next_eip);
2948 }
2949
2950 limit = get_seg_limit(e1, e2);
2951 if (new_eip > limit)
2952 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2953 /* from this point, not restartable */
2954 SET_ESP(sp, sp_mask);
2955 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2956 get_seg_base(e1, e2), limit, e2);
2957 EIP = new_eip;
2958 }
2959 } else {
2960 /* check gate type */
2961 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2963 rpl = new_cs & 3;
2964 switch(type) {
2965 case 1: /* available 286 TSS */
2966 case 9: /* available 386 TSS */
2967 case 5: /* task gate */
2968 if (dpl < cpl || dpl < rpl)
2969 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2970 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2971 CC_OP = CC_OP_EFLAGS;
2972 return;
2973 case 4: /* 286 call gate */
2974 case 12: /* 386 call gate */
2975 break;
2976 default:
2977 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2978 break;
2979 }
2980 shift = type >> 3;
2981
2982 if (dpl < cpl || dpl < rpl)
2983 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2984 /* check valid bit */
2985 if (!(e2 & DESC_P_MASK))
2986 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2987 selector = e1 >> 16;
2988 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2989 param_count = e2 & 0x1f;
2990 if ((selector & 0xfffc) == 0)
2991 raise_exception_err(EXCP0D_GPF, 0);
2992
2993 if (load_segment(&e1, &e2, selector) != 0)
2994 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2995 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2996 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2997 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2998 if (dpl > cpl)
2999 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3000 if (!(e2 & DESC_P_MASK))
3001 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3002
3003 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3004 /* to inner privilege */
3005 get_ss_esp_from_tss(&ss, &sp, dpl);
3006#ifdef DEBUG_PCALL
3007 if (loglevel & CPU_LOG_PCALL)
3008 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3009 ss, sp, param_count, ESP);
3010#endif
3011 if ((ss & 0xfffc) == 0)
3012 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3013 if ((ss & 3) != dpl)
3014 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3015 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3016 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3017 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3018 if (ss_dpl != dpl)
3019 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3020 if (!(ss_e2 & DESC_S_MASK) ||
3021 (ss_e2 & DESC_CS_MASK) ||
3022 !(ss_e2 & DESC_W_MASK))
3023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3024 if (!(ss_e2 & DESC_P_MASK))
3025#ifdef VBOX /* See page 3-99 of 253666.pdf */
3026 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3027#else
3028 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3029#endif
3030
3031 // push_size = ((param_count * 2) + 8) << shift;
3032
3033 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3034 old_ssp = env->segs[R_SS].base;
3035
3036 sp_mask = get_sp_mask(ss_e2);
3037 ssp = get_seg_base(ss_e1, ss_e2);
3038 if (shift) {
3039 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3040 PUSHL(ssp, sp, sp_mask, ESP);
3041 for(i = param_count - 1; i >= 0; i--) {
3042 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3043 PUSHL(ssp, sp, sp_mask, val);
3044 }
3045 } else {
3046 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3047 PUSHW(ssp, sp, sp_mask, ESP);
3048 for(i = param_count - 1; i >= 0; i--) {
3049 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3050 PUSHW(ssp, sp, sp_mask, val);
3051 }
3052 }
3053 new_stack = 1;
3054 } else {
3055 /* to same privilege */
3056 sp = ESP;
3057 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3058 ssp = env->segs[R_SS].base;
3059 // push_size = (4 << shift);
3060 new_stack = 0;
3061 }
3062
3063 if (shift) {
3064 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3065 PUSHL(ssp, sp, sp_mask, next_eip);
3066 } else {
3067 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3068 PUSHW(ssp, sp, sp_mask, next_eip);
3069 }
3070
3071 /* from this point, not restartable */
3072
3073 if (new_stack) {
3074 ss = (ss & ~3) | dpl;
3075 cpu_x86_load_seg_cache(env, R_SS, ss,
3076 ssp,
3077 get_seg_limit(ss_e1, ss_e2),
3078 ss_e2);
3079 }
3080
3081 selector = (selector & ~3) | dpl;
3082 cpu_x86_load_seg_cache(env, R_CS, selector,
3083 get_seg_base(e1, e2),
3084 get_seg_limit(e1, e2),
3085 e2);
3086 cpu_x86_set_cpl(env, dpl);
3087 SET_ESP(sp, sp_mask);
3088 EIP = offset;
3089 }
3090#ifdef USE_KQEMU
3091 if (kqemu_is_ok(env)) {
3092 env->exception_index = -1;
3093 cpu_loop_exit();
3094 }
3095#endif
3096}
3097
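/*
 * Real-mode / VM86 IRET: pop (E)IP, CS and (E)FLAGS from the stack and
 * reload the CS base as selector << 4.  The VBOX-specific code handles the
 * CR4.VME case, where IF is virtualised through VIF/VIP instead of being
 * popped directly, and where a pending virtual interrupt or an attempt to
 * set TF must raise #GP.
 */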
3098/* real and vm86 mode iret */
3099void helper_iret_real(int shift)
3100{
3101 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3102 target_ulong ssp;
3103 int eflags_mask;
3104#ifdef VBOX
3105 bool fVME = false;
3106
3107 remR3TrapClear(env->pVM);
3108#endif /* VBOX */
3109
3110 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3111 sp = ESP;
3112 ssp = env->segs[R_SS].base;
3113 if (shift == 1) {
3114 /* 32 bits */
3115 POPL(ssp, sp, sp_mask, new_eip);
3116 POPL(ssp, sp, sp_mask, new_cs);
3117 new_cs &= 0xffff;
3118 POPL(ssp, sp, sp_mask, new_eflags);
3119 } else {
3120 /* 16 bits */
3121 POPW(ssp, sp, sp_mask, new_eip);
3122 POPW(ssp, sp, sp_mask, new_cs);
3123 POPW(ssp, sp, sp_mask, new_eflags);
3124 }
3125#ifdef VBOX
3126 if ( (env->eflags & VM_MASK)
3127 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3128 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3129 {
3130 fVME = true;
3131 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3132 /* if TF will be set -> #GP */
3133 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3134 || (new_eflags & TF_MASK))
3135 raise_exception(EXCP0D_GPF);
3136 }
3137#endif /* VBOX */
3138 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3139 env->segs[R_CS].selector = new_cs;
3140 env->segs[R_CS].base = (new_cs << 4);
3141 env->eip = new_eip;
3142#ifdef VBOX
3143 if (fVME)
3144 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3145 else
3146#endif
3147 if (env->eflags & VM_MASK)
3148 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3149 else
3150 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3151 if (shift == 0)
3152 eflags_mask &= 0xffff;
3153 load_eflags(new_eflags, eflags_mask);
3154 env->hflags2 &= ~HF2_NMI_MASK;
3155#ifdef VBOX
3156 if (fVME)
3157 {
3158 if (new_eflags & IF_MASK)
3159 env->eflags |= VIF_MASK;
3160 else
3161 env->eflags &= ~VIF_MASK;
3162 }
3163#endif /* VBOX */
3164}
3165
3166#ifndef VBOX
3167static inline void validate_seg(int seg_reg, int cpl)
3168#else /* VBOX */
3169DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3170#endif /* VBOX */
3171{
3172 int dpl;
3173 uint32_t e2;
3174
3175 /* XXX: on x86_64, we do not want to nullify FS and GS because
3176 they may still contain a valid base. I would be interested to
3177 know how a real x86_64 CPU behaves */
3178 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3179 (env->segs[seg_reg].selector & 0xfffc) == 0)
3180 return;
3181
3182 e2 = env->segs[seg_reg].flags;
3183 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3184 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3185 /* data or non conforming code segment */
3186 if (dpl < cpl) {
3187 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3188 }
3189 }
3190}
3191
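/*
 * helper_ret_protected is shared by far RET and IRET.  It pops the return
 * (E)IP, CS and, for IRET, EFLAGS; when the return crosses to an outer
 * privilege level it additionally pops the outer SS:(E)SP, reloads SS and
 * invalidates any data segment register whose DPL is below the new CPL
 * (validate_seg).  The 32-bit IRET path can also drop back to VM86 mode,
 * in which case all six segment registers come from the stack frame.
 */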
3192/* protected mode iret */
3193#ifndef VBOX
3194static inline void helper_ret_protected(int shift, int is_iret, int addend)
3195#else /* VBOX */
3196DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3197#endif /* VBOX */
3198{
3199 uint32_t new_cs, new_eflags, new_ss;
3200 uint32_t new_es, new_ds, new_fs, new_gs;
3201 uint32_t e1, e2, ss_e1, ss_e2;
3202 int cpl, dpl, rpl, eflags_mask, iopl;
3203 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3204
3205#ifdef VBOX
3206 ss_e1 = ss_e2 = e1 = e2 = 0;
3207#endif
3208
3209#ifdef TARGET_X86_64
3210 if (shift == 2)
3211 sp_mask = -1;
3212 else
3213#endif
3214 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3215 sp = ESP;
3216 ssp = env->segs[R_SS].base;
3217 new_eflags = 0; /* avoid warning */
3218#ifdef TARGET_X86_64
3219 if (shift == 2) {
3220 POPQ(sp, new_eip);
3221 POPQ(sp, new_cs);
3222 new_cs &= 0xffff;
3223 if (is_iret) {
3224 POPQ(sp, new_eflags);
3225 }
3226 } else
3227#endif
3228 if (shift == 1) {
3229 /* 32 bits */
3230 POPL(ssp, sp, sp_mask, new_eip);
3231 POPL(ssp, sp, sp_mask, new_cs);
3232 new_cs &= 0xffff;
3233 if (is_iret) {
3234 POPL(ssp, sp, sp_mask, new_eflags);
3235#if defined(VBOX) && defined(DEBUG)
3236 printf("iret: new CS %04X\n", new_cs);
3237 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3238 printf("iret: new EFLAGS %08X\n", new_eflags);
3239 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3240#endif
3241 if (new_eflags & VM_MASK)
3242 goto return_to_vm86;
3243 }
3244#ifdef VBOX
3245 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3246 {
3247#ifdef DEBUG
3248 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3249#endif
3250 new_cs = new_cs & 0xfffc;
3251 }
3252#endif
3253 } else {
3254 /* 16 bits */
3255 POPW(ssp, sp, sp_mask, new_eip);
3256 POPW(ssp, sp, sp_mask, new_cs);
3257 if (is_iret)
3258 POPW(ssp, sp, sp_mask, new_eflags);
3259 }
3260#ifdef DEBUG_PCALL
3261 if (loglevel & CPU_LOG_PCALL) {
3262 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3263 new_cs, new_eip, shift, addend);
3264 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3265 }
3266#endif
3267 if ((new_cs & 0xfffc) == 0)
3268 {
3269#if defined(VBOX) && defined(DEBUG)
3270 printf("(new_cs & 0xfffc) == 0\n");
3271#endif
3272 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3273 }
3274 if (load_segment(&e1, &e2, new_cs) != 0)
3275 {
3276#if defined(VBOX) && defined(DEBUG)
3277 printf("load_segment failed\n");
3278#endif
3279 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3280 }
3281 if (!(e2 & DESC_S_MASK) ||
3282 !(e2 & DESC_CS_MASK))
3283 {
3284#if defined(VBOX) && defined(DEBUG)
3285 printf("e2 mask %08x\n", e2);
3286#endif
3287 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3288 }
3289 cpl = env->hflags & HF_CPL_MASK;
3290 rpl = new_cs & 3;
3291 if (rpl < cpl)
3292 {
3293#if defined(VBOX) && defined(DEBUG)
3294 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3295#endif
3296 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3297 }
3298 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3299 if (e2 & DESC_C_MASK) {
3300 if (dpl > rpl)
3301 {
3302#if defined(VBOX) && defined(DEBUG)
3303 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3304#endif
3305 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3306 }
3307 } else {
3308 if (dpl != rpl)
3309 {
3310#if defined(VBOX) && defined(DEBUG)
3311 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3312#endif
3313 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3314 }
3315 }
3316 if (!(e2 & DESC_P_MASK))
3317 {
3318#if defined(VBOX) && defined(DEBUG)
3319 printf("DESC_P_MASK e2=%08x\n", e2);
3320#endif
3321 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3322 }
3323
3324 sp += addend;
3325 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3326 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3327 /* return to same privilege level */
3328 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3329 get_seg_base(e1, e2),
3330 get_seg_limit(e1, e2),
3331 e2);
3332 } else {
3333 /* return to different privilege level */
3334#ifdef TARGET_X86_64
3335 if (shift == 2) {
3336 POPQ(sp, new_esp);
3337 POPQ(sp, new_ss);
3338 new_ss &= 0xffff;
3339 } else
3340#endif
3341 if (shift == 1) {
3342 /* 32 bits */
3343 POPL(ssp, sp, sp_mask, new_esp);
3344 POPL(ssp, sp, sp_mask, new_ss);
3345 new_ss &= 0xffff;
3346 } else {
3347 /* 16 bits */
3348 POPW(ssp, sp, sp_mask, new_esp);
3349 POPW(ssp, sp, sp_mask, new_ss);
3350 }
3351#ifdef DEBUG_PCALL
3352 if (loglevel & CPU_LOG_PCALL) {
3353 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3354 new_ss, new_esp);
3355 }
3356#endif
3357 if ((new_ss & 0xfffc) == 0) {
3358#ifdef TARGET_X86_64
3359 /* NULL ss is allowed in long mode if cpl != 3*/
3360 /* XXX: test CS64 ? */
3361 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3362 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3363 0, 0xffffffff,
3364 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3365 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3366 DESC_W_MASK | DESC_A_MASK);
3367 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3368 } else
3369#endif
3370 {
3371 raise_exception_err(EXCP0D_GPF, 0);
3372 }
3373 } else {
3374 if ((new_ss & 3) != rpl)
3375 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3376 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3377 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3378 if (!(ss_e2 & DESC_S_MASK) ||
3379 (ss_e2 & DESC_CS_MASK) ||
3380 !(ss_e2 & DESC_W_MASK))
3381 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3382 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3383 if (dpl != rpl)
3384 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3385 if (!(ss_e2 & DESC_P_MASK))
3386 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3387 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3388 get_seg_base(ss_e1, ss_e2),
3389 get_seg_limit(ss_e1, ss_e2),
3390 ss_e2);
3391 }
3392
3393 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3394 get_seg_base(e1, e2),
3395 get_seg_limit(e1, e2),
3396 e2);
3397 cpu_x86_set_cpl(env, rpl);
3398 sp = new_esp;
3399#ifdef TARGET_X86_64
3400 if (env->hflags & HF_CS64_MASK)
3401 sp_mask = -1;
3402 else
3403#endif
3404 sp_mask = get_sp_mask(ss_e2);
3405
3406 /* validate data segments */
3407 validate_seg(R_ES, rpl);
3408 validate_seg(R_DS, rpl);
3409 validate_seg(R_FS, rpl);
3410 validate_seg(R_GS, rpl);
3411
3412 sp += addend;
3413 }
3414 SET_ESP(sp, sp_mask);
3415 env->eip = new_eip;
3416 if (is_iret) {
3417 /* NOTE: 'cpl' is the _old_ CPL */
3418 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3419 if (cpl == 0)
3420#ifdef VBOX
3421 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3422#else
3423 eflags_mask |= IOPL_MASK;
3424#endif
3425 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3426 if (cpl <= iopl)
3427 eflags_mask |= IF_MASK;
3428 if (shift == 0)
3429 eflags_mask &= 0xffff;
3430 load_eflags(new_eflags, eflags_mask);
3431 }
3432 return;
3433
3434 return_to_vm86:
3435 POPL(ssp, sp, sp_mask, new_esp);
3436 POPL(ssp, sp, sp_mask, new_ss);
3437 POPL(ssp, sp, sp_mask, new_es);
3438 POPL(ssp, sp, sp_mask, new_ds);
3439 POPL(ssp, sp, sp_mask, new_fs);
3440 POPL(ssp, sp, sp_mask, new_gs);
3441
3442 /* modify processor state */
3443 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3444 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3445 load_seg_vm(R_CS, new_cs & 0xffff);
3446 cpu_x86_set_cpl(env, 3);
3447 load_seg_vm(R_SS, new_ss & 0xffff);
3448 load_seg_vm(R_ES, new_es & 0xffff);
3449 load_seg_vm(R_DS, new_ds & 0xffff);
3450 load_seg_vm(R_FS, new_fs & 0xffff);
3451 load_seg_vm(R_GS, new_gs & 0xffff);
3452
3453 env->eip = new_eip & 0xffff;
3454 ESP = new_esp;
3455}
3456
3457void helper_iret_protected(int shift, int next_eip)
3458{
3459 int tss_selector, type;
3460 uint32_t e1, e2;
3461
3462#ifdef VBOX
3463 e1 = e2 = 0;
3464 remR3TrapClear(env->pVM);
3465#endif
3466
3467 /* specific case for TSS */
3468 if (env->eflags & NT_MASK) {
3469#ifdef TARGET_X86_64
3470 if (env->hflags & HF_LMA_MASK)
3471 raise_exception_err(EXCP0D_GPF, 0);
3472#endif
3473 tss_selector = lduw_kernel(env->tr.base + 0);
3474 if (tss_selector & 4)
3475 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3476 if (load_segment(&e1, &e2, tss_selector) != 0)
3477 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3478 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3479 /* NOTE: we check both segment and busy TSS */
3480 if (type != 3)
3481 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3482 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3483 } else {
3484 helper_ret_protected(shift, 1, 0);
3485 }
3486 env->hflags2 &= ~HF2_NMI_MASK;
3487#ifdef USE_KQEMU
3488 if (kqemu_is_ok(env)) {
3489 CC_OP = CC_OP_EFLAGS;
3490 env->exception_index = -1;
3491 cpu_loop_exit();
3492 }
3493#endif
3494}
3495
3496void helper_lret_protected(int shift, int addend)
3497{
3498 helper_ret_protected(shift, 0, addend);
3499#ifdef USE_KQEMU
3500 if (kqemu_is_ok(env)) {
3501 env->exception_index = -1;
3502 cpu_loop_exit();
3503 }
3504#endif
3505}
3506
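/*
 * SYSENTER/SYSEXIT use flat segments derived from the SYSENTER_CS MSR:
 * SYSENTER loads CS from SYSENTER_CS, SS = SYSENTER_CS + 8, and takes
 * ESP/EIP from the SYSENTER_ESP/SYSENTER_EIP MSRs while dropping to CPL 0;
 * SYSEXIT returns to CPL 3 with CS = SYSENTER_CS + 16 (or + 32 for a
 * 64-bit return), SS = CS + 8, ESP = ECX and EIP = EDX.
 */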
3507void helper_sysenter(void)
3508{
3509 if (env->sysenter_cs == 0) {
3510 raise_exception_err(EXCP0D_GPF, 0);
3511 }
3512 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3513 cpu_x86_set_cpl(env, 0);
3514
3515#ifdef TARGET_X86_64
3516 if (env->hflags & HF_LMA_MASK) {
3517 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3518 0, 0xffffffff,
3519 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3520 DESC_S_MASK |
3521 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3522 } else
3523#endif
3524 {
3525 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3526 0, 0xffffffff,
3527 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3528 DESC_S_MASK |
3529 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3530 }
3531 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3532 0, 0xffffffff,
3533 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3534 DESC_S_MASK |
3535 DESC_W_MASK | DESC_A_MASK);
3536 ESP = env->sysenter_esp;
3537 EIP = env->sysenter_eip;
3538}
3539
3540void helper_sysexit(int dflag)
3541{
3542 int cpl;
3543
3544 cpl = env->hflags & HF_CPL_MASK;
3545 if (env->sysenter_cs == 0 || cpl != 0) {
3546 raise_exception_err(EXCP0D_GPF, 0);
3547 }
3548 cpu_x86_set_cpl(env, 3);
3549#ifdef TARGET_X86_64
3550 if (dflag == 2) {
3551 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3552 0, 0xffffffff,
3553 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3554 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3555 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3556 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3557 0, 0xffffffff,
3558 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3559 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3560 DESC_W_MASK | DESC_A_MASK);
3561 } else
3562#endif
3563 {
3564 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3565 0, 0xffffffff,
3566 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3567 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3568 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3569 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3570 0, 0xffffffff,
3571 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3572 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3573 DESC_W_MASK | DESC_A_MASK);
3574 }
3575 ESP = ECX;
3576 EIP = EDX;
3577#ifdef USE_KQEMU
3578 if (kqemu_is_ok(env)) {
3579 env->exception_index = -1;
3580 cpu_loop_exit();
3581 }
3582#endif
3583}
3584
3585#if defined(CONFIG_USER_ONLY)
3586target_ulong helper_read_crN(int reg)
3587{
3588 return 0;
3589}
3590
3591void helper_write_crN(int reg, target_ulong t0)
3592{
3593}
3594#else
3595target_ulong helper_read_crN(int reg)
3596{
3597 target_ulong val;
3598
3599 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3600 switch(reg) {
3601 default:
3602 val = env->cr[reg];
3603 break;
3604 case 8:
3605 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3606 val = cpu_get_apic_tpr(env);
3607 } else {
3608 val = env->v_tpr;
3609 }
3610 break;
3611 }
3612 return val;
3613}
3614
3615void helper_write_crN(int reg, target_ulong t0)
3616{
3617 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3618 switch(reg) {
3619 case 0:
3620 cpu_x86_update_cr0(env, t0);
3621 break;
3622 case 3:
3623 cpu_x86_update_cr3(env, t0);
3624 break;
3625 case 4:
3626 cpu_x86_update_cr4(env, t0);
3627 break;
3628 case 8:
3629 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3630 cpu_set_apic_tpr(env, t0);
3631 }
3632 env->v_tpr = t0 & 0x0f;
3633 break;
3634 default:
3635 env->cr[reg] = t0;
3636 break;
3637 }
3638}
3639#endif
3640
3641void helper_lmsw(target_ulong t0)
3642{
3643 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3644 if already set to one. */
3645 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3646 helper_write_crN(0, t0);
3647}
3648
3649void helper_clts(void)
3650{
3651 env->cr[0] &= ~CR0_TS_MASK;
3652 env->hflags &= ~HF_TS_MASK;
3653}
3654
3655/* XXX: do more */
3656void helper_movl_drN_T0(int reg, target_ulong t0)
3657{
3658 env->dr[reg] = t0;
3659}
3660
3661void helper_invlpg(target_ulong addr)
3662{
3663 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3664 tlb_flush_page(env, addr);
3665}
3666
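/*
 * RDTSC returns the 64-bit time stamp counter split across EDX:EAX
 * (high:low).  When CR4.TSD is set the instruction is privileged, so any
 * use at CPL > 0 raises #GP, which is what the check below implements.
 */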
3667void helper_rdtsc(void)
3668{
3669 uint64_t val;
3670
3671 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3672 raise_exception(EXCP0D_GPF);
3673 }
3674 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3675
3676 val = cpu_get_tsc(env) + env->tsc_offset;
3677 EAX = (uint32_t)(val);
3678 EDX = (uint32_t)(val >> 32);
3679}
3680
3681#ifdef VBOX
3682void helper_rdtscp(void)
3683{
3684 uint64_t val;
3685 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3686 raise_exception(EXCP0D_GPF);
3687 }
3688
3689 val = cpu_get_tsc(env);
3690 EAX = (uint32_t)(val);
3691 EDX = (uint32_t)(val >> 32);
3692 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3693 ECX = (uint32_t)(val);
3694 else
3695 ECX = 0;
3696}
3697#endif
3698
3699void helper_rdpmc(void)
3700{
3701#ifdef VBOX
3702 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3703 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3704 raise_exception(EXCP0D_GPF);
3705 }
3706 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3707 EAX = 0;
3708 EDX = 0;
3709#else
3710 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3711 raise_exception(EXCP0D_GPF);
3712 }
3713 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3714
3715 /* currently unimplemented */
3716 raise_exception_err(EXCP06_ILLOP, 0);
3717#endif
3718}
3719
3720#if defined(CONFIG_USER_ONLY)
3721void helper_wrmsr(void)
3722{
3723}
3724
3725void helper_rdmsr(void)
3726{
3727}
3728#else
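/*
 * The MSR helpers take the MSR index from ECX and the 64-bit value from
 * EDX:EAX.  Note how the EFER case builds an update mask from the CPUID
 * feature bits, so the guest can only toggle EFER bits (SCE, LME, FFXSR,
 * NXE, SVME) that the configured CPU actually advertises; in the VBOX
 * build the write is additionally pushed through cpu_wrmsr() so the VMM
 * side sees it as well.
 */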
3729void helper_wrmsr(void)
3730{
3731 uint64_t val;
3732
3733 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3734
3735 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3736
3737 switch((uint32_t)ECX) {
3738 case MSR_IA32_SYSENTER_CS:
3739 env->sysenter_cs = val & 0xffff;
3740 break;
3741 case MSR_IA32_SYSENTER_ESP:
3742 env->sysenter_esp = val;
3743 break;
3744 case MSR_IA32_SYSENTER_EIP:
3745 env->sysenter_eip = val;
3746 break;
3747 case MSR_IA32_APICBASE:
3748#ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3749 cpu_set_apic_base(env, val);
3750#endif
3751 break;
3752 case MSR_EFER:
3753 {
3754 uint64_t update_mask;
3755 update_mask = 0;
3756 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3757 update_mask |= MSR_EFER_SCE;
3758 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3759 update_mask |= MSR_EFER_LME;
3760 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3761 update_mask |= MSR_EFER_FFXSR;
3762 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3763 update_mask |= MSR_EFER_NXE;
3764 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3765 update_mask |= MSR_EFER_SVME;
3766 cpu_load_efer(env, (env->efer & ~update_mask) |
3767 (val & update_mask));
3768 }
3769 break;
3770 case MSR_STAR:
3771 env->star = val;
3772 break;
3773 case MSR_PAT:
3774 env->pat = val;
3775 break;
3776 case MSR_VM_HSAVE_PA:
3777 env->vm_hsave = val;
3778 break;
3779#ifdef TARGET_X86_64
3780 case MSR_LSTAR:
3781 env->lstar = val;
3782 break;
3783 case MSR_CSTAR:
3784 env->cstar = val;
3785 break;
3786 case MSR_FMASK:
3787 env->fmask = val;
3788 break;
3789 case MSR_FSBASE:
3790 env->segs[R_FS].base = val;
3791 break;
3792 case MSR_GSBASE:
3793 env->segs[R_GS].base = val;
3794 break;
3795 case MSR_KERNELGSBASE:
3796 env->kernelgsbase = val;
3797 break;
3798#endif
3799 default:
3800#ifndef VBOX
3801 /* XXX: exception ? */
3802#endif
3803 break;
3804 }
3805
3806#ifdef VBOX
3807 /* call CPUM. */
3808 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3809 {
3810 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3811 }
3812#endif
3813}
3814
3815void helper_rdmsr(void)
3816{
3817 uint64_t val;
3818 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3819
3820 switch((uint32_t)ECX) {
3821 case MSR_IA32_SYSENTER_CS:
3822 val = env->sysenter_cs;
3823 break;
3824 case MSR_IA32_SYSENTER_ESP:
3825 val = env->sysenter_esp;
3826 break;
3827 case MSR_IA32_SYSENTER_EIP:
3828 val = env->sysenter_eip;
3829 break;
3830 case MSR_IA32_APICBASE:
3831 val = cpu_get_apic_base(env);
3832 break;
3833 case MSR_EFER:
3834 val = env->efer;
3835 break;
3836 case MSR_STAR:
3837 val = env->star;
3838 break;
3839 case MSR_PAT:
3840 val = env->pat;
3841 break;
3842 case MSR_VM_HSAVE_PA:
3843 val = env->vm_hsave;
3844 break;
3845#ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3846 case MSR_IA32_PERF_STATUS:
3847 /* tsc_increment_by_tick */
3848 val = 1000ULL;
3849 /* CPU multiplier */
3850 val |= ((uint64_t)4ULL << 40);
3851 break;
3852#endif
3853#ifdef TARGET_X86_64
3854 case MSR_LSTAR:
3855 val = env->lstar;
3856 break;
3857 case MSR_CSTAR:
3858 val = env->cstar;
3859 break;
3860 case MSR_FMASK:
3861 val = env->fmask;
3862 break;
3863 case MSR_FSBASE:
3864 val = env->segs[R_FS].base;
3865 break;
3866 case MSR_GSBASE:
3867 val = env->segs[R_GS].base;
3868 break;
3869 case MSR_KERNELGSBASE:
3870 val = env->kernelgsbase;
3871 break;
3872#endif
3873#ifdef USE_KQEMU
3874 case MSR_QPI_COMMBASE:
3875 if (env->kqemu_enabled) {
3876 val = kqemu_comm_base;
3877 } else {
3878 val = 0;
3879 }
3880 break;
3881#endif
3882 default:
3883#ifndef VBOX
3884 /* XXX: exception ? */
3885 val = 0;
3886#else /* VBOX */
3887 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3888 {
3889 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3890 val = 0;
3891 }
3892#endif
3893 break;
3894 }
3895 EAX = (uint32_t)(val);
3896 EDX = (uint32_t)(val >> 32);
3897
3898#ifdef VBOX_STRICT
3899 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3900 val = 0;
3901 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3902#endif
3903}
3904#endif
3905
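/*
 * LSL/LAR/VERR/VERW never fault on a bad selector; they report success or
 * failure through ZF, which here means setting or clearing CC_Z in CC_SRC.
 * On success LSL returns the expanded segment limit and LAR returns the
 * access-rights bytes of the descriptor (e2 & 0x00f0ff00); on failure the
 * helpers return 0.
 */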
3906target_ulong helper_lsl(target_ulong selector1)
3907{
3908 unsigned int limit;
3909 uint32_t e1, e2, eflags, selector;
3910 int rpl, dpl, cpl, type;
3911
3912 selector = selector1 & 0xffff;
3913 eflags = cc_table[CC_OP].compute_all();
3914 if (load_segment(&e1, &e2, selector) != 0)
3915 goto fail;
3916 rpl = selector & 3;
3917 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3918 cpl = env->hflags & HF_CPL_MASK;
3919 if (e2 & DESC_S_MASK) {
3920 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3921 /* conforming */
3922 } else {
3923 if (dpl < cpl || dpl < rpl)
3924 goto fail;
3925 }
3926 } else {
3927 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3928 switch(type) {
3929 case 1:
3930 case 2:
3931 case 3:
3932 case 9:
3933 case 11:
3934 break;
3935 default:
3936 goto fail;
3937 }
3938 if (dpl < cpl || dpl < rpl) {
3939 fail:
3940 CC_SRC = eflags & ~CC_Z;
3941 return 0;
3942 }
3943 }
3944 limit = get_seg_limit(e1, e2);
3945 CC_SRC = eflags | CC_Z;
3946 return limit;
3947}
3948
3949target_ulong helper_lar(target_ulong selector1)
3950{
3951 uint32_t e1, e2, eflags, selector;
3952 int rpl, dpl, cpl, type;
3953
3954 selector = selector1 & 0xffff;
3955 eflags = cc_table[CC_OP].compute_all();
3956 if ((selector & 0xfffc) == 0)
3957 goto fail;
3958 if (load_segment(&e1, &e2, selector) != 0)
3959 goto fail;
3960 rpl = selector & 3;
3961 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3962 cpl = env->hflags & HF_CPL_MASK;
3963 if (e2 & DESC_S_MASK) {
3964 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3965 /* conforming */
3966 } else {
3967 if (dpl < cpl || dpl < rpl)
3968 goto fail;
3969 }
3970 } else {
3971 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3972 switch(type) {
3973 case 1:
3974 case 2:
3975 case 3:
3976 case 4:
3977 case 5:
3978 case 9:
3979 case 11:
3980 case 12:
3981 break;
3982 default:
3983 goto fail;
3984 }
3985 if (dpl < cpl || dpl < rpl) {
3986 fail:
3987 CC_SRC = eflags & ~CC_Z;
3988 return 0;
3989 }
3990 }
3991 CC_SRC = eflags | CC_Z;
3992 return e2 & 0x00f0ff00;
3993}
3994
3995void helper_verr(target_ulong selector1)
3996{
3997 uint32_t e1, e2, eflags, selector;
3998 int rpl, dpl, cpl;
3999
4000 selector = selector1 & 0xffff;
4001 eflags = cc_table[CC_OP].compute_all();
4002 if ((selector & 0xfffc) == 0)
4003 goto fail;
4004 if (load_segment(&e1, &e2, selector) != 0)
4005 goto fail;
4006 if (!(e2 & DESC_S_MASK))
4007 goto fail;
4008 rpl = selector & 3;
4009 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4010 cpl = env->hflags & HF_CPL_MASK;
4011 if (e2 & DESC_CS_MASK) {
4012 if (!(e2 & DESC_R_MASK))
4013 goto fail;
4014 if (!(e2 & DESC_C_MASK)) {
4015 if (dpl < cpl || dpl < rpl)
4016 goto fail;
4017 }
4018 } else {
4019 if (dpl < cpl || dpl < rpl) {
4020 fail:
4021 CC_SRC = eflags & ~CC_Z;
4022 return;
4023 }
4024 }
4025 CC_SRC = eflags | CC_Z;
4026}
4027
4028void helper_verw(target_ulong selector1)
4029{
4030 uint32_t e1, e2, eflags, selector;
4031 int rpl, dpl, cpl;
4032
4033 selector = selector1 & 0xffff;
4034 eflags = cc_table[CC_OP].compute_all();
4035 if ((selector & 0xfffc) == 0)
4036 goto fail;
4037 if (load_segment(&e1, &e2, selector) != 0)
4038 goto fail;
4039 if (!(e2 & DESC_S_MASK))
4040 goto fail;
4041 rpl = selector & 3;
4042 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4043 cpl = env->hflags & HF_CPL_MASK;
4044 if (e2 & DESC_CS_MASK) {
4045 goto fail;
4046 } else {
4047 if (dpl < cpl || dpl < rpl)
4048 goto fail;
4049 if (!(e2 & DESC_W_MASK)) {
4050 fail:
4051 CC_SRC = eflags & ~CC_Z;
4052 return;
4053 }
4054 }
4055 CC_SRC = eflags | CC_Z;
4056}
4057
4058/* x87 FPU helpers */
4059
4060static void fpu_set_exception(int mask)
4061{
4062 env->fpus |= mask;
4063 if (env->fpus & (~env->fpuc & FPUC_EM))
4064 env->fpus |= FPUS_SE | FPUS_B;
4065}
4066
4067#ifndef VBOX
4068static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4069#else /* VBOX */
4070DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4071#endif /* VBOX */
4072{
4073 if (b == 0.0)
4074 fpu_set_exception(FPUS_ZE);
4075 return a / b;
4076}
4077
4078void fpu_raise_exception(void)
4079{
4080 if (env->cr[0] & CR0_NE_MASK) {
4081 raise_exception(EXCP10_COPR);
4082 }
4083#if !defined(CONFIG_USER_ONLY)
4084 else {
4085 cpu_set_ferr(env);
4086 }
4087#endif
4088}
4089
4090void helper_flds_FT0(uint32_t val)
4091{
4092 union {
4093 float32 f;
4094 uint32_t i;
4095 } u;
4096 u.i = val;
4097 FT0 = float32_to_floatx(u.f, &env->fp_status);
4098}
4099
4100void helper_fldl_FT0(uint64_t val)
4101{
4102 union {
4103 float64 f;
4104 uint64_t i;
4105 } u;
4106 u.i = val;
4107 FT0 = float64_to_floatx(u.f, &env->fp_status);
4108}
4109
4110void helper_fildl_FT0(int32_t val)
4111{
4112 FT0 = int32_to_floatx(val, &env->fp_status);
4113}
4114
4115void helper_flds_ST0(uint32_t val)
4116{
4117 int new_fpstt;
4118 union {
4119 float32 f;
4120 uint32_t i;
4121 } u;
4122 new_fpstt = (env->fpstt - 1) & 7;
4123 u.i = val;
4124 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4125 env->fpstt = new_fpstt;
4126 env->fptags[new_fpstt] = 0; /* validate stack entry */
4127}
4128
4129void helper_fldl_ST0(uint64_t val)
4130{
4131 int new_fpstt;
4132 union {
4133 float64 f;
4134 uint64_t i;
4135 } u;
4136 new_fpstt = (env->fpstt - 1) & 7;
4137 u.i = val;
4138 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4139 env->fpstt = new_fpstt;
4140 env->fptags[new_fpstt] = 0; /* validate stack entry */
4141}
4142
4143void helper_fildl_ST0(int32_t val)
4144{
4145 int new_fpstt;
4146 new_fpstt = (env->fpstt - 1) & 7;
4147 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4148 env->fpstt = new_fpstt;
4149 env->fptags[new_fpstt] = 0; /* validate stack entry */
4150}
4151
4152void helper_fildll_ST0(int64_t val)
4153{
4154 int new_fpstt;
4155 new_fpstt = (env->fpstt - 1) & 7;
4156 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4157 env->fpstt = new_fpstt;
4158 env->fptags[new_fpstt] = 0; /* validate stack entry */
4159}
4160
4161#ifndef VBOX
4162uint32_t helper_fsts_ST0(void)
4163#else
4164RTCCUINTREG helper_fsts_ST0(void)
4165#endif
4166{
4167 union {
4168 float32 f;
4169 uint32_t i;
4170 } u;
4171 u.f = floatx_to_float32(ST0, &env->fp_status);
4172 return u.i;
4173}
4174
4175uint64_t helper_fstl_ST0(void)
4176{
4177 union {
4178 float64 f;
4179 uint64_t i;
4180 } u;
4181 u.f = floatx_to_float64(ST0, &env->fp_status);
4182 return u.i;
4183}
4184#ifndef VBOX
4185int32_t helper_fist_ST0(void)
4186#else
4187RTCCINTREG helper_fist_ST0(void)
4188#endif
4189{
4190 int32_t val;
4191 val = floatx_to_int32(ST0, &env->fp_status);
4192 if (val != (int16_t)val)
4193 val = -32768;
4194 return val;
4195}
4196
4197#ifndef VBOX
4198int32_t helper_fistl_ST0(void)
4199#else
4200RTCCINTREG helper_fistl_ST0(void)
4201#endif
4202{
4203 int32_t val;
4204 val = floatx_to_int32(ST0, &env->fp_status);
4205 return val;
4206}
4207
4208int64_t helper_fistll_ST0(void)
4209{
4210 int64_t val;
4211 val = floatx_to_int64(ST0, &env->fp_status);
4212 return val;
4213}
4214
4215#ifndef VBOX
4216int32_t helper_fistt_ST0(void)
4217#else
4218RTCCINTREG helper_fistt_ST0(void)
4219#endif
4220{
4221 int32_t val;
4222 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4223 if (val != (int16_t)val)
4224 val = -32768;
4225 return val;
4226}
4227
4228#ifndef VBOX
4229int32_t helper_fisttl_ST0(void)
4230#else
4231RTCCINTREG helper_fisttl_ST0(void)
4232#endif
4233{
4234 int32_t val;
4235 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4236 return val;
4237}
4238
4239int64_t helper_fisttll_ST0(void)
4240{
4241 int64_t val;
4242 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4243 return val;
4244}
4245
4246void helper_fldt_ST0(target_ulong ptr)
4247{
4248 int new_fpstt;
4249 new_fpstt = (env->fpstt - 1) & 7;
4250 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4251 env->fpstt = new_fpstt;
4252 env->fptags[new_fpstt] = 0; /* validate stack entry */
4253}
4254
4255void helper_fstt_ST0(target_ulong ptr)
4256{
4257 helper_fstt(ST0, ptr);
4258}
4259
4260void helper_fpush(void)
4261{
4262 fpush();
4263}
4264
4265void helper_fpop(void)
4266{
4267 fpop();
4268}
4269
4270void helper_fdecstp(void)
4271{
4272 env->fpstt = (env->fpstt - 1) & 7;
4273 env->fpus &= (~0x4700);
4274}
4275
4276void helper_fincstp(void)
4277{
4278 env->fpstt = (env->fpstt + 1) & 7;
4279 env->fpus &= (~0x4700);
4280}
4281
4282/* FPU move */
4283
4284void helper_ffree_STN(int st_index)
4285{
4286 env->fptags[(env->fpstt + st_index) & 7] = 1;
4287}
4288
4289void helper_fmov_ST0_FT0(void)
4290{
4291 ST0 = FT0;
4292}
4293
4294void helper_fmov_FT0_STN(int st_index)
4295{
4296 FT0 = ST(st_index);
4297}
4298
4299void helper_fmov_ST0_STN(int st_index)
4300{
4301 ST0 = ST(st_index);
4302}
4303
4304void helper_fmov_STN_ST0(int st_index)
4305{
4306 ST(st_index) = ST0;
4307}
4308
4309void helper_fxchg_ST0_STN(int st_index)
4310{
4311 CPU86_LDouble tmp;
4312 tmp = ST(st_index);
4313 ST(st_index) = ST0;
4314 ST0 = tmp;
4315}
4316
4317/* FPU operations */
4318
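/* Condition-code patterns for FCOM/FUCOM: floatx_compare() returns -1 (less),
   0 (equal), 1 (greater) or 2 (unordered); indexed with ret + 1 this yields
   C0 (0x0100) for "less", C3 (0x4000) for "equal", all bits clear for "greater"
   and C3|C2|C0 (0x4500) for "unordered". */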
4319static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4320
4321void helper_fcom_ST0_FT0(void)
4322{
4323 int ret;
4324
4325 ret = floatx_compare(ST0, FT0, &env->fp_status);
4326 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4327 FORCE_RET();
4328}
4329
4330void helper_fucom_ST0_FT0(void)
4331{
4332 int ret;
4333
4334 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4335 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4336 FORCE_RET();
4337}
4338
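/* EFLAGS patterns for FCOMI/FUCOMI, indexed like fcom_ccval above:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */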
4339static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4340
4341void helper_fcomi_ST0_FT0(void)
4342{
4343 int eflags;
4344 int ret;
4345
4346 ret = floatx_compare(ST0, FT0, &env->fp_status);
4347 eflags = cc_table[CC_OP].compute_all();
4348 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4349 CC_SRC = eflags;
4350 FORCE_RET();
4351}
4352
4353void helper_fucomi_ST0_FT0(void)
4354{
4355 int eflags;
4356 int ret;
4357
4358 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4359 eflags = cc_table[CC_OP].compute_all();
4360 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4361 CC_SRC = eflags;
4362 FORCE_RET();
4363}
4364
4365void helper_fadd_ST0_FT0(void)
4366{
4367 ST0 += FT0;
4368}
4369
4370void helper_fmul_ST0_FT0(void)
4371{
4372 ST0 *= FT0;
4373}
4374
4375void helper_fsub_ST0_FT0(void)
4376{
4377 ST0 -= FT0;
4378}
4379
4380void helper_fsubr_ST0_FT0(void)
4381{
4382 ST0 = FT0 - ST0;
4383}
4384
4385void helper_fdiv_ST0_FT0(void)
4386{
4387 ST0 = helper_fdiv(ST0, FT0);
4388}
4389
4390void helper_fdivr_ST0_FT0(void)
4391{
4392 ST0 = helper_fdiv(FT0, ST0);
4393}
4394
4395/* fp operations between STN and ST0 */
4396
4397void helper_fadd_STN_ST0(int st_index)
4398{
4399 ST(st_index) += ST0;
4400}
4401
4402void helper_fmul_STN_ST0(int st_index)
4403{
4404 ST(st_index) *= ST0;
4405}
4406
4407void helper_fsub_STN_ST0(int st_index)
4408{
4409 ST(st_index) -= ST0;
4410}
4411
4412void helper_fsubr_STN_ST0(int st_index)
4413{
4414 CPU86_LDouble *p;
4415 p = &ST(st_index);
4416 *p = ST0 - *p;
4417}
4418
4419void helper_fdiv_STN_ST0(int st_index)
4420{
4421 CPU86_LDouble *p;
4422 p = &ST(st_index);
4423 *p = helper_fdiv(*p, ST0);
4424}
4425
4426void helper_fdivr_STN_ST0(int st_index)
4427{
4428 CPU86_LDouble *p;
4429 p = &ST(st_index);
4430 *p = helper_fdiv(ST0, *p);
4431}
4432
4433/* misc FPU operations */
4434void helper_fchs_ST0(void)
4435{
4436 ST0 = floatx_chs(ST0);
4437}
4438
4439void helper_fabs_ST0(void)
4440{
4441 ST0 = floatx_abs(ST0);
4442}
4443
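/* The following helpers load x87 constants from the f15rk table defined earlier
   in this file: [0] = +0.0, [1] = 1.0, [2] = pi, [3] = log10(2), [4] = ln(2),
   [5] = log2(e), [6] = log2(10). */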
4444void helper_fld1_ST0(void)
4445{
4446 ST0 = f15rk[1];
4447}
4448
4449void helper_fldl2t_ST0(void)
4450{
4451 ST0 = f15rk[6];
4452}
4453
4454void helper_fldl2e_ST0(void)
4455{
4456 ST0 = f15rk[5];
4457}
4458
4459void helper_fldpi_ST0(void)
4460{
4461 ST0 = f15rk[2];
4462}
4463
4464void helper_fldlg2_ST0(void)
4465{
4466 ST0 = f15rk[3];
4467}
4468
4469void helper_fldln2_ST0(void)
4470{
4471 ST0 = f15rk[4];
4472}
4473
4474void helper_fldz_ST0(void)
4475{
4476 ST0 = f15rk[0];
4477}
4478
4479void helper_fldz_FT0(void)
4480{
4481 FT0 = f15rk[0];
4482}
4483
4484#ifndef VBOX
4485uint32_t helper_fnstsw(void)
4486#else
4487RTCCUINTREG helper_fnstsw(void)
4488#endif
4489{
4490 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4491}
4492
4493#ifndef VBOX
4494uint32_t helper_fnstcw(void)
4495#else
4496RTCCUINTREG helper_fnstcw(void)
4497#endif
4498{
4499 return env->fpuc;
4500}
4501
4502static void update_fp_status(void)
4503{
4504 int rnd_type;
4505
4506 /* set rounding mode */
4507 switch(env->fpuc & RC_MASK) {
4508 default:
4509 case RC_NEAR:
4510 rnd_type = float_round_nearest_even;
4511 break;
4512 case RC_DOWN:
4513 rnd_type = float_round_down;
4514 break;
4515 case RC_UP:
4516 rnd_type = float_round_up;
4517 break;
4518 case RC_CHOP:
4519 rnd_type = float_round_to_zero;
4520 break;
4521 }
4522 set_float_rounding_mode(rnd_type, &env->fp_status);
4523#ifdef FLOATX80
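    /* precision control (FPUC bits 8-9): 0 selects 24-bit, 2 selects 53-bit and
       3 selects 64-bit significand precision; the reserved value 1 falls into
       the default (64-bit) case below. */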
4524 switch((env->fpuc >> 8) & 3) {
4525 case 0:
4526 rnd_type = 32;
4527 break;
4528 case 2:
4529 rnd_type = 64;
4530 break;
4531 case 3:
4532 default:
4533 rnd_type = 80;
4534 break;
4535 }
4536 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4537#endif
4538}
4539
4540void helper_fldcw(uint32_t val)
4541{
4542 env->fpuc = val;
4543 update_fp_status();
4544}
4545
4546void helper_fclex(void)
4547{
4548 env->fpus &= 0x7f00;
4549}
4550
4551void helper_fwait(void)
4552{
4553 if (env->fpus & FPUS_SE)
4554 fpu_raise_exception();
4555 FORCE_RET();
4556}
4557
4558void helper_fninit(void)
4559{
4560 env->fpus = 0;
4561 env->fpstt = 0;
4562 env->fpuc = 0x37f;
4563 env->fptags[0] = 1;
4564 env->fptags[1] = 1;
4565 env->fptags[2] = 1;
4566 env->fptags[3] = 1;
4567 env->fptags[4] = 1;
4568 env->fptags[5] = 1;
4569 env->fptags[6] = 1;
4570 env->fptags[7] = 1;
4571}
4572
4573/* BCD ops */
4574
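/* FBLD/FBSTP operate on an 80-bit packed BCD operand: bytes 0-8 hold 18 decimal
   digits (two per byte, low digit in the low nibble) and byte 9 holds the sign
   in its most significant bit. */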
4575void helper_fbld_ST0(target_ulong ptr)
4576{
4577 CPU86_LDouble tmp;
4578 uint64_t val;
4579 unsigned int v;
4580 int i;
4581
4582 val = 0;
4583 for(i = 8; i >= 0; i--) {
4584 v = ldub(ptr + i);
4585 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4586 }
4587 tmp = val;
4588 if (ldub(ptr + 9) & 0x80)
4589 tmp = -tmp;
4590 fpush();
4591 ST0 = tmp;
4592}
4593
4594void helper_fbst_ST0(target_ulong ptr)
4595{
4596 int v;
4597 target_ulong mem_ref, mem_end;
4598 int64_t val;
4599
4600 val = floatx_to_int64(ST0, &env->fp_status);
4601 mem_ref = ptr;
4602 mem_end = mem_ref + 9;
4603 if (val < 0) {
4604 stb(mem_end, 0x80);
4605 val = -val;
4606 } else {
4607 stb(mem_end, 0x00);
4608 }
4609 while (mem_ref < mem_end) {
4610 if (val == 0)
4611 break;
4612 v = val % 100;
4613 val = val / 100;
4614 v = ((v / 10) << 4) | (v % 10);
4615 stb(mem_ref++, v);
4616 }
4617 while (mem_ref < mem_end) {
4618 stb(mem_ref++, 0);
4619 }
4620}
4621
4622void helper_f2xm1(void)
4623{
4624 ST0 = pow(2.0,ST0) - 1.0;
4625}
4626
4627void helper_fyl2x(void)
4628{
4629 CPU86_LDouble fptemp;
4630
4631 fptemp = ST0;
4632 if (fptemp>0.0){
4633 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4634 ST1 *= fptemp;
4635 fpop();
4636 } else {
4637 env->fpus &= (~0x4700);
4638 env->fpus |= 0x400;
4639 }
4640}
4641
4642void helper_fptan(void)
4643{
4644 CPU86_LDouble fptemp;
4645
4646 fptemp = ST0;
4647 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4648 env->fpus |= 0x400;
4649 } else {
4650 ST0 = tan(fptemp);
4651 fpush();
4652 ST0 = 1.0;
4653 env->fpus &= (~0x400); /* C2 <-- 0 */
4654 /* the above code is for |arg| < 2**52 only */
4655 }
4656}
4657
4658void helper_fpatan(void)
4659{
4660 CPU86_LDouble fptemp, fpsrcop;
4661
4662 fpsrcop = ST1;
4663 fptemp = ST0;
4664 ST1 = atan2(fpsrcop,fptemp);
4665 fpop();
4666}
4667
4668void helper_fxtract(void)
4669{
4670 CPU86_LDoubleU temp;
4671 unsigned int expdif;
4672
4673 temp.d = ST0;
4674 expdif = EXPD(temp) - EXPBIAS;
4675 /*DP exponent bias*/
4676 ST0 = expdif;
4677 fpush();
4678 BIASEXPONENT(temp);
4679 ST0 = temp.d;
4680}
4681
4682#ifdef VBOX
4683#ifdef _MSC_VER
4684/* MSC cannot divide by zero */
4685extern double _Nan;
4686#define NaN _Nan
4687#else
4688#define NaN (0.0 / 0.0)
4689#endif
4690#endif /* VBOX */
4691
4692void helper_fprem1(void)
4693{
4694 CPU86_LDouble dblq, fpsrcop, fptemp;
4695 CPU86_LDoubleU fpsrcop1, fptemp1;
4696 int expdif;
4697 signed long long int q;
4698
4699#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4700 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4701#else
4702 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4703#endif
4704 ST0 = 0.0 / 0.0; /* NaN */
4705 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4706 return;
4707 }
4708
4709 fpsrcop = ST0;
4710 fptemp = ST1;
4711 fpsrcop1.d = fpsrcop;
4712 fptemp1.d = fptemp;
4713 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4714
4715 if (expdif < 0) {
4716 /* optimisation? taken from the AMD docs */
4717 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4718 /* ST0 is unchanged */
4719 return;
4720 }
4721
4722 if (expdif < 53) {
4723 dblq = fpsrcop / fptemp;
4724 /* round dblq towards nearest integer */
4725 dblq = rint(dblq);
4726 ST0 = fpsrcop - fptemp * dblq;
4727
4728 /* convert dblq to q by truncating towards zero */
4729 if (dblq < 0.0)
4730 q = (signed long long int)(-dblq);
4731 else
4732 q = (signed long long int)dblq;
4733
4734 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4735 /* (C0,C3,C1) <-- (q2,q1,q0) */
4736 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4737 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4738 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4739 } else {
4740 env->fpus |= 0x400; /* C2 <-- 1 */
4741 fptemp = pow(2.0, expdif - 50);
4742 fpsrcop = (ST0 / ST1) / fptemp;
4743 /* fpsrcop = integer obtained by chopping */
4744 fpsrcop = (fpsrcop < 0.0) ?
4745 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4746 ST0 -= (ST1 * fpsrcop * fptemp);
4747 }
4748}
4749
4750void helper_fprem(void)
4751{
4752 CPU86_LDouble dblq, fpsrcop, fptemp;
4753 CPU86_LDoubleU fpsrcop1, fptemp1;
4754 int expdif;
4755 signed long long int q;
4756
4757#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4758 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4759#else
4760 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4761#endif
4762 ST0 = 0.0 / 0.0; /* NaN */
4763 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4764 return;
4765 }
4766
4767 fpsrcop = (CPU86_LDouble)ST0;
4768 fptemp = (CPU86_LDouble)ST1;
4769 fpsrcop1.d = fpsrcop;
4770 fptemp1.d = fptemp;
4771 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4772
4773 if (expdif < 0) {
4774 /* optimisation? taken from the AMD docs */
4775 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4776 /* ST0 is unchanged */
4777 return;
4778 }
4779
4780 if ( expdif < 53 ) {
4781 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4782 /* round dblq towards zero */
4783 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4784 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4785
4786 /* convert dblq to q by truncating towards zero */
4787 if (dblq < 0.0)
4788 q = (signed long long int)(-dblq);
4789 else
4790 q = (signed long long int)dblq;
4791
4792 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4793 /* (C0,C3,C1) <-- (q2,q1,q0) */
4794 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4795 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4796 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4797 } else {
4798 int N = 32 + (expdif % 32); /* as per AMD docs */
4799 env->fpus |= 0x400; /* C2 <-- 1 */
4800 fptemp = pow(2.0, (double)(expdif - N));
4801 fpsrcop = (ST0 / ST1) / fptemp;
4802 /* fpsrcop = integer obtained by chopping */
4803 fpsrcop = (fpsrcop < 0.0) ?
4804 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4805 ST0 -= (ST1 * fpsrcop * fptemp);
4806 }
4807}
4808
4809void helper_fyl2xp1(void)
4810{
4811 CPU86_LDouble fptemp;
4812
4813 fptemp = ST0;
4814 if ((fptemp+1.0)>0.0) {
4815 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4816 ST1 *= fptemp;
4817 fpop();
4818 } else {
4819 env->fpus &= (~0x4700);
4820 env->fpus |= 0x400;
4821 }
4822}
4823
4824void helper_fsqrt(void)
4825{
4826 CPU86_LDouble fptemp;
4827
4828 fptemp = ST0;
4829 if (fptemp<0.0) {
4830 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4831 env->fpus |= 0x400;
4832 }
4833 ST0 = sqrt(fptemp);
4834}
4835
4836void helper_fsincos(void)
4837{
4838 CPU86_LDouble fptemp;
4839
4840 fptemp = ST0;
4841 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4842 env->fpus |= 0x400;
4843 } else {
4844 ST0 = sin(fptemp);
4845 fpush();
4846 ST0 = cos(fptemp);
4847 env->fpus &= (~0x400); /* C2 <-- 0 */
4848 /* the above code is for |arg| < 2**63 only */
4849 }
4850}
4851
4852void helper_frndint(void)
4853{
4854 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4855}
4856
4857void helper_fscale(void)
4858{
4859 ST0 = ldexp (ST0, (int)(ST1));
4860}
4861
4862void helper_fsin(void)
4863{
4864 CPU86_LDouble fptemp;
4865
4866 fptemp = ST0;
4867 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4868 env->fpus |= 0x400;
4869 } else {
4870 ST0 = sin(fptemp);
4871 env->fpus &= (~0x400); /* C2 <-- 0 */
4872 /* the above code is for |arg| < 2**53 only */
4873 }
4874}
4875
4876void helper_fcos(void)
4877{
4878 CPU86_LDouble fptemp;
4879
4880 fptemp = ST0;
4881 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4882 env->fpus |= 0x400;
4883 } else {
4884 ST0 = cos(fptemp);
4885 env->fpus &= (~0x400); /* C2 <-- 0 */
4886 /* the above code is for |arg| < 2**63 only */
4887 }
4888}
4889
4890void helper_fxam_ST0(void)
4891{
4892 CPU86_LDoubleU temp;
4893 int expdif;
4894
4895 temp.d = ST0;
4896
4897 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4898 if (SIGND(temp))
4899 env->fpus |= 0x200; /* C1 <-- 1 */
4900
4901 /* XXX: test fptags too */
4902 expdif = EXPD(temp);
4903 if (expdif == MAXEXPD) {
4904#ifdef USE_X86LDOUBLE
4905 if (MANTD(temp) == 0x8000000000000000ULL)
4906#else
4907 if (MANTD(temp) == 0)
4908#endif
4909 env->fpus |= 0x500 /*Infinity*/;
4910 else
4911 env->fpus |= 0x100 /*NaN*/;
4912 } else if (expdif == 0) {
4913 if (MANTD(temp) == 0)
4914 env->fpus |= 0x4000 /*Zero*/;
4915 else
4916 env->fpus |= 0x4400 /*Denormal*/;
4917 } else {
4918 env->fpus |= 0x400;
4919 }
4920}
4921
4922void helper_fstenv(target_ulong ptr, int data32)
4923{
4924 int fpus, fptag, exp, i;
4925 uint64_t mant;
4926 CPU86_LDoubleU tmp;
4927
4928 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4929 fptag = 0;
4930 for (i=7; i>=0; i--) {
4931 fptag <<= 2;
4932 if (env->fptags[i]) {
4933 fptag |= 3;
4934 } else {
4935 tmp.d = env->fpregs[i].d;
4936 exp = EXPD(tmp);
4937 mant = MANTD(tmp);
4938 if (exp == 0 && mant == 0) {
4939 /* zero */
4940 fptag |= 1;
4941 } else if (exp == 0 || exp == MAXEXPD
4942#ifdef USE_X86LDOUBLE
4943 || (mant & (1LL << 63)) == 0
4944#endif
4945 ) {
4946 /* NaNs, infinity, denormal */
4947 fptag |= 2;
4948 }
4949 }
4950 }
4951 if (data32) {
4952 /* 32 bit */
4953 stl(ptr, env->fpuc);
4954 stl(ptr + 4, fpus);
4955 stl(ptr + 8, fptag);
4956 stl(ptr + 12, 0); /* fpip */
4957 stl(ptr + 16, 0); /* fpcs */
4958 stl(ptr + 20, 0); /* fpoo */
4959 stl(ptr + 24, 0); /* fpos */
4960 } else {
4961 /* 16 bit */
4962 stw(ptr, env->fpuc);
4963 stw(ptr + 2, fpus);
4964 stw(ptr + 4, fptag);
4965 stw(ptr + 6, 0);
4966 stw(ptr + 8, 0);
4967 stw(ptr + 10, 0);
4968 stw(ptr + 12, 0);
4969 }
4970}
4971
4972void helper_fldenv(target_ulong ptr, int data32)
4973{
4974 int i, fpus, fptag;
4975
4976 if (data32) {
4977 env->fpuc = lduw(ptr);
4978 fpus = lduw(ptr + 4);
4979 fptag = lduw(ptr + 8);
4980 }
4981 else {
4982 env->fpuc = lduw(ptr);
4983 fpus = lduw(ptr + 2);
4984 fptag = lduw(ptr + 4);
4985 }
4986 env->fpstt = (fpus >> 11) & 7;
4987 env->fpus = fpus & ~0x3800;
4988 for(i = 0;i < 8; i++) {
4989 env->fptags[i] = ((fptag & 3) == 3);
4990 fptag >>= 2;
4991 }
4992}
4993
4994void helper_fsave(target_ulong ptr, int data32)
4995{
4996 CPU86_LDouble tmp;
4997 int i;
4998
4999 helper_fstenv(ptr, data32);
5000
5001 ptr += (14 << data32);
5002 for(i = 0;i < 8; i++) {
5003 tmp = ST(i);
5004 helper_fstt(tmp, ptr);
5005 ptr += 10;
5006 }
5007
5008 /* fninit */
5009 env->fpus = 0;
5010 env->fpstt = 0;
5011 env->fpuc = 0x37f;
5012 env->fptags[0] = 1;
5013 env->fptags[1] = 1;
5014 env->fptags[2] = 1;
5015 env->fptags[3] = 1;
5016 env->fptags[4] = 1;
5017 env->fptags[5] = 1;
5018 env->fptags[6] = 1;
5019 env->fptags[7] = 1;
5020}
5021
5022void helper_frstor(target_ulong ptr, int data32)
5023{
5024 CPU86_LDouble tmp;
5025 int i;
5026
5027 helper_fldenv(ptr, data32);
5028 ptr += (14 << data32);
5029
5030 for(i = 0;i < 8; i++) {
5031 tmp = helper_fldt(ptr);
5032 ST(i) = tmp;
5033 ptr += 10;
5034 }
5035}
5036
5037void helper_fxsave(target_ulong ptr, int data64)
5038{
5039 int fpus, fptag, i, nb_xmm_regs;
5040 CPU86_LDouble tmp;
5041 target_ulong addr;
5042
5043 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5044 fptag = 0;
5045 for(i = 0; i < 8; i++) {
5046 fptag |= (env->fptags[i] << i);
5047 }
5048 stw(ptr, env->fpuc);
5049 stw(ptr + 2, fpus);
5050 stw(ptr + 4, fptag ^ 0xff);
5051#ifdef TARGET_X86_64
5052 if (data64) {
5053 stq(ptr + 0x08, 0); /* rip */
5054 stq(ptr + 0x10, 0); /* rdp */
5055 } else
5056#endif
5057 {
5058 stl(ptr + 0x08, 0); /* eip */
5059 stl(ptr + 0x0c, 0); /* sel */
5060 stl(ptr + 0x10, 0); /* dp */
5061 stl(ptr + 0x14, 0); /* sel */
5062 }
5063
5064 addr = ptr + 0x20;
5065 for(i = 0;i < 8; i++) {
5066 tmp = ST(i);
5067 helper_fstt(tmp, addr);
5068 addr += 16;
5069 }
5070
5071 if (env->cr[4] & CR4_OSFXSR_MASK) {
5072 /* XXX: finish it */
5073 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5074 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5075 if (env->hflags & HF_CS64_MASK)
5076 nb_xmm_regs = 16;
5077 else
5078 nb_xmm_regs = 8;
5079 addr = ptr + 0xa0;
5080 for(i = 0; i < nb_xmm_regs; i++) {
5081 stq(addr, env->xmm_regs[i].XMM_Q(0));
5082 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5083 addr += 16;
5084 }
5085 }
5086}
5087
5088void helper_fxrstor(target_ulong ptr, int data64)
5089{
5090 int i, fpus, fptag, nb_xmm_regs;
5091 CPU86_LDouble tmp;
5092 target_ulong addr;
5093
5094 env->fpuc = lduw(ptr);
5095 fpus = lduw(ptr + 2);
5096 fptag = lduw(ptr + 4);
5097 env->fpstt = (fpus >> 11) & 7;
5098 env->fpus = fpus & ~0x3800;
5099 fptag ^= 0xff;
5100 for(i = 0;i < 8; i++) {
5101 env->fptags[i] = ((fptag >> i) & 1);
5102 }
5103
5104 addr = ptr + 0x20;
5105 for(i = 0;i < 8; i++) {
5106 tmp = helper_fldt(addr);
5107 ST(i) = tmp;
5108 addr += 16;
5109 }
5110
5111 if (env->cr[4] & CR4_OSFXSR_MASK) {
5112 /* XXX: finish it */
5113 env->mxcsr = ldl(ptr + 0x18);
5114 //ldl(ptr + 0x1c);
5115 if (env->hflags & HF_CS64_MASK)
5116 nb_xmm_regs = 16;
5117 else
5118 nb_xmm_regs = 8;
5119 addr = ptr + 0xa0;
5120 for(i = 0; i < nb_xmm_regs; i++) {
5121#if !defined(VBOX) || __GNUC__ < 4
5122 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5123 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5124#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5125# if 1
5126 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5127 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5128 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5129 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5130# else
5131 /* this works fine on Mac OS X, gcc 4.0.1 */
5132 uint64_t u64 = ldq(addr);
5133 env->xmm_regs[i].XMM_Q(0) = u64;
5134 u64 = ldq(addr + 8);
5135 env->xmm_regs[i].XMM_Q(1) = u64;
5136# endif
5137#endif
5138 addr += 16;
5139 }
5140 }
5141}
5142
5143#ifndef USE_X86LDOUBLE
5144
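/* Without USE_X86LDOUBLE the guest's 80-bit FPU registers are emulated with host
   doubles, so the 64-bit mantissa and 15-bit exponent of the x87 extended format
   have to be reconstructed from (or packed back into) the IEEE double layout. */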
5145void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5146{
5147 CPU86_LDoubleU temp;
5148 int e;
5149
5150 temp.d = f;
5151 /* mantissa */
5152 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5153 /* exponent + sign */
5154 e = EXPD(temp) - EXPBIAS + 16383;
5155 e |= SIGND(temp) >> 16;
5156 *pexp = e;
5157}
5158
5159CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5160{
5161 CPU86_LDoubleU temp;
5162 int e;
5163 uint64_t ll;
5164
5165 /* XXX: handle overflow ? */
5166 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5167 e |= (upper >> 4) & 0x800; /* sign */
5168 ll = (mant >> 11) & ((1LL << 52) - 1);
5169#ifdef __arm__
5170 temp.l.upper = (e << 20) | (ll >> 32);
5171 temp.l.lower = ll;
5172#else
5173 temp.ll = ll | ((uint64_t)e << 52);
5174#endif
5175 return temp.d;
5176}
5177
5178#else
5179
5180void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5181{
5182 CPU86_LDoubleU temp;
5183
5184 temp.d = f;
5185 *pmant = temp.l.lower;
5186 *pexp = temp.l.upper;
5187}
5188
5189CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5190{
5191 CPU86_LDoubleU temp;
5192
5193 temp.l.upper = upper;
5194 temp.l.lower = mant;
5195 return temp.d;
5196}
5197#endif
5198
5199#ifdef TARGET_X86_64
5200
5201//#define DEBUG_MULDIV
5202
5203static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5204{
5205 *plow += a;
5206 /* carry test */
5207 if (*plow < a)
5208 (*phigh)++;
5209 *phigh += b;
5210}
5211
5212static void neg128(uint64_t *plow, uint64_t *phigh)
5213{
5214 *plow = ~ *plow;
5215 *phigh = ~ *phigh;
5216 add128(plow, phigh, 1, 0);
5217}
5218
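/* 128-bit by 64-bit unsigned division used by DIV in 64-bit mode: when the high
   half is non-zero, a bit-by-bit shift-and-subtract loop leaves the 64-bit
   quotient in *plow and the remainder in *phigh; a quotient that does not fit
   in 64 bits is reported as overflow. */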
5219/* return TRUE if overflow */
5220static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5221{
5222 uint64_t q, r, a1, a0;
5223 int i, qb, ab;
5224
5225 a0 = *plow;
5226 a1 = *phigh;
5227 if (a1 == 0) {
5228 q = a0 / b;
5229 r = a0 % b;
5230 *plow = q;
5231 *phigh = r;
5232 } else {
5233 if (a1 >= b)
5234 return 1;
5235 /* XXX: use a better algorithm */
5236 for(i = 0; i < 64; i++) {
5237 ab = a1 >> 63;
5238 a1 = (a1 << 1) | (a0 >> 63);
5239 if (ab || a1 >= b) {
5240 a1 -= b;
5241 qb = 1;
5242 } else {
5243 qb = 0;
5244 }
5245 a0 = (a0 << 1) | qb;
5246 }
5247#if defined(DEBUG_MULDIV)
5248 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5249 *phigh, *plow, b, a0, a1);
5250#endif
5251 *plow = a0;
5252 *phigh = a1;
5253 }
5254 return 0;
5255}
5256
5257/* return TRUE if overflow */
5258static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5259{
5260 int sa, sb;
5261 sa = ((int64_t)*phigh < 0);
5262 if (sa)
5263 neg128(plow, phigh);
5264 sb = (b < 0);
5265 if (sb)
5266 b = -b;
5267 if (div64(plow, phigh, b) != 0)
5268 return 1;
5269 if (sa ^ sb) {
5270 if (*plow > (1ULL << 63))
5271 return 1;
5272 *plow = - *plow;
5273 } else {
5274 if (*plow >= (1ULL << 63))
5275 return 1;
5276 }
5277 if (sa)
5278 *phigh = - *phigh;
5279 return 0;
5280}
5281
5282void helper_mulq_EAX_T0(target_ulong t0)
5283{
5284 uint64_t r0, r1;
5285
5286 mulu64(&r0, &r1, EAX, t0);
5287 EAX = r0;
5288 EDX = r1;
5289 CC_DST = r0;
5290 CC_SRC = r1;
5291}
5292
5293void helper_imulq_EAX_T0(target_ulong t0)
5294{
5295 uint64_t r0, r1;
5296
5297 muls64(&r0, &r1, EAX, t0);
5298 EAX = r0;
5299 EDX = r1;
5300 CC_DST = r0;
5301 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5302}
5303
5304target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5305{
5306 uint64_t r0, r1;
5307
5308 muls64(&r0, &r1, t0, t1);
5309 CC_DST = r0;
5310 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5311 return r0;
5312}
5313
5314void helper_divq_EAX(target_ulong t0)
5315{
5316 uint64_t r0, r1;
5317 if (t0 == 0) {
5318 raise_exception(EXCP00_DIVZ);
5319 }
5320 r0 = EAX;
5321 r1 = EDX;
5322 if (div64(&r0, &r1, t0))
5323 raise_exception(EXCP00_DIVZ);
5324 EAX = r0;
5325 EDX = r1;
5326}
5327
5328void helper_idivq_EAX(target_ulong t0)
5329{
5330 uint64_t r0, r1;
5331 if (t0 == 0) {
5332 raise_exception(EXCP00_DIVZ);
5333 }
5334 r0 = EAX;
5335 r1 = EDX;
5336 if (idiv64(&r0, &r1, t0))
5337 raise_exception(EXCP00_DIVZ);
5338 EAX = r0;
5339 EDX = r1;
5340}
5341#endif
5342
5343static void do_hlt(void)
5344{
5345 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5346 env->halted = 1;
5347 env->exception_index = EXCP_HLT;
5348 cpu_loop_exit();
5349}
5350
5351void helper_hlt(int next_eip_addend)
5352{
5353 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5354 EIP += next_eip_addend;
5355
5356 do_hlt();
5357}
5358
5359void helper_monitor(target_ulong ptr)
5360{
5361#ifdef VBOX
5362 if ((uint32_t)ECX > 1)
5363 raise_exception(EXCP0D_GPF);
5364#else
5365 if ((uint32_t)ECX != 0)
5366 raise_exception(EXCP0D_GPF);
5367#endif
5368 /* XXX: store address ? */
5369 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5370}
5371
5372void helper_mwait(int next_eip_addend)
5373{
5374 if ((uint32_t)ECX != 0)
5375 raise_exception(EXCP0D_GPF);
5376#ifdef VBOX
5377 helper_hlt(next_eip_addend);
5378#else
5379 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5380 EIP += next_eip_addend;
5381
5382 /* XXX: not complete but not completely erroneous */
5383 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5384 /* more than one CPU: do not sleep because another CPU may
5385 wake this one */
5386 } else {
5387 do_hlt();
5388 }
5389#endif
5390}
5391
5392void helper_debug(void)
5393{
5394 env->exception_index = EXCP_DEBUG;
5395 cpu_loop_exit();
5396}
5397
5398void helper_raise_interrupt(int intno, int next_eip_addend)
5399{
5400 raise_interrupt(intno, 1, 0, next_eip_addend);
5401}
5402
5403void helper_raise_exception(int exception_index)
5404{
5405 raise_exception(exception_index);
5406}
5407
5408void helper_cli(void)
5409{
5410 env->eflags &= ~IF_MASK;
5411}
5412
5413void helper_sti(void)
5414{
5415 env->eflags |= IF_MASK;
5416}
5417
5418#ifdef VBOX
5419void helper_cli_vme(void)
5420{
5421 env->eflags &= ~VIF_MASK;
5422}
5423
5424void helper_sti_vme(void)
5425{
5426 /* First check, then change eflags according to the AMD manual */
5427 if (env->eflags & VIP_MASK) {
5428 raise_exception(EXCP0D_GPF);
5429 }
5430 env->eflags |= VIF_MASK;
5431}
5432#endif
5433
5434#if 0
5435/* vm86plus instructions */
5436void helper_cli_vm(void)
5437{
5438 env->eflags &= ~VIF_MASK;
5439}
5440
5441void helper_sti_vm(void)
5442{
5443 env->eflags |= VIF_MASK;
5444 if (env->eflags & VIP_MASK) {
5445 raise_exception(EXCP0D_GPF);
5446 }
5447}
5448#endif
5449
5450void helper_set_inhibit_irq(void)
5451{
5452 env->hflags |= HF_INHIBIT_IRQ_MASK;
5453}
5454
5455void helper_reset_inhibit_irq(void)
5456{
5457 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5458}
5459
5460void helper_boundw(target_ulong a0, int v)
5461{
5462 int low, high;
5463 low = ldsw(a0);
5464 high = ldsw(a0 + 2);
5465 v = (int16_t)v;
5466 if (v < low || v > high) {
5467 raise_exception(EXCP05_BOUND);
5468 }
5469 FORCE_RET();
5470}
5471
5472void helper_boundl(target_ulong a0, int v)
5473{
5474 int low, high;
5475 low = ldl(a0);
5476 high = ldl(a0 + 4);
5477 if (v < low || v > high) {
5478 raise_exception(EXCP05_BOUND);
5479 }
5480 FORCE_RET();
5481}
5482
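/* Approximation helpers used for the SSE RCP/RSQRT instructions; the hardware
   only guarantees roughly 12 bits of precision, here the values are simply
   computed at full host precision. */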
5483static float approx_rsqrt(float a)
5484{
5485 return 1.0 / sqrt(a);
5486}
5487
5488static float approx_rcp(float a)
5489{
5490 return 1.0 / a;
5491}
5492
5493#if !defined(CONFIG_USER_ONLY)
5494
5495#define MMUSUFFIX _mmu
5496
5497#define SHIFT 0
5498#include "softmmu_template.h"
5499
5500#define SHIFT 1
5501#include "softmmu_template.h"
5502
5503#define SHIFT 2
5504#include "softmmu_template.h"
5505
5506#define SHIFT 3
5507#include "softmmu_template.h"
5508
5509#endif
5510
5511#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5512/* This code assumes that a real physical address always fits into a host CPU register,
5513 which is wrong in general, but true for our current use cases. */
5514RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5515{
5516 return remR3PhysReadS8(addr);
5517}
5518RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5519{
5520 return remR3PhysReadU8(addr);
5521}
5522void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5523{
5524 remR3PhysWriteU8(addr, val);
5525}
5526RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5527{
5528 return remR3PhysReadS16(addr);
5529}
5530RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5531{
5532 return remR3PhysReadU16(addr);
5533}
5534void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5535{
5536 remR3PhysWriteU16(addr, val);
5537}
5538RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5539{
5540 return remR3PhysReadS32(addr);
5541}
5542RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5543{
5544 return remR3PhysReadU32(addr);
5545}
5546void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5547{
5548 remR3PhysWriteU32(addr, val);
5549}
5550uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5551{
5552 return remR3PhysReadU64(addr);
5553}
5554void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5555{
5556 remR3PhysWriteU64(addr, val);
5557}
5558#endif
5559
5560/* Try to fill the TLB and raise an exception on error. If retaddr is
5561 NULL, it means that the function was called from C code (i.e. not
5562 from generated code or from helper.c). */
5563/* XXX: fix it to restore all registers */
5564void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5565{
5566 TranslationBlock *tb;
5567 int ret;
5568 unsigned long pc;
5569 CPUX86State *saved_env;
5570
5571 /* XXX: hack to restore env in all cases, even if not called from
5572 generated code */
5573 saved_env = env;
5574 env = cpu_single_env;
5575
5576 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5577 if (ret) {
5578 if (retaddr) {
5579 /* now we have a real cpu fault */
5580 pc = (unsigned long)retaddr;
5581 tb = tb_find_pc(pc);
5582 if (tb) {
5583 /* the PC is inside the translated code. It means that we have
5584 a virtual CPU fault */
5585 cpu_restore_state(tb, env, pc, NULL);
5586 }
5587 }
5588 raise_exception_err(env->exception_index, env->error_code);
5589 }
5590 env = saved_env;
5591}
5592
5593#ifdef VBOX
5594
5595/**
5596 * Correctly computes the eflags.
5597 * @returns eflags.
5598 * @param env1 CPU environment.
5599 */
5600uint32_t raw_compute_eflags(CPUX86State *env1)
5601{
5602 CPUX86State *savedenv = env;
5603 uint32_t efl;
5604 env = env1;
5605 efl = compute_eflags();
5606 env = savedenv;
5607 return efl;
5608}
5609
5610/**
5611 * Reads a byte from a virtual address in the guest memory area.
5612 * XXX: does it work for all addresses? swapped out pages?
5613 * @returns the data byte read.
5614 * @param env1 CPU environment.
5615 * @param addr GC virtual address.
5616 */
5617uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5618{
5619 CPUX86State *savedenv = env;
5620 uint8_t u8;
5621 env = env1;
5622 u8 = ldub_kernel(addr);
5623 env = savedenv;
5624 return u8;
5625}
5626
5627/**
5628 * Reads a word (16 bits) from a virtual address in the guest memory area.
5629 * XXX: does it work for all addresses? swapped out pages?
5630 * @returns the data word read.
5631 * @param env1 CPU environment.
5632 * @param addr GC virtual address.
5633 */
5634uint16_t read_word(CPUX86State *env1, target_ulong addr)
5635{
5636 CPUX86State *savedenv = env;
5637 uint16_t u16;
5638 env = env1;
5639 u16 = lduw_kernel(addr);
5640 env = savedenv;
5641 return u16;
5642}
5643
5644/**
5645 * Reads a dword (32 bits) from a virtual address in the guest memory area.
5646 * XXX: does it work for all addresses? swapped out pages?
5647 * @returns the data dword read.
5648 * @param env1 CPU environment.
5649 * @param addr GC virtual address.
5650 */
5651uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5652{
5653 CPUX86State *savedenv = env;
5654 uint32_t u32;
5655 env = env1;
5656 u32 = ldl_kernel(addr);
5657 env = savedenv;
5658 return u32;
5659}
5660
5661/**
5662 * Writes a byte to a virtual address in the guest memory area.
5663 * XXX: does it work for all addresses? swapped out pages?
5665 * @param env1 CPU environment.
5666 * @param addr GC virtual address.
5667 * @param val The byte value to write.
5668 */
5669void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5670{
5671 CPUX86State *savedenv = env;
5672 env = env1;
5673 stb(addr, val);
5674 env = savedenv;
5675}
5676
5677void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5678{
5679 CPUX86State *savedenv = env;
5680 env = env1;
5681 stw(addr, val);
5682 env = savedenv;
5683}
5684
5685void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5686{
5687 CPUX86State *savedenv = env;
5688 env = env1;
5689 stl(addr, val);
5690 env = savedenv;
5691}
5692
5693/**
5694 * Correctly loads a selector into a segment register, updating the internal
5695 * qemu data/caches.
5696 * @param env1 CPU environment.
5697 * @param seg_reg Segment register.
5698 * @param selector Selector to load.
5699 */
5700void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5701{
5702 CPUX86State *savedenv = env;
5703#ifdef FORCE_SEGMENT_SYNC
5704 jmp_buf old_buf;
5705#endif
5706
5707 env = env1;
5708
5709 if ( env->eflags & X86_EFL_VM
5710 || !(env->cr[0] & X86_CR0_PE))
5711 {
5712 load_seg_vm(seg_reg, selector);
5713
5714 env = savedenv;
5715
5716 /* Successful sync. */
5717 env1->segs[seg_reg].newselector = 0;
5718 }
5719 else
5720 {
5721 /* For some reason this works even without saving/restoring the jump buffer, and
5722 since this code is time critical we don't do that. */
5723#ifdef FORCE_SEGMENT_SYNC
5724 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5725#endif
5726 if (setjmp(env1->jmp_env) == 0)
5727 {
5728 if (seg_reg == R_CS)
5729 {
5730 uint32_t e1, e2;
5731 e1 = e2 = 0;
5732 load_segment(&e1, &e2, selector);
5733 cpu_x86_load_seg_cache(env, R_CS, selector,
5734 get_seg_base(e1, e2),
5735 get_seg_limit(e1, e2),
5736 e2);
5737 }
5738 else
5739 helper_load_seg(seg_reg, selector);
5740 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5741 loading 0 selectors, which in turn led to subtle problems like #3588 */
5742
5743 env = savedenv;
5744
5745 /* Successful sync. */
5746 env1->segs[seg_reg].newselector = 0;
5747 }
5748 else
5749 {
5750 env = savedenv;
5751
5752 /* Postpone sync until the guest uses the selector. */
5753 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5754 env1->segs[seg_reg].newselector = selector;
5755 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5756 env1->exception_index = -1;
5757 env1->error_code = 0;
5758 env1->old_exception = -1;
5759 }
5760#ifdef FORCE_SEGMENT_SYNC
5761 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5762#endif
5763 }
5764
5765}
5766
5767DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5768{
5769 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5770}
5771
5772
5773int emulate_single_instr(CPUX86State *env1)
5774{
5775 TranslationBlock *tb;
5776 TranslationBlock *current;
5777 int flags;
5778 uint8_t *tc_ptr;
5779 target_ulong old_eip;
5780
5781 /* ensures env is loaded! */
5782 CPUX86State *savedenv = env;
5783 env = env1;
5784
5785 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5786
5787 current = env->current_tb;
5788 env->current_tb = NULL;
5789 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5790
5791 /*
5792 * Translate only one instruction.
5793 */
5794 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5795 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5796 env->segs[R_CS].base, flags, 0);
5797
5798 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5799
5800
5801 /* tb_link_phys: */
5802 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5803 tb->jmp_next[0] = NULL;
5804 tb->jmp_next[1] = NULL;
5805 Assert(tb->jmp_next[0] == NULL);
5806 Assert(tb->jmp_next[1] == NULL);
5807 if (tb->tb_next_offset[0] != 0xffff)
5808 tb_reset_jump(tb, 0);
5809 if (tb->tb_next_offset[1] != 0xffff)
5810 tb_reset_jump(tb, 1);
5811
5812 /*
5813 * Execute it using emulation
5814 */
5815 old_eip = env->eip;
5816 env->current_tb = tb;
5817
5818 /*
5819 * EIP remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5820 * This is perhaps not a very safe hack.
5821 */
5822 while(old_eip == env->eip)
5823 {
5824 tc_ptr = tb->tc_ptr;
5825
5826#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5827 int fake_ret;
5828 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5829#else
5830 tcg_qemu_tb_exec(tc_ptr);
5831#endif
5832 /*
5833 * Exit once we detect an external interrupt and interrupts are enabled
5834 */
5835 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5836 ( (env->eflags & IF_MASK) &&
5837 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5838 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5839 {
5840 break;
5841 }
5842 }
5843 env->current_tb = current;
5844
5845 tb_phys_invalidate(tb, -1);
5846 tb_free(tb);
5847/*
5848 Assert(tb->tb_next_offset[0] == 0xffff);
5849 Assert(tb->tb_next_offset[1] == 0xffff);
5850 Assert(tb->tb_next[0] == 0xffff);
5851 Assert(tb->tb_next[1] == 0xffff);
5852 Assert(tb->jmp_next[0] == NULL);
5853 Assert(tb->jmp_next[1] == NULL);
5854 Assert(tb->jmp_first == NULL); */
5855
5856 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5857
5858 /*
5859 * Execute the next instruction when we encounter instruction fusing.
5860 */
5861 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5862 {
5863 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5864 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5865 emulate_single_instr(env);
5866 }
5867
5868 env = savedenv;
5869 return 0;
5870}
5871
5872/**
5873 * Correctly loads a new ldtr selector.
5874 *
5875 * @param env1 CPU environment.
5876 * @param selector Selector to load.
5877 */
5878void sync_ldtr(CPUX86State *env1, int selector)
5879{
5880 CPUX86State *saved_env = env;
5881 if (setjmp(env1->jmp_env) == 0)
5882 {
5883 env = env1;
5884 helper_lldt(selector);
5885 env = saved_env;
5886 }
5887 else
5888 {
5889 env = saved_env;
5890#ifdef VBOX_STRICT
5891 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5892#endif
5893 }
5894}
5895
5896int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5897 uint32_t *esp_ptr, int dpl)
5898{
5899 int type, index, shift;
5900
5901 CPUX86State *savedenv = env;
5902 env = env1;
5903
5904 if (!(env->tr.flags & DESC_P_MASK))
5905 cpu_abort(env, "invalid tss");
5906 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5907 if ((type & 7) != 1)
5908 cpu_abort(env, "invalid tss type %d", type);
5909 shift = type >> 3;
5910 index = (dpl * 4 + 2) << shift;
5911 if (index + (4 << shift) - 1 > env->tr.limit)
5912 {
5913 env = savedenv;
5914 return 0;
5915 }
5916 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5917
5918 if (shift == 0) {
5919 *esp_ptr = lduw_kernel(env->tr.base + index);
5920 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5921 } else {
5922 *esp_ptr = ldl_kernel(env->tr.base + index);
5923 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5924 }
5925
5926 env = savedenv;
5927 return 1;
5928}
5929
5930//*****************************************************************************
5931// Needs to be at the bottom of the file (overriding macros)
5932
5933#ifndef VBOX
5934static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5935#else /* VBOX */
5936DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5937#endif /* VBOX */
5938{
5939 return *(CPU86_LDouble *)ptr;
5940}
5941
5942#ifndef VBOX
5943static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5944#else /* VBOX */
5945DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5946#endif /* VBOX */
5947{
5948 *(CPU86_LDouble *)ptr = f;
5949}
5950
5951#undef stw
5952#undef stl
5953#undef stq
5954#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5955#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5956#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5957
5958//*****************************************************************************
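/* Writes the FPU/SSE state of env out to the raw memory block at ptr, using the
   FXSAVE layout when the CPU reports FXSR support and the legacy FSAVE layout
   otherwise. Despite the name this is the store-to-memory direction;
   save_raw_fp_state() below does the opposite. */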
5959void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5960{
5961 int fpus, fptag, i, nb_xmm_regs;
5962 CPU86_LDouble tmp;
5963 uint8_t *addr;
5964 int data64 = !!(env->hflags & HF_LMA_MASK);
5965
5966 if (env->cpuid_features & CPUID_FXSR)
5967 {
5968 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5969 fptag = 0;
5970 for(i = 0; i < 8; i++) {
5971 fptag |= (env->fptags[i] << i);
5972 }
5973 stw(ptr, env->fpuc);
5974 stw(ptr + 2, fpus);
5975 stw(ptr + 4, fptag ^ 0xff);
5976
5977 addr = ptr + 0x20;
5978 for(i = 0;i < 8; i++) {
5979 tmp = ST(i);
5980 helper_fstt_raw(tmp, addr);
5981 addr += 16;
5982 }
5983
5984 if (env->cr[4] & CR4_OSFXSR_MASK) {
5985 /* XXX: finish it */
5986 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5987 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5988 nb_xmm_regs = 8 << data64;
5989 addr = ptr + 0xa0;
5990 for(i = 0; i < nb_xmm_regs; i++) {
5991#if __GNUC__ < 4
5992 stq(addr, env->xmm_regs[i].XMM_Q(0));
5993 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5994#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5995 stl(addr, env->xmm_regs[i].XMM_L(0));
5996 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5997 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5998 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5999#endif
6000 addr += 16;
6001 }
6002 }
6003 }
6004 else
6005 {
6006 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6007 int fptag;
6008
6009 fp->FCW = env->fpuc;
6010 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6011 fptag = 0;
6012 for (i=7; i>=0; i--) {
6013 fptag <<= 2;
6014 if (env->fptags[i]) {
6015 fptag |= 3;
6016 } else {
6017 /* the FPU automatically computes it */
6018 }
6019 }
6020 fp->FTW = fptag;
6021
6022 for(i = 0;i < 8; i++) {
6023 tmp = ST(i);
6024 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6025 }
6026 }
6027}
6028
6029//*****************************************************************************
6030#undef lduw
6031#undef ldl
6032#undef ldq
6033#define lduw(a) *(uint16_t *)(a)
6034#define ldl(a) *(uint32_t *)(a)
6035#define ldq(a) *(uint64_t *)(a)
6036//*****************************************************************************
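/* Loads the FPU/SSE state from the raw memory block at ptr back into env;
   counterpart of restore_raw_fp_state() above. */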
6037void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6038{
6039 int i, fpus, fptag, nb_xmm_regs;
6040 CPU86_LDouble tmp;
6041 uint8_t *addr;
6042 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6043
6044 if (env->cpuid_features & CPUID_FXSR)
6045 {
6046 env->fpuc = lduw(ptr);
6047 fpus = lduw(ptr + 2);
6048 fptag = lduw(ptr + 4);
6049 env->fpstt = (fpus >> 11) & 7;
6050 env->fpus = fpus & ~0x3800;
6051 fptag ^= 0xff;
6052 for(i = 0;i < 8; i++) {
6053 env->fptags[i] = ((fptag >> i) & 1);
6054 }
6055
6056 addr = ptr + 0x20;
6057 for(i = 0;i < 8; i++) {
6058 tmp = helper_fldt_raw(addr);
6059 ST(i) = tmp;
6060 addr += 16;
6061 }
6062
6063 if (env->cr[4] & CR4_OSFXSR_MASK) {
6064 /* XXX: finish it, endianness */
6065 env->mxcsr = ldl(ptr + 0x18);
6066 //ldl(ptr + 0x1c);
6067 nb_xmm_regs = 8 << data64;
6068 addr = ptr + 0xa0;
6069 for(i = 0; i < nb_xmm_regs; i++) {
6070#if HC_ARCH_BITS == 32
6071 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6072 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6073 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6074 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6075 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6076#else
6077 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6078 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6079#endif
6080 addr += 16;
6081 }
6082 }
6083 }
6084 else
6085 {
6086 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6087 int fptag, j;
6088
6089 env->fpuc = fp->FCW;
6090 env->fpstt = (fp->FSW >> 11) & 7;
6091 env->fpus = fp->FSW & ~0x3800;
6092 fptag = fp->FTW;
6093 for(i = 0;i < 8; i++) {
6094 env->fptags[i] = ((fptag & 3) == 3);
6095 fptag >>= 2;
6096 }
6097 j = env->fpstt;
6098 for(i = 0;i < 8; i++) {
6099 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6100 ST(i) = tmp;
6101 }
6102 }
6103}
6104//*****************************************************************************
6105//*****************************************************************************
6106
6107#endif /* VBOX */
6108
6109/* Secure Virtual Machine helpers */
6110
6111#if defined(CONFIG_USER_ONLY)
6112
6113void helper_vmrun(int aflag, int next_eip_addend)
6114{
6115}
6116void helper_vmmcall(void)
6117{
6118}
6119void helper_vmload(int aflag)
6120{
6121}
6122void helper_vmsave(int aflag)
6123{
6124}
6125void helper_stgi(void)
6126{
6127}
6128void helper_clgi(void)
6129{
6130}
6131void helper_skinit(void)
6132{
6133}
6134void helper_invlpga(int aflag)
6135{
6136}
6137void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6138{
6139}
6140void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6141{
6142}
6143
6144void helper_svm_check_io(uint32_t port, uint32_t param,
6145 uint32_t next_eip_addend)
6146{
6147}
6148#else
6149
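/* The VMCB stores segment attributes in a packed 12-bit format: bits 0-7 hold
   descriptor attribute bits 8-15 (type, S, DPL, P) and bits 8-11 hold descriptor
   bits 20-23 (AVL, L, D/B, G); the helpers below convert between this format and
   the segment-cache flag layout. */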
6150#ifndef VBOX
6151static inline void svm_save_seg(target_phys_addr_t addr,
6152#else /* VBOX */
6153DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6154#endif /* VBOX */
6155 const SegmentCache *sc)
6156{
6157 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6158 sc->selector);
6159 stq_phys(addr + offsetof(struct vmcb_seg, base),
6160 sc->base);
6161 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6162 sc->limit);
6163 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6164 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6165}
6166
6167#ifndef VBOX
6168static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6169#else /* VBOX */
6170DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6171#endif /* VBOX */
6172{
6173 unsigned int flags;
6174
6175 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6176 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6177 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6178 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6179 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6180}
6181
6182#ifndef VBOX
6183static inline void svm_load_seg_cache(target_phys_addr_t addr,
6184#else /* VBOX */
6185DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6186#endif /* VBOX */
6187 CPUState *env, int seg_reg)
6188{
6189 SegmentCache sc1, *sc = &sc1;
6190 svm_load_seg(addr, sc);
6191 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6192 sc->base, sc->limit, sc->flags);
6193}
6194
6195void helper_vmrun(int aflag, int next_eip_addend)
6196{
6197 target_ulong addr;
6198 uint32_t event_inj;
6199 uint32_t int_ctl;
6200
6201 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6202
6203 if (aflag == 2)
6204 addr = EAX;
6205 else
6206 addr = (uint32_t)EAX;
6207
6208 if (loglevel & CPU_LOG_TB_IN_ASM)
6209 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6210
6211 env->vm_vmcb = addr;
6212
6213 /* save the current CPU state in the hsave page */
6214 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6215 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6216
6217 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6218 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6219
6220 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6221 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6222 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6223 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6224 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6225 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6226
6227 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6228 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6229
6230 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6231 &env->segs[R_ES]);
6232 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6233 &env->segs[R_CS]);
6234 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6235 &env->segs[R_SS]);
6236 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6237 &env->segs[R_DS]);
6238
6239 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6240 EIP + next_eip_addend);
6241 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6242 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6243
6244 /* load the interception bitmaps so we do not need to access the
6245 vmcb in svm mode */
6246 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6247 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6248 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6249 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6250 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6251 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6252
6253 /* enable intercepts */
6254 env->hflags |= HF_SVMI_MASK;
6255
6256 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6257
6258 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6259 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6260
6261 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6262 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6263
6264 /* clear exit_info_2 so we behave like the real hardware */
6265 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6266
6267 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6268 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6269 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6270 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6271 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6272 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6273 if (int_ctl & V_INTR_MASKING_MASK) {
6274 env->v_tpr = int_ctl & V_TPR_MASK;
6275 env->hflags2 |= HF2_VINTR_MASK;
6276 if (env->eflags & IF_MASK)
6277 env->hflags2 |= HF2_HIF_MASK;
6278 }
6279
6280 cpu_load_efer(env,
6281 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6282 env->eflags = 0;
6283 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6284 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6285 CC_OP = CC_OP_EFLAGS;
6286
6287 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6288 env, R_ES);
6289 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6290 env, R_CS);
6291 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6292 env, R_SS);
6293 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6294 env, R_DS);
6295
6296 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6297 env->eip = EIP;
6298 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6299 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6300 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6301 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6302 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6303
6304 /* FIXME: guest state consistency checks */
6305
6306 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6307 case TLB_CONTROL_DO_NOTHING:
6308 break;
6309 case TLB_CONTROL_FLUSH_ALL_ASID:
6310 /* FIXME: this is not 100% correct but should work for now */
6311 tlb_flush(env, 1);
6312 break;
6313 }
6314
6315 env->hflags2 |= HF2_GIF_MASK;
6316
6317 if (int_ctl & V_IRQ_MASK) {
6318 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6319 }
6320
6321 /* maybe we need to inject an event */
6322 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6323 if (event_inj & SVM_EVTINJ_VALID) {
6324 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6325 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6326 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6327 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6328
6329 if (loglevel & CPU_LOG_TB_IN_ASM)
6330 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6331 /* FIXME: need to implement valid_err */
6332 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6333 case SVM_EVTINJ_TYPE_INTR:
6334 env->exception_index = vector;
6335 env->error_code = event_inj_err;
6336 env->exception_is_int = 0;
6337 env->exception_next_eip = -1;
6338 if (loglevel & CPU_LOG_TB_IN_ASM)
6339 fprintf(logfile, "INTR");
6340 /* XXX: is it always correct? */
6341 do_interrupt(vector, 0, 0, 0, 1);
6342 break;
6343 case SVM_EVTINJ_TYPE_NMI:
6344 env->exception_index = EXCP02_NMI;
6345 env->error_code = event_inj_err;
6346 env->exception_is_int = 0;
6347 env->exception_next_eip = EIP;
6348 if (loglevel & CPU_LOG_TB_IN_ASM)
6349 fprintf(logfile, "NMI");
6350 cpu_loop_exit();
6351 break;
6352 case SVM_EVTINJ_TYPE_EXEPT:
6353 env->exception_index = vector;
6354 env->error_code = event_inj_err;
6355 env->exception_is_int = 0;
6356 env->exception_next_eip = -1;
6357 if (loglevel & CPU_LOG_TB_IN_ASM)
6358 fprintf(logfile, "EXEPT");
6359 cpu_loop_exit();
6360 break;
6361 case SVM_EVTINJ_TYPE_SOFT:
6362 env->exception_index = vector;
6363 env->error_code = event_inj_err;
6364 env->exception_is_int = 1;
6365 env->exception_next_eip = EIP;
6366 if (loglevel & CPU_LOG_TB_IN_ASM)
6367 fprintf(logfile, "SOFT");
6368 cpu_loop_exit();
6369 break;
6370 }
6371 if (loglevel & CPU_LOG_TB_IN_ASM)
6372 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6373 }
6374}
6375
6376void helper_vmmcall(void)
6377{
6378 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6379 raise_exception(EXCP06_ILLOP);
6380}
6381
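/* VMLOAD: load the hidden state of FS, GS, TR and LDTR plus the
   syscall/sysenter MSRs from the VMCB whose physical address is in
   rAX (eAX when a 32-bit address size is in effect) */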
6382void helper_vmload(int aflag)
6383{
6384 target_ulong addr;
6385 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6386
6387 if (aflag == 2)
6388 addr = EAX;
6389 else
6390 addr = (uint32_t)EAX;
6391
6392 if (loglevel & CPU_LOG_TB_IN_ASM)
6393 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6394 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6395 env->segs[R_FS].base);
6396
6397 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6398 env, R_FS);
6399 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6400 env, R_GS);
6401 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6402 &env->tr);
6403 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6404 &env->ldt);
6405
6406#ifdef TARGET_X86_64
6407 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6408 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6409 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6410 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6411#endif
6412 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6413 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6414 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6415 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6416}
6417
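/* VMSAVE: the inverse of VMLOAD; store the same FS/GS/TR/LDTR state
   and MSRs back into the VMCB addressed by rAX/eAX */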
6418void helper_vmsave(int aflag)
6419{
6420 target_ulong addr;
6421 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6422
6423 if (aflag == 2)
6424 addr = EAX;
6425 else
6426 addr = (uint32_t)EAX;
6427
6428 if (loglevel & CPU_LOG_TB_IN_ASM)
6429 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6430 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6431 env->segs[R_FS].base);
6432
6433 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6434 &env->segs[R_FS]);
6435 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6436 &env->segs[R_GS]);
6437 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6438 &env->tr);
6439 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6440 &env->ldt);
6441
6442#ifdef TARGET_X86_64
6443 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6444 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6445 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6446 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6447#endif
6448 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6449 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6450 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6451 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6452}
6453
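/* STGI/CLGI set and clear the global interrupt flag (GIF), tracked in
   hflags2; while GIF is clear, interrupts and NMIs are held pending */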
6454void helper_stgi(void)
6455{
6456 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6457 env->hflags2 |= HF2_GIF_MASK;
6458}
6459
6460void helper_clgi(void)
6461{
6462 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6463 env->hflags2 &= ~HF2_GIF_MASK;
6464}
6465
6466void helper_skinit(void)
6467{
6468 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6469 /* XXX: not implemented */
6470 raise_exception(EXCP06_ILLOP);
6471}
6472
6473void helper_invlpga(int aflag)
6474{
6475 target_ulong addr;
6476 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6477
6478 if (aflag == 2)
6479 addr = EAX;
6480 else
6481 addr = (uint32_t)EAX;
6482
6483 /* XXX: could use the ASID to decide whether the flush is
6484 needed */
6485 tlb_flush_page(env, addr);
6486}
6487
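/* Raise a #VMEXIT if the guest's VMCB intercepts the given event. The
   CR/DR/exception/intercept bitmaps were cached in env at VMRUN; for
   MSR accesses the MSR permission bitmap is consulted directly (two
   bits per MSR, param selecting the read or write bit) */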
6488void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6489{
6490 if (likely(!(env->hflags & HF_SVMI_MASK)))
6491 return;
6492#ifndef VBOX
6493 switch(type) {
6494#ifndef VBOX
6495 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6496#else
6497 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6498 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6499 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6500#endif
6501 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6502 helper_vmexit(type, param);
6503 }
6504 break;
6505#ifndef VBOX
6506 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6507#else
6508 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6509 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6510 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6511#endif
6512 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6513 helper_vmexit(type, param);
6514 }
6515 break;
6516 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6517 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6518 helper_vmexit(type, param);
6519 }
6520 break;
6521 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6522 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6523 helper_vmexit(type, param);
6524 }
6525 break;
6526 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6527 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6528 helper_vmexit(type, param);
6529 }
6530 break;
6531 case SVM_EXIT_MSR:
6532 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6533 /* FIXME: this should be read in at vmrun (faster this way?) */
6534 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6535 uint32_t t0, t1;
6536 switch((uint32_t)ECX) {
6537 case 0 ... 0x1fff:
6538 t0 = (ECX * 2) % 8;
6539 t1 = ECX / 8;
6540 break;
6541 case 0xc0000000 ... 0xc0001fff:
6542 t0 = (8192 + ECX - 0xc0000000) * 2;
6543 t1 = (t0 / 8);
6544 t0 %= 8;
6545 break;
6546 case 0xc0010000 ... 0xc0011fff:
6547 t0 = (16384 + ECX - 0xc0010000) * 2;
6548 t1 = (t0 / 8);
6549 t0 %= 8;
6550 break;
6551 default:
6552 helper_vmexit(type, param);
6553 t0 = 0;
6554 t1 = 0;
6555 break;
6556 }
6557 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6558 helper_vmexit(type, param);
6559 }
6560 break;
6561 default:
6562 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6563 helper_vmexit(type, param);
6564 }
6565 break;
6566 }
6567#else
6568 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6569#endif
6570}
6571
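/* Check the IO permission bitmap for an intercepted IN/OUT on 'port';
   on a hit, exit_info_2 is set to the next EIP and a #VMEXIT with
   SVM_EXIT_IOIO is raised */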
6572void helper_svm_check_io(uint32_t port, uint32_t param,
6573 uint32_t next_eip_addend)
6574{
6575 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6576 /* FIXME: this should be read in at vmrun (faster this way?) */
6577 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6578 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6579 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6580 /* next EIP */
6581 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6582 env->eip + next_eip_addend);
6583 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6584 }
6585 }
6586}
6587
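/* #VMEXIT: save the guest state and exit reason into the VMCB, reload
   the host state saved at VMRUN time from vm_hsave, and leave the CPU
   loop so execution resumes in the host */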
6588/* Note: currently only 32 bits of exit_code are used */
6589void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6590{
6591 uint32_t int_ctl;
6592
6593 if (loglevel & CPU_LOG_TB_IN_ASM)
6594 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6595 exit_code, exit_info_1,
6596 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6597 EIP);
6598
6599 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6600 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6601 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6602 } else {
6603 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6604 }
6605
6606 /* Save the VM state in the vmcb */
6607 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6608 &env->segs[R_ES]);
6609 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6610 &env->segs[R_CS]);
6611 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6612 &env->segs[R_SS]);
6613 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6614 &env->segs[R_DS]);
6615
6616 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6617 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6618
6619 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6620 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6621
6622 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6623 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6624 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6625 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6626 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6627
6628 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6629 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6630 int_ctl |= env->v_tpr & V_TPR_MASK;
6631 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6632 int_ctl |= V_IRQ_MASK;
6633 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6634
6635 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6636 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6637 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6638 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6639 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6640 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6641 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6642
6643 /* Reload the host state from vm_hsave */
6644 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6645 env->hflags &= ~HF_SVMI_MASK;
6646 env->intercept = 0;
6647 env->intercept_exceptions = 0;
6648 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6649 env->tsc_offset = 0;
6650
6651 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6652 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6653
6654 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6655 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6656
6657 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6658 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6659 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6660 /* we need to set EFER after the CRs so the hidden flags get
6661 set properly */
6662 cpu_load_efer(env,
6663 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6664 env->eflags = 0;
6665 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6666 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6667 CC_OP = CC_OP_EFLAGS;
6668
6669 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6670 env, R_ES);
6671 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6672 env, R_CS);
6673 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6674 env, R_SS);
6675 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6676 env, R_DS);
6677
6678 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6679 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6680 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6681
6682 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6683 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6684
6685 /* other setups */
6686 cpu_x86_set_cpl(env, 0);
6687 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6688 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6689
6690 env->hflags2 &= ~HF2_GIF_MASK;
6691 /* FIXME: Resets the current ASID register to zero (host ASID). */
6692
6693 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6694
6695 /* Clears the TSC_OFFSET inside the processor. */
6696
6697 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6698 from the page table indicated by the host's CR3. If the PDPEs contain
6699 illegal state, the processor causes a shutdown. */
6700
6701 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6702 env->cr[0] |= CR0_PE_MASK;
6703 env->eflags &= ~VM_MASK;
6704
6705 /* Disables all breakpoints in the host DR7 register. */
6706
6707 /* Checks the reloaded host state for consistency. */
6708
6709 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6710 host's code segment or non-canonical (in the case of long mode), a
6711 #GP fault is delivered inside the host. */
6712
6713 /* remove any pending exception */
6714 env->exception_index = -1;
6715 env->error_code = 0;
6716 env->old_exception = -1;
6717
6718 cpu_loop_exit();
6719}
6720
6721#endif
6722
6723/* MMX/SSE */
6724/* XXX: optimize by storing fptt and fptags in the static cpu state */
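/* Entering MMX mode resets the FPU stack top and marks every x87 tag
   as valid (0) since the MMX registers alias the FPU registers; EMMS
   marks them all empty (1) again */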
6725void helper_enter_mmx(void)
6726{
6727 env->fpstt = 0;
6728 *(uint32_t *)(env->fptags) = 0;
6729 *(uint32_t *)(env->fptags + 4) = 0;
6730}
6731
6732void helper_emms(void)
6733{
6734 /* set to empty state */
6735 *(uint32_t *)(env->fptags) = 0x01010101;
6736 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6737}
6738
6739/* XXX: suppress */
6740void helper_movq(uint64_t *d, uint64_t *s)
6741{
6742 *d = *s;
6743}
6744
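/* ops_sse.h is included twice to instantiate the vector helpers for
   the 64-bit MMX (SHIFT 0) and 128-bit SSE (SHIFT 1) register widths;
   helper_template.h below likewise expands the generic flag helpers
   for byte/word/long (and quad on 64-bit targets) operand sizes */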
6745#define SHIFT 0
6746#include "ops_sse.h"
6747
6748#define SHIFT 1
6749#include "ops_sse.h"
6750
6751#define SHIFT 0
6752#include "helper_template.h"
6753#undef SHIFT
6754
6755#define SHIFT 1
6756#include "helper_template.h"
6757#undef SHIFT
6758
6759#define SHIFT 2
6760#include "helper_template.h"
6761#undef SHIFT
6762
6763#ifdef TARGET_X86_64
6764
6765#define SHIFT 3
6766#include "helper_template.h"
6767#undef SHIFT
6768
6769#endif
6770
6771/* bit operations */
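/* helper_bsf/helper_bsr scan for the lowest/highest set bit. The loops
   only terminate when at least one bit is set, so callers are expected
   to handle the zero-operand (ZF) case before calling these helpers */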
6772target_ulong helper_bsf(target_ulong t0)
6773{
6774 int count;
6775 target_ulong res;
6776
6777 res = t0;
6778 count = 0;
6779 while ((res & 1) == 0) {
6780 count++;
6781 res >>= 1;
6782 }
6783 return count;
6784}
6785
6786target_ulong helper_bsr(target_ulong t0)
6787{
6788 int count;
6789 target_ulong res, mask;
6790
6791 res = t0;
6792 count = TARGET_LONG_BITS - 1;
6793 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6794 while ((res & mask) == 0) {
6795 count--;
6796 res <<= 1;
6797 }
6798 return count;
6799}
6800
6801
6802static int compute_all_eflags(void)
6803{
6804 return CC_SRC;
6805}
6806
6807static int compute_c_eflags(void)
6808{
6809 return CC_SRC & CC_C;
6810}
6811
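/* cc_table maps each lazy flags state (CC_OP_*) to a pair of helpers:
   one that materializes all of EFLAGS and one that computes only CF.
   The VBOX variant below spells every entry out positionally instead of
   using designated initializers (presumably for the sake of compilers
   that lack them), so it must be kept in sync with cpu.h */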
6812#ifndef VBOX
6813CCTable cc_table[CC_OP_NB] = {
6814 [CC_OP_DYNAMIC] = { /* should never happen */ },
6815
6816 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6817
6818 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6819 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6820 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6821
6822 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6823 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6824 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6825
6826 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6827 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6828 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6829
6830 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6831 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6832 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6833
6834 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6835 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6836 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6837
6838 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6839 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6840 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6841
6842 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6843 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6844 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6845
6846 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6847 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6848 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6849
6850 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6851 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6852 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6853
6854 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6855 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6856 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6857
6858#ifdef TARGET_X86_64
6859 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6860
6861 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6862
6863 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6864
6865 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6866
6867 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6868
6869 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6870
6871 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6872
6873 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6874
6875 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6876
6877 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6878#endif
6879};
6880#else /* VBOX */
6881/* Sync carefully with cpu.h */
6882CCTable cc_table[CC_OP_NB] = {
6883 /* CC_OP_DYNAMIC */ { 0, 0 },
6884
6885 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6886
6887 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6888 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6889 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6890#ifdef TARGET_X86_64
6891 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6892#else
6893 /* CC_OP_MULQ */ { 0, 0 },
6894#endif
6895
6896 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6897 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6898 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6899#ifdef TARGET_X86_64
6900 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6901#else
6902 /* CC_OP_ADDQ */ { 0, 0 },
6903#endif
6904
6905 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6906 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6907 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6908#ifdef TARGET_X86_64
6909 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6910#else
6911 /* CC_OP_ADCQ */ { 0, 0 },
6912#endif
6913
6914 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6915 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6916 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6917#ifdef TARGET_X86_64
6918 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6919#else
6920 /* CC_OP_SUBQ */ { 0, 0 },
6921#endif
6922
6923 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6924 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6925 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6926#ifdef TARGET_X86_64
6927 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6928#else
6929 /* CC_OP_SBBQ */ { 0, 0 },
6930#endif
6931
6932 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6933 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6934 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6935#ifdef TARGET_X86_64
6936 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6937#else
6938 /* CC_OP_LOGICQ */ { 0, 0 },
6939#endif
6940
6941 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6942 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6943 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6944#ifdef TARGET_X86_64
6945 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6946#else
6947 /* CC_OP_INCQ */ { 0, 0 },
6948#endif
6949
6950 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6951 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6952 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6953#ifdef TARGET_X86_64
6954 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6955#else
6956 /* CC_OP_DECQ */ { 0, 0 },
6957#endif
6958
6959 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6960 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6961 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6962#ifdef TARGET_X86_64
6963 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6964#else
6965 /* CC_OP_SHLQ */ { 0, 0 },
6966#endif
6967
6968 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6969 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6970 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6971#ifdef TARGET_X86_64
6972 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6973#else
6974 /* CC_OP_SARQ */ { 0, 0 },
6975#endif
6976};
6977#endif /* VBOX */