VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@27263

Last change on this file since 27263 was 26993, checked in by vboxsync, 15 years ago

VMM: implement some Nehalem MSRs

1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
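/* x86 PF lookup table: entry i is CC_P when the byte value i contains an even number of set bits */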
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
102
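/* constants used by the x87 load-constant helpers (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T) */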
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to AMD manual, should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
196/* return non zero if error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with CPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
273 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
274 flags |= (3 << DESC_DPL_SHIFT);
275
276 cpu_x86_load_seg_cache(env, seg, selector,
277 (selector << 4), 0xffff, flags);
278#else
279 cpu_x86_load_seg_cache(env, seg, selector,
280 (selector << 4), 0xffff, 0);
281#endif
282}
283
284#ifndef VBOX
285static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
286#else /* VBOX */
287DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
288#endif /* VBOX */
289 uint32_t *esp_ptr, int dpl)
290{
291#ifndef VBOX
292 int type, index, shift;
293#else
294 unsigned int type, index, shift;
295#endif
296
297#if 0
298 {
299 int i;
300 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
301 for(i=0;i<env->tr.limit;i++) {
302 printf("%02x ", env->tr.base[i]);
303 if ((i & 7) == 7) printf("\n");
304 }
305 printf("\n");
306 }
307#endif
308
309 if (!(env->tr.flags & DESC_P_MASK))
310 cpu_abort(env, "invalid tss");
311 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
312 if ((type & 7) != 1)
313 cpu_abort(env, "invalid tss type");
314 shift = type >> 3;
315 index = (dpl * 4 + 2) << shift;
316 if (index + (4 << shift) - 1 > env->tr.limit)
317 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
318 if (shift == 0) {
319 *esp_ptr = lduw_kernel(env->tr.base + index);
320 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
321 } else {
322 *esp_ptr = ldl_kernel(env->tr.base + index);
323 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
324 }
325}
326
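/* load a segment register as part of a task switch; descriptor checks raise #TS, a non-present segment raises #NP */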
327/* XXX: merge with load_seg() */
328static void tss_load_seg(int seg_reg, int selector)
329{
330 uint32_t e1, e2;
331 int rpl, dpl, cpl;
332
333#ifdef VBOX
334 e1 = e2 = 0;
335 cpl = env->hflags & HF_CPL_MASK;
336 /* Trying to load a selector with CPL=1? */
337 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
338 {
339 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
340 selector = selector & 0xfffc;
341 }
342#endif
343
344 if ((selector & 0xfffc) != 0) {
345 if (load_segment(&e1, &e2, selector) != 0)
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 if (!(e2 & DESC_S_MASK))
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 rpl = selector & 3;
350 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
351 cpl = env->hflags & HF_CPL_MASK;
352 if (seg_reg == R_CS) {
353 if (!(e2 & DESC_CS_MASK))
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 /* XXX: is it correct ? */
356 if (dpl != rpl)
357 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
358 if ((e2 & DESC_C_MASK) && dpl > rpl)
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 } else if (seg_reg == R_SS) {
361 /* SS must be writable data */
362 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
363 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
364 if (dpl != cpl || dpl != rpl)
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 } else {
367 /* not readable code */
368 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 /* if data or non-conforming code, check the rights */
371 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
372 if (dpl < cpl || dpl < rpl)
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 }
375 }
376 if (!(e2 & DESC_P_MASK))
377 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
378 cpu_x86_load_seg_cache(env, seg_reg, selector,
379 get_seg_base(e1, e2),
380 get_seg_limit(e1, e2),
381 e2);
382 } else {
383 if (seg_reg == R_SS || seg_reg == R_CS)
384 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
385#ifdef VBOX
386#if 0
387 /** @todo: for now we ignore loading 0 selectors; need to check once what is correct */
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 0, 0, 0);
390#endif
391#endif
392 }
393}
394
395#define SWITCH_TSS_JMP 0
396#define SWITCH_TSS_IRET 1
397#define SWITCH_TSS_CALL 2
398
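/* perform a hardware task switch: save the outgoing context into the current TSS, then load the incoming task's registers, segments and LDT from the new TSS */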
399/* XXX: restore CPU state in registers (PowerPC case) */
400static void switch_tss(int tss_selector,
401 uint32_t e1, uint32_t e2, int source,
402 uint32_t next_eip)
403{
404 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
405 target_ulong tss_base;
406 uint32_t new_regs[8], new_segs[6];
407 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
408 uint32_t old_eflags, eflags_mask;
409 SegmentCache *dt;
410#ifndef VBOX
411 int index;
412#else
413 unsigned int index;
414#endif
415 target_ulong ptr;
416
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418#ifdef DEBUG_PCALL
419 if (loglevel & CPU_LOG_PCALL)
420 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
421#endif
422
423#if defined(VBOX) && defined(DEBUG)
424 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
425#endif
426
427 /* if task gate, we read the TSS segment and we load it */
428 if (type == 5) {
429 if (!(e2 & DESC_P_MASK))
430 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
431 tss_selector = e1 >> 16;
432 if (tss_selector & 4)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 if (load_segment(&e1, &e2, tss_selector) != 0)
435 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
436 if (e2 & DESC_S_MASK)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
439 if ((type & 7) != 1)
440 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
441 }
442
443 if (!(e2 & DESC_P_MASK))
444 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
445
446 if (type & 8)
447 tss_limit_max = 103;
448 else
449 tss_limit_max = 43;
450 tss_limit = get_seg_limit(e1, e2);
451 tss_base = get_seg_base(e1, e2);
452 if ((tss_selector & 4) != 0 ||
453 tss_limit < tss_limit_max)
454 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
455 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
456 if (old_type & 8)
457 old_tss_limit_max = 103;
458 else
459 old_tss_limit_max = 43;
460
461 /* read all the registers from the new TSS */
462 if (type & 8) {
463 /* 32 bit */
464 new_cr3 = ldl_kernel(tss_base + 0x1c);
465 new_eip = ldl_kernel(tss_base + 0x20);
466 new_eflags = ldl_kernel(tss_base + 0x24);
467 for(i = 0; i < 8; i++)
468 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
469 for(i = 0; i < 6; i++)
470 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
471 new_ldt = lduw_kernel(tss_base + 0x60);
472 new_trap = ldl_kernel(tss_base + 0x64);
473 } else {
474 /* 16 bit */
475 new_cr3 = 0;
476 new_eip = lduw_kernel(tss_base + 0x0e);
477 new_eflags = lduw_kernel(tss_base + 0x10);
478 for(i = 0; i < 8; i++)
479 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
480 for(i = 0; i < 4; i++)
481 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
482 new_ldt = lduw_kernel(tss_base + 0x2a);
483 new_segs[R_FS] = 0;
484 new_segs[R_GS] = 0;
485 new_trap = 0;
486 }
487
488 /* NOTE: we must avoid memory exceptions during the task switch,
489 so we make dummy accesses before */
490 /* XXX: it can still fail in some cases, so a bigger hack is
491 necessary to validate the TLB after having done the accesses */
492
493 v1 = ldub_kernel(env->tr.base);
494 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
495 stb_kernel(env->tr.base, v1);
496 stb_kernel(env->tr.base + old_tss_limit_max, v2);
497
498 /* clear busy bit (it is restartable) */
499 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
500 target_ulong ptr;
501 uint32_t e2;
502 ptr = env->gdt.base + (env->tr.selector & ~7);
503 e2 = ldl_kernel(ptr + 4);
504 e2 &= ~DESC_TSS_BUSY_MASK;
505 stl_kernel(ptr + 4, e2);
506 }
507 old_eflags = compute_eflags();
508 if (source == SWITCH_TSS_IRET)
509 old_eflags &= ~NT_MASK;
510
511 /* save the current state in the old TSS */
512 if (type & 8) {
513 /* 32 bit */
514 stl_kernel(env->tr.base + 0x20, next_eip);
515 stl_kernel(env->tr.base + 0x24, old_eflags);
516 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
517 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
518 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
519 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
520 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
521 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
522 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
523 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
524 for(i = 0; i < 6; i++)
525 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
526#ifdef VBOX
527 /* Must store the ldt as it gets reloaded and might have been changed. */
528 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
529#endif
530#if defined(VBOX) && defined(DEBUG)
531 printf("TSS 32 bits switch\n");
532 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
533#endif
534 } else {
535 /* 16 bit */
536 stw_kernel(env->tr.base + 0x0e, next_eip);
537 stw_kernel(env->tr.base + 0x10, old_eflags);
538 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
539 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
540 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
541 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
542 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
543 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
544 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
545 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
546 for(i = 0; i < 4; i++)
547 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
548#ifdef VBOX
549 /* Must store the ldt as it gets reloaded and might have been changed. */
550 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
551#endif
552 }
553
554 /* now if an exception occurs, it will occur in the next task
555 context */
556
557 if (source == SWITCH_TSS_CALL) {
558 stw_kernel(tss_base, env->tr.selector);
559 new_eflags |= NT_MASK;
560 }
561
562 /* set busy bit */
563 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
564 target_ulong ptr;
565 uint32_t e2;
566 ptr = env->gdt.base + (tss_selector & ~7);
567 e2 = ldl_kernel(ptr + 4);
568 e2 |= DESC_TSS_BUSY_MASK;
569 stl_kernel(ptr + 4, e2);
570 }
571
572 /* set the new CPU state */
573 /* from this point, any exception which occurs can give problems */
574 env->cr[0] |= CR0_TS_MASK;
575 env->hflags |= HF_TS_MASK;
576 env->tr.selector = tss_selector;
577 env->tr.base = tss_base;
578 env->tr.limit = tss_limit;
579 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
580
581 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
582 cpu_x86_update_cr3(env, new_cr3);
583 }
584
585 /* load all registers without an exception, then reload them with
586 possible exception */
587 env->eip = new_eip;
588 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
589 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
590 if (!(type & 8))
591 eflags_mask &= 0xffff;
592 load_eflags(new_eflags, eflags_mask);
593 /* XXX: what to do in 16 bit case ? */
594 EAX = new_regs[0];
595 ECX = new_regs[1];
596 EDX = new_regs[2];
597 EBX = new_regs[3];
598 ESP = new_regs[4];
599 EBP = new_regs[5];
600 ESI = new_regs[6];
601 EDI = new_regs[7];
602 if (new_eflags & VM_MASK) {
603 for(i = 0; i < 6; i++)
604 load_seg_vm(i, new_segs[i]);
605 /* in vm86, CPL is always 3 */
606 cpu_x86_set_cpl(env, 3);
607 } else {
608 /* CPL is set to the RPL of CS */
609 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
610 /* first just selectors as the rest may trigger exceptions */
611 for(i = 0; i < 6; i++)
612 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
613 }
614
615 env->ldt.selector = new_ldt & ~4;
616 env->ldt.base = 0;
617 env->ldt.limit = 0;
618 env->ldt.flags = 0;
619
620 /* load the LDT */
621 if (new_ldt & 4)
622 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
623
624 if ((new_ldt & 0xfffc) != 0) {
625 dt = &env->gdt;
626 index = new_ldt & ~7;
627 if ((index + 7) > dt->limit)
628 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
629 ptr = dt->base + index;
630 e1 = ldl_kernel(ptr);
631 e2 = ldl_kernel(ptr + 4);
632 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
633 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
634 if (!(e2 & DESC_P_MASK))
635 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
636 load_seg_cache_raw_dt(&env->ldt, e1, e2);
637 }
638
639 /* load the segments */
640 if (!(new_eflags & VM_MASK)) {
641 tss_load_seg(R_CS, new_segs[R_CS]);
642 tss_load_seg(R_SS, new_segs[R_SS]);
643 tss_load_seg(R_ES, new_segs[R_ES]);
644 tss_load_seg(R_DS, new_segs[R_DS]);
645 tss_load_seg(R_FS, new_segs[R_FS]);
646 tss_load_seg(R_GS, new_segs[R_GS]);
647 }
648
649 /* check that EIP is in the CS segment limits */
650 if (new_eip > env->segs[R_CS].limit) {
651 /* XXX: different exception if CALL ? */
652 raise_exception_err(EXCP0D_GPF, 0);
653 }
654}
655
656/* check if Port I/O is allowed in TSS */
657#ifndef VBOX
658static inline void check_io(int addr, int size)
659{
660 int io_offset, val, mask;
661
662#else /* VBOX */
663DECLINLINE(void) check_io(int addr, int size)
664{
665 int val, mask;
666 unsigned int io_offset;
667#endif /* VBOX */
668 /* TSS must be a valid 32 bit one */
669 if (!(env->tr.flags & DESC_P_MASK) ||
670 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
671 env->tr.limit < 103)
672 goto fail;
673 io_offset = lduw_kernel(env->tr.base + 0x66);
674 io_offset += (addr >> 3);
675 /* Note: the check needs two bytes */
676 if ((io_offset + 1) > env->tr.limit)
677 goto fail;
678 val = lduw_kernel(env->tr.base + io_offset);
679 val >>= (addr & 7);
680 mask = (1 << size) - 1;
681 /* all bits must be zero to allow the I/O */
682 if ((val & mask) != 0) {
683 fail:
684 raise_exception_err(EXCP0D_GPF, 0);
685 }
686}
687
688#ifdef VBOX
689/* Keep in sync with gen_check_external_event() */
690void helper_check_external_event()
691{
692 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
693 | CPU_INTERRUPT_EXTERNAL_TIMER
694 | CPU_INTERRUPT_EXTERNAL_DMA))
695 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
696 && (env->eflags & IF_MASK)
697 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
698 {
699 helper_external_event();
700 }
701
702}
703
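/* VBox: commit a pending (lazily loaded) selector into the hidden segment register cache */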
704void helper_sync_seg(uint32_t reg)
705{
706 if (env->segs[reg].newselector)
707 sync_seg(env, reg, env->segs[reg].newselector);
708}
709#endif
710
711void helper_check_iob(uint32_t t0)
712{
713 check_io(t0, 1);
714}
715
716void helper_check_iow(uint32_t t0)
717{
718 check_io(t0, 2);
719}
720
721void helper_check_iol(uint32_t t0)
722{
723 check_io(t0, 4);
724}
725
726void helper_outb(uint32_t port, uint32_t data)
727{
728 cpu_outb(env, port, data & 0xff);
729}
730
731target_ulong helper_inb(uint32_t port)
732{
733 return cpu_inb(env, port);
734}
735
736void helper_outw(uint32_t port, uint32_t data)
737{
738 cpu_outw(env, port, data & 0xffff);
739}
740
741target_ulong helper_inw(uint32_t port)
742{
743 return cpu_inw(env, port);
744}
745
746void helper_outl(uint32_t port, uint32_t data)
747{
748 cpu_outl(env, port, data);
749}
750
751target_ulong helper_inl(uint32_t port)
752{
753 return cpu_inl(env, port);
754}
755
756#ifndef VBOX
757static inline unsigned int get_sp_mask(unsigned int e2)
758#else /* VBOX */
759DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
760#endif /* VBOX */
761{
762 if (e2 & DESC_B_MASK)
763 return 0xffffffff;
764 else
765 return 0xffff;
766}
767
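/* write back a new stack pointer value while preserving the bits excluded by the current stack-size mask */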
768#ifdef TARGET_X86_64
769#define SET_ESP(val, sp_mask)\
770do {\
771 if ((sp_mask) == 0xffff)\
772 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
773 else if ((sp_mask) == 0xffffffffLL)\
774 ESP = (uint32_t)(val);\
775 else\
776 ESP = (val);\
777} while (0)
778#else
779#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
780#endif
781
782/* in 64-bit machines, this can overflow. So this segment addition macro
783 * can be used to trim the value to 32-bit whenever needed */
784#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
785
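/* stack push/pop helpers taking the stack segment base, the (possibly 16-bit) stack pointer and its size mask */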
786/* XXX: add a is_user flag to have proper security support */
787#define PUSHW(ssp, sp, sp_mask, val)\
788{\
789 sp -= 2;\
790 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
791}
792
793#define PUSHL(ssp, sp, sp_mask, val)\
794{\
795 sp -= 4;\
796 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
797}
798
799#define POPW(ssp, sp, sp_mask, val)\
800{\
801 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
802 sp += 2;\
803}
804
805#define POPL(ssp, sp, sp_mask, val)\
806{\
807 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
808 sp += 4;\
809}
810
811/* protected mode interrupt */
812static void do_interrupt_protected(int intno, int is_int, int error_code,
813 unsigned int next_eip, int is_hw)
814{
815 SegmentCache *dt;
816 target_ulong ptr, ssp;
817 int type, dpl, selector, ss_dpl, cpl;
818 int has_error_code, new_stack, shift;
819 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
820 uint32_t old_eip, sp_mask;
821
822#ifdef VBOX
823 ss = ss_e1 = ss_e2 = 0;
824# ifdef VBOX_WITH_VMI
825 if ( intno == 6
826 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
827 {
828 env->exception_index = EXCP_PARAV_CALL;
829 cpu_loop_exit();
830 }
831# endif
832 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
833 cpu_loop_exit();
834#endif
835
836 has_error_code = 0;
837 if (!is_int && !is_hw) {
838 switch(intno) {
839 case 8:
840 case 10:
841 case 11:
842 case 12:
843 case 13:
844 case 14:
845 case 17:
846 has_error_code = 1;
847 break;
848 }
849 }
850 if (is_int)
851 old_eip = next_eip;
852 else
853 old_eip = env->eip;
854
855 dt = &env->idt;
856#ifndef VBOX
857 if (intno * 8 + 7 > dt->limit)
858#else
859 if ((unsigned)intno * 8 + 7 > dt->limit)
860#endif
861 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
862 ptr = dt->base + intno * 8;
863 e1 = ldl_kernel(ptr);
864 e2 = ldl_kernel(ptr + 4);
865 /* check gate type */
866 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
867 switch(type) {
868 case 5: /* task gate */
869 /* must do that check here to return the correct error code */
870 if (!(e2 & DESC_P_MASK))
871 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
872 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
873 if (has_error_code) {
874 int type;
875 uint32_t mask;
876 /* push the error code */
877 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
878 shift = type >> 3;
879 if (env->segs[R_SS].flags & DESC_B_MASK)
880 mask = 0xffffffff;
881 else
882 mask = 0xffff;
883 esp = (ESP - (2 << shift)) & mask;
884 ssp = env->segs[R_SS].base + esp;
885 if (shift)
886 stl_kernel(ssp, error_code);
887 else
888 stw_kernel(ssp, error_code);
889 SET_ESP(esp, mask);
890 }
891 return;
892 case 6: /* 286 interrupt gate */
893 case 7: /* 286 trap gate */
894 case 14: /* 386 interrupt gate */
895 case 15: /* 386 trap gate */
896 break;
897 default:
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 break;
900 }
901 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
902 cpl = env->hflags & HF_CPL_MASK;
903 /* check privilege if software int */
904 if (is_int && dpl < cpl)
905 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
906 /* check valid bit */
907 if (!(e2 & DESC_P_MASK))
908 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
909 selector = e1 >> 16;
910 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
911 if ((selector & 0xfffc) == 0)
912 raise_exception_err(EXCP0D_GPF, 0);
913
914 if (load_segment(&e1, &e2, selector) != 0)
915 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
916 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
917 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
918 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
919 if (dpl > cpl)
920 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
921 if (!(e2 & DESC_P_MASK))
922 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
923 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
924 /* to inner privilege */
925 get_ss_esp_from_tss(&ss, &esp, dpl);
926 if ((ss & 0xfffc) == 0)
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if ((ss & 3) != dpl)
929 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
930 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
931 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
932 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
933 if (ss_dpl != dpl)
934 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
935 if (!(ss_e2 & DESC_S_MASK) ||
936 (ss_e2 & DESC_CS_MASK) ||
937 !(ss_e2 & DESC_W_MASK))
938 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
939 if (!(ss_e2 & DESC_P_MASK))
940#ifdef VBOX /* See page 3-477 of 253666.pdf */
941 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
942#else
943 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
944#endif
945 new_stack = 1;
946 sp_mask = get_sp_mask(ss_e2);
947 ssp = get_seg_base(ss_e1, ss_e2);
948#if defined(VBOX) && defined(DEBUG)
949 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
950#endif
951 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
952 /* to same privilege */
953 if (env->eflags & VM_MASK)
954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
955 new_stack = 0;
956 sp_mask = get_sp_mask(env->segs[R_SS].flags);
957 ssp = env->segs[R_SS].base;
958 esp = ESP;
959 dpl = cpl;
960 } else {
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962 new_stack = 0; /* avoid warning */
963 sp_mask = 0; /* avoid warning */
964 ssp = 0; /* avoid warning */
965 esp = 0; /* avoid warning */
966 }
967
968 shift = type >> 3;
969
970#if 0
971 /* XXX: check that enough room is available */
972 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
973 if (env->eflags & VM_MASK)
974 push_size += 8;
975 push_size <<= shift;
976#endif
977 if (shift == 1) {
978 if (new_stack) {
979 if (env->eflags & VM_MASK) {
980 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
981 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
982 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
983 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
984 }
985 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
986 PUSHL(ssp, esp, sp_mask, ESP);
987 }
988 PUSHL(ssp, esp, sp_mask, compute_eflags());
989 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
990 PUSHL(ssp, esp, sp_mask, old_eip);
991 if (has_error_code) {
992 PUSHL(ssp, esp, sp_mask, error_code);
993 }
994 } else {
995 if (new_stack) {
996 if (env->eflags & VM_MASK) {
997 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
998 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
999 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1000 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1001 }
1002 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1003 PUSHW(ssp, esp, sp_mask, ESP);
1004 }
1005 PUSHW(ssp, esp, sp_mask, compute_eflags());
1006 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1007 PUSHW(ssp, esp, sp_mask, old_eip);
1008 if (has_error_code) {
1009 PUSHW(ssp, esp, sp_mask, error_code);
1010 }
1011 }
1012
1013 if (new_stack) {
1014 if (env->eflags & VM_MASK) {
1015 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1016 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1017 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1018 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1019 }
1020 ss = (ss & ~3) | dpl;
1021 cpu_x86_load_seg_cache(env, R_SS, ss,
1022 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1023 }
1024 SET_ESP(esp, sp_mask);
1025
1026 selector = (selector & ~3) | dpl;
1027 cpu_x86_load_seg_cache(env, R_CS, selector,
1028 get_seg_base(e1, e2),
1029 get_seg_limit(e1, e2),
1030 e2);
1031 cpu_x86_set_cpl(env, dpl);
1032 env->eip = offset;
1033
1034 /* interrupt gate clear IF mask */
1035 if ((type & 1) == 0) {
1036 env->eflags &= ~IF_MASK;
1037 }
1038#ifndef VBOX
1039 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1040#else
1041 /*
1042 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1043 * gets confused by seemingly changed EFLAGS. See #3491 and
1044 * public bug #2341.
1045 */
1046 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1047#endif
1048}
1049#ifdef VBOX
1050
1051/* check if VME interrupt redirection is enabled in TSS */
1052DECLINLINE(bool) is_vme_irq_redirected(int intno)
1053{
1054 unsigned int io_offset, intredir_offset;
1055 unsigned char val, mask;
1056
1057 /* TSS must be a valid 32 bit one */
1058 if (!(env->tr.flags & DESC_P_MASK) ||
1059 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1060 env->tr.limit < 103)
1061 goto fail;
1062 io_offset = lduw_kernel(env->tr.base + 0x66);
1063 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1064 if (io_offset < 0x68 + 0x20)
1065 io_offset = 0x68 + 0x20;
1066 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1067 intredir_offset = io_offset - 0x20;
1068
1069 intredir_offset += (intno >> 3);
1070 if ((intredir_offset) > env->tr.limit)
1071 goto fail;
1072
1073 val = ldub_kernel(env->tr.base + intredir_offset);
1074 mask = 1 << (unsigned char)(intno & 7);
1075
1076 /* bit set means no redirection. */
1077 if ((val & mask) != 0) {
1078 return false;
1079 }
1080 return true;
1081
1082fail:
1083 raise_exception_err(EXCP0D_GPF, 0);
1084 return true;
1085}
1086
1087/* V86 mode software interrupt with CR4.VME=1 */
1088static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1089{
1090 target_ulong ptr, ssp;
1091 int selector;
1092 uint32_t offset, esp;
1093 uint32_t old_cs, old_eflags;
1094 uint32_t iopl;
1095
1096 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1097
1098 if (!is_vme_irq_redirected(intno))
1099 {
1100 if (iopl == 3)
1101 {
1102 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1103 return;
1104 }
1105 else
1106 raise_exception_err(EXCP0D_GPF, 0);
1107 }
1108
1109 /* virtual mode idt is at linear address 0 */
1110 ptr = 0 + intno * 4;
1111 offset = lduw_kernel(ptr);
1112 selector = lduw_kernel(ptr + 2);
1113 esp = ESP;
1114 ssp = env->segs[R_SS].base;
1115 old_cs = env->segs[R_CS].selector;
1116
1117 old_eflags = compute_eflags();
1118 if (iopl < 3)
1119 {
1120 /* copy VIF into IF and set IOPL to 3 */
1121 if (env->eflags & VIF_MASK)
1122 old_eflags |= IF_MASK;
1123 else
1124 old_eflags &= ~IF_MASK;
1125
1126 old_eflags |= (3 << IOPL_SHIFT);
1127 }
1128
1129 /* XXX: use SS segment size ? */
1130 PUSHW(ssp, esp, 0xffff, old_eflags);
1131 PUSHW(ssp, esp, 0xffff, old_cs);
1132 PUSHW(ssp, esp, 0xffff, next_eip);
1133
1134 /* update processor state */
1135 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1136 env->eip = offset;
1137 env->segs[R_CS].selector = selector;
1138 env->segs[R_CS].base = (selector << 4);
1139 env->eflags &= ~(TF_MASK | RF_MASK);
1140
1141 if (iopl < 3)
1142 env->eflags &= ~VIF_MASK;
1143 else
1144 env->eflags &= ~IF_MASK;
1145}
1146#endif /* VBOX */
1147
1148#ifdef TARGET_X86_64
1149
1150#define PUSHQ(sp, val)\
1151{\
1152 sp -= 8;\
1153 stq_kernel(sp, (val));\
1154}
1155
1156#define POPQ(sp, val)\
1157{\
1158 val = ldq_kernel(sp);\
1159 sp += 8;\
1160}
1161
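/* read a stack pointer (RSP0-2 or an IST entry, selected by the callers) from the 64-bit TSS */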
1162#ifndef VBOX
1163static inline target_ulong get_rsp_from_tss(int level)
1164#else /* VBOX */
1165DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1166#endif /* VBOX */
1167{
1168 int index;
1169
1170#if 0
1171 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1172 env->tr.base, env->tr.limit);
1173#endif
1174
1175 if (!(env->tr.flags & DESC_P_MASK))
1176 cpu_abort(env, "invalid tss");
1177 index = 8 * level + 4;
1178 if ((index + 7) > env->tr.limit)
1179 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1180 return ldq_kernel(env->tr.base + index);
1181}
1182
1183/* 64 bit interrupt */
1184static void do_interrupt64(int intno, int is_int, int error_code,
1185 target_ulong next_eip, int is_hw)
1186{
1187 SegmentCache *dt;
1188 target_ulong ptr;
1189 int type, dpl, selector, cpl, ist;
1190 int has_error_code, new_stack;
1191 uint32_t e1, e2, e3, ss;
1192 target_ulong old_eip, esp, offset;
1193
1194#ifdef VBOX
1195 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1196 cpu_loop_exit();
1197#endif
1198
1199 has_error_code = 0;
1200 if (!is_int && !is_hw) {
1201 switch(intno) {
1202 case 8:
1203 case 10:
1204 case 11:
1205 case 12:
1206 case 13:
1207 case 14:
1208 case 17:
1209 has_error_code = 1;
1210 break;
1211 }
1212 }
1213 if (is_int)
1214 old_eip = next_eip;
1215 else
1216 old_eip = env->eip;
1217
1218 dt = &env->idt;
1219 if (intno * 16 + 15 > dt->limit)
1220 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1221 ptr = dt->base + intno * 16;
1222 e1 = ldl_kernel(ptr);
1223 e2 = ldl_kernel(ptr + 4);
1224 e3 = ldl_kernel(ptr + 8);
1225 /* check gate type */
1226 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1227 switch(type) {
1228 case 14: /* 386 interrupt gate */
1229 case 15: /* 386 trap gate */
1230 break;
1231 default:
1232 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1233 break;
1234 }
1235 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1236 cpl = env->hflags & HF_CPL_MASK;
1237 /* check privilege if software int */
1238 if (is_int && dpl < cpl)
1239 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1240 /* check valid bit */
1241 if (!(e2 & DESC_P_MASK))
1242 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1243 selector = e1 >> 16;
1244 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1245 ist = e2 & 7;
1246 if ((selector & 0xfffc) == 0)
1247 raise_exception_err(EXCP0D_GPF, 0);
1248
1249 if (load_segment(&e1, &e2, selector) != 0)
1250 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1251 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1252 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1253 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1254 if (dpl > cpl)
1255 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1256 if (!(e2 & DESC_P_MASK))
1257 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1258 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1259 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1260 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1261 /* to inner privilege */
1262 if (ist != 0)
1263 esp = get_rsp_from_tss(ist + 3);
1264 else
1265 esp = get_rsp_from_tss(dpl);
1266 esp &= ~0xfLL; /* align stack */
1267 ss = 0;
1268 new_stack = 1;
1269 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1270 /* to same privilege */
1271 if (env->eflags & VM_MASK)
1272 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1273 new_stack = 0;
1274 if (ist != 0)
1275 esp = get_rsp_from_tss(ist + 3);
1276 else
1277 esp = ESP;
1278 esp &= ~0xfLL; /* align stack */
1279 dpl = cpl;
1280 } else {
1281 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1282 new_stack = 0; /* avoid warning */
1283 esp = 0; /* avoid warning */
1284 }
1285
1286 PUSHQ(esp, env->segs[R_SS].selector);
1287 PUSHQ(esp, ESP);
1288 PUSHQ(esp, compute_eflags());
1289 PUSHQ(esp, env->segs[R_CS].selector);
1290 PUSHQ(esp, old_eip);
1291 if (has_error_code) {
1292 PUSHQ(esp, error_code);
1293 }
1294
1295 if (new_stack) {
1296 ss = 0 | dpl;
1297 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1298 }
1299 ESP = esp;
1300
1301 selector = (selector & ~3) | dpl;
1302 cpu_x86_load_seg_cache(env, R_CS, selector,
1303 get_seg_base(e1, e2),
1304 get_seg_limit(e1, e2),
1305 e2);
1306 cpu_x86_set_cpl(env, dpl);
1307 env->eip = offset;
1308
1309 /* interrupt gate clear IF mask */
1310 if ((type & 1) == 0) {
1311 env->eflags &= ~IF_MASK;
1312 }
1313
1314#ifndef VBOX
1315 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1316#else
1317 /*
1318 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1319 * gets confused by seemingly changed EFLAGS. See #3491 and
1320 * public bug #2341.
1321 */
1322 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1323#endif
1324}
1325#endif
1326
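/* SYSCALL: fast system call entry; loads CS/SS from MSR_STAR and jumps to LSTAR/CSTAR (or the 32-bit STAR EIP in legacy mode) */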
1327#if defined(CONFIG_USER_ONLY)
1328void helper_syscall(int next_eip_addend)
1329{
1330 env->exception_index = EXCP_SYSCALL;
1331 env->exception_next_eip = env->eip + next_eip_addend;
1332 cpu_loop_exit();
1333}
1334#else
1335void helper_syscall(int next_eip_addend)
1336{
1337 int selector;
1338
1339 if (!(env->efer & MSR_EFER_SCE)) {
1340 raise_exception_err(EXCP06_ILLOP, 0);
1341 }
1342 selector = (env->star >> 32) & 0xffff;
1343#ifdef TARGET_X86_64
1344 if (env->hflags & HF_LMA_MASK) {
1345 int code64;
1346
1347 ECX = env->eip + next_eip_addend;
1348 env->regs[11] = compute_eflags();
1349
1350 code64 = env->hflags & HF_CS64_MASK;
1351
1352 cpu_x86_set_cpl(env, 0);
1353 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1354 0, 0xffffffff,
1355 DESC_G_MASK | DESC_P_MASK |
1356 DESC_S_MASK |
1357 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1358 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1359 0, 0xffffffff,
1360 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1361 DESC_S_MASK |
1362 DESC_W_MASK | DESC_A_MASK);
1363 env->eflags &= ~env->fmask;
1364 load_eflags(env->eflags, 0);
1365 if (code64)
1366 env->eip = env->lstar;
1367 else
1368 env->eip = env->cstar;
1369 } else
1370#endif
1371 {
1372 ECX = (uint32_t)(env->eip + next_eip_addend);
1373
1374 cpu_x86_set_cpl(env, 0);
1375 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1376 0, 0xffffffff,
1377 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1378 DESC_S_MASK |
1379 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1380 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1381 0, 0xffffffff,
1382 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1383 DESC_S_MASK |
1384 DESC_W_MASK | DESC_A_MASK);
1385 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1386 env->eip = (uint32_t)env->star;
1387 }
1388}
1389#endif
1390
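/* SYSRET: return from a fast system call to CPL 3, loading CS/SS from MSR_STAR bits 63:48 */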
1391void helper_sysret(int dflag)
1392{
1393 int cpl, selector;
1394
1395 if (!(env->efer & MSR_EFER_SCE)) {
1396 raise_exception_err(EXCP06_ILLOP, 0);
1397 }
1398 cpl = env->hflags & HF_CPL_MASK;
1399 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1400 raise_exception_err(EXCP0D_GPF, 0);
1401 }
1402 selector = (env->star >> 48) & 0xffff;
1403#ifdef TARGET_X86_64
1404 if (env->hflags & HF_LMA_MASK) {
1405 if (dflag == 2) {
1406 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1407 0, 0xffffffff,
1408 DESC_G_MASK | DESC_P_MASK |
1409 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1410 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1411 DESC_L_MASK);
1412 env->eip = ECX;
1413 } else {
1414 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1415 0, 0xffffffff,
1416 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1417 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1418 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1419 env->eip = (uint32_t)ECX;
1420 }
1421 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1422 0, 0xffffffff,
1423 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1424 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1425 DESC_W_MASK | DESC_A_MASK);
1426 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1427 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1428 cpu_x86_set_cpl(env, 3);
1429 } else
1430#endif
1431 {
1432 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1433 0, 0xffffffff,
1434 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1435 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1436 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1437 env->eip = (uint32_t)ECX;
1438 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1439 0, 0xffffffff,
1440 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1441 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1442 DESC_W_MASK | DESC_A_MASK);
1443 env->eflags |= IF_MASK;
1444 cpu_x86_set_cpl(env, 3);
1445 }
1446#ifdef USE_KQEMU
1447 if (kqemu_is_ok(env)) {
1448 if (env->hflags & HF_LMA_MASK)
1449 CC_OP = CC_OP_EFLAGS;
1450 env->exception_index = -1;
1451 cpu_loop_exit();
1452 }
1453#endif
1454}
1455
1456#ifdef VBOX
1457/**
1458 * Checks and processes external VMM events.
1459 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1460 */
1461void helper_external_event(void)
1462{
1463#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1464 uintptr_t uSP;
1465# ifdef RT_ARCH_AMD64
1466 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1467# else
1468 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1469# endif
1470 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1471#endif
1472 /* Keep in sync with flags checked by gen_check_external_event() */
1473 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1474 {
1475 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1476 ~CPU_INTERRUPT_EXTERNAL_HARD);
1477 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1478 }
1479 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1480 {
1481 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1482 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1483 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1484 }
1485 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1486 {
1487 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1488 ~CPU_INTERRUPT_EXTERNAL_DMA);
1489 remR3DmaRun(env);
1490 }
1491 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1492 {
1493 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1494 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1495 remR3TimersRun(env);
1496 }
1497}
1498/* helper for recording call instruction addresses for later scanning */
1499void helper_record_call()
1500{
1501 if ( !(env->state & CPU_RAW_RING0)
1502 && (env->cr[0] & CR0_PG_MASK)
1503 && !(env->eflags & X86_EFL_IF))
1504 remR3RecordCall(env);
1505}
1506#endif /* VBOX */
1507
1508/* real mode interrupt */
1509static void do_interrupt_real(int intno, int is_int, int error_code,
1510 unsigned int next_eip)
1511{
1512 SegmentCache *dt;
1513 target_ulong ptr, ssp;
1514 int selector;
1515 uint32_t offset, esp;
1516 uint32_t old_cs, old_eip;
1517
1518 /* real mode (simpler !) */
1519 dt = &env->idt;
1520#ifndef VBOX
1521 if (intno * 4 + 3 > dt->limit)
1522#else
1523 if ((unsigned)intno * 4 + 3 > dt->limit)
1524#endif
1525 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1526 ptr = dt->base + intno * 4;
1527 offset = lduw_kernel(ptr);
1528 selector = lduw_kernel(ptr + 2);
1529 esp = ESP;
1530 ssp = env->segs[R_SS].base;
1531 if (is_int)
1532 old_eip = next_eip;
1533 else
1534 old_eip = env->eip;
1535 old_cs = env->segs[R_CS].selector;
1536 /* XXX: use SS segment size ? */
1537 PUSHW(ssp, esp, 0xffff, compute_eflags());
1538 PUSHW(ssp, esp, 0xffff, old_cs);
1539 PUSHW(ssp, esp, 0xffff, old_eip);
1540
1541 /* update processor state */
1542 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1543 env->eip = offset;
1544 env->segs[R_CS].selector = selector;
1545 env->segs[R_CS].base = (selector << 4);
1546 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1547}
1548
1549/* fake user mode interrupt */
1550void do_interrupt_user(int intno, int is_int, int error_code,
1551 target_ulong next_eip)
1552{
1553 SegmentCache *dt;
1554 target_ulong ptr;
1555 int dpl, cpl, shift;
1556 uint32_t e2;
1557
1558 dt = &env->idt;
1559 if (env->hflags & HF_LMA_MASK) {
1560 shift = 4;
1561 } else {
1562 shift = 3;
1563 }
1564 ptr = dt->base + (intno << shift);
1565 e2 = ldl_kernel(ptr + 4);
1566
1567 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1568 cpl = env->hflags & HF_CPL_MASK;
1569 /* check privilege if software int */
1570 if (is_int && dpl < cpl)
1571 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1572
1573 /* Since we emulate only user space, we cannot do more than
1574 exit the emulation with the suitable exception and error
1575 code */
1576 if (is_int)
1577 EIP = next_eip;
1578}
1579
1580/*
1581 * Begin execution of an interruption. is_int is TRUE if coming from
1582 * the int instruction. next_eip is the EIP value AFTER the interrupt
1583 * instruction. It is only relevant if is_int is TRUE.
1584 */
1585void do_interrupt(int intno, int is_int, int error_code,
1586 target_ulong next_eip, int is_hw)
1587{
1588 if (loglevel & CPU_LOG_INT) {
1589 if ((env->cr[0] & CR0_PE_MASK)) {
1590 static int count;
1591 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1592 count, intno, error_code, is_int,
1593 env->hflags & HF_CPL_MASK,
1594 env->segs[R_CS].selector, EIP,
1595 (int)env->segs[R_CS].base + EIP,
1596 env->segs[R_SS].selector, ESP);
1597 if (intno == 0x0e) {
1598 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1599 } else {
1600 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1601 }
1602 fprintf(logfile, "\n");
1603 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1604#if 0
1605 {
1606 int i;
1607 uint8_t *ptr;
1608 fprintf(logfile, " code=");
1609 ptr = env->segs[R_CS].base + env->eip;
1610 for(i = 0; i < 16; i++) {
1611 fprintf(logfile, " %02x", ldub(ptr + i));
1612 }
1613 fprintf(logfile, "\n");
1614 }
1615#endif
1616 count++;
1617 }
1618 }
1619 if (env->cr[0] & CR0_PE_MASK) {
1620#ifdef TARGET_X86_64
1621 if (env->hflags & HF_LMA_MASK) {
1622 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1623 } else
1624#endif
1625 {
1626#ifdef VBOX
1627 /* int xx *, v86 code and VME enabled? */
1628 if ( (env->eflags & VM_MASK)
1629 && (env->cr[4] & CR4_VME_MASK)
1630 && is_int
1631 && !is_hw
1632 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1633 )
1634 do_soft_interrupt_vme(intno, error_code, next_eip);
1635 else
1636#endif /* VBOX */
1637 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1638 }
1639 } else {
1640 do_interrupt_real(intno, is_int, error_code, next_eip);
1641 }
1642}
1643
1644/*
1645 * Check nested exceptions and change to double or triple fault if
1646 * needed. It should only be called, if this is not an interrupt.
1647 * Returns the new exception number.
1648 */
1649static int check_exception(int intno, int *error_code)
1650{
1651 int first_contributory = env->old_exception == 0 ||
1652 (env->old_exception >= 10 &&
1653 env->old_exception <= 13);
1654 int second_contributory = intno == 0 ||
1655 (intno >= 10 && intno <= 13);
1656
1657 if (loglevel & CPU_LOG_INT)
1658 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1659 env->old_exception, intno);
1660
1661 if (env->old_exception == EXCP08_DBLE)
1662 cpu_abort(env, "triple fault");
1663
1664 if ((first_contributory && second_contributory)
1665 || (env->old_exception == EXCP0E_PAGE &&
1666 (second_contributory || (intno == EXCP0E_PAGE)))) {
1667 intno = EXCP08_DBLE;
1668 *error_code = 0;
1669 }
1670
1671 if (second_contributory || (intno == EXCP0E_PAGE) ||
1672 (intno == EXCP08_DBLE))
1673 env->old_exception = intno;
1674
1675 return intno;
1676}
1677
1678/*
1679 * Signal an interruption. It is executed in the main CPU loop.
1680 * is_int is TRUE if coming from the int instruction. next_eip is the
1681 * EIP value AFTER the interrupt instruction. It is only relevant if
1682 * is_int is TRUE.
1683 */
1684void raise_interrupt(int intno, int is_int, int error_code,
1685 int next_eip_addend)
1686{
1687#if defined(VBOX) && defined(DEBUG)
1688 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend));
1689#endif
1690 if (!is_int) {
1691 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1692 intno = check_exception(intno, &error_code);
1693 } else {
1694 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1695 }
1696
1697 env->exception_index = intno;
1698 env->error_code = error_code;
1699 env->exception_is_int = is_int;
1700 env->exception_next_eip = env->eip + next_eip_addend;
1701 cpu_loop_exit();
1702}
1703
1704/* shortcuts to generate exceptions */
1705
1706void (raise_exception_err)(int exception_index, int error_code)
1707{
1708 raise_interrupt(exception_index, 0, error_code, 0);
1709}
1710
1711void raise_exception(int exception_index)
1712{
1713 raise_interrupt(exception_index, 0, 0, 0);
1714}
1715
1716/* SMM support */
1717
1718#if defined(CONFIG_USER_ONLY)
1719
1720void do_smm_enter(void)
1721{
1722}
1723
1724void helper_rsm(void)
1725{
1726}
1727
1728#else
1729
1730#ifdef TARGET_X86_64
1731#define SMM_REVISION_ID 0x00020064
1732#else
1733#define SMM_REVISION_ID 0x00020000
1734#endif
1735
1736void do_smm_enter(void)
1737{
1738 target_ulong sm_state;
1739 SegmentCache *dt;
1740 int i, offset;
1741
1742 if (loglevel & CPU_LOG_INT) {
1743 fprintf(logfile, "SMM: enter\n");
1744 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1745 }
1746
1747 env->hflags |= HF_SMM_MASK;
1748 cpu_smm_update(env);
1749
1750 sm_state = env->smbase + 0x8000;
1751
1752#ifdef TARGET_X86_64
1753 for(i = 0; i < 6; i++) {
1754 dt = &env->segs[i];
1755 offset = 0x7e00 + i * 16;
1756 stw_phys(sm_state + offset, dt->selector);
1757 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1758 stl_phys(sm_state + offset + 4, dt->limit);
1759 stq_phys(sm_state + offset + 8, dt->base);
1760 }
1761
1762 stq_phys(sm_state + 0x7e68, env->gdt.base);
1763 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1764
1765 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1766 stq_phys(sm_state + 0x7e78, env->ldt.base);
1767 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1768 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1769
1770 stq_phys(sm_state + 0x7e88, env->idt.base);
1771 stl_phys(sm_state + 0x7e84, env->idt.limit);
1772
1773 stw_phys(sm_state + 0x7e90, env->tr.selector);
1774 stq_phys(sm_state + 0x7e98, env->tr.base);
1775 stl_phys(sm_state + 0x7e94, env->tr.limit);
1776 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1777
1778 stq_phys(sm_state + 0x7ed0, env->efer);
1779
1780 stq_phys(sm_state + 0x7ff8, EAX);
1781 stq_phys(sm_state + 0x7ff0, ECX);
1782 stq_phys(sm_state + 0x7fe8, EDX);
1783 stq_phys(sm_state + 0x7fe0, EBX);
1784 stq_phys(sm_state + 0x7fd8, ESP);
1785 stq_phys(sm_state + 0x7fd0, EBP);
1786 stq_phys(sm_state + 0x7fc8, ESI);
1787 stq_phys(sm_state + 0x7fc0, EDI);
1788 for(i = 8; i < 16; i++)
1789 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1790 stq_phys(sm_state + 0x7f78, env->eip);
1791 stl_phys(sm_state + 0x7f70, compute_eflags());
1792 stl_phys(sm_state + 0x7f68, env->dr[6]);
1793 stl_phys(sm_state + 0x7f60, env->dr[7]);
1794
1795 stl_phys(sm_state + 0x7f48, env->cr[4]);
1796 stl_phys(sm_state + 0x7f50, env->cr[3]);
1797 stl_phys(sm_state + 0x7f58, env->cr[0]);
1798
1799 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1800 stl_phys(sm_state + 0x7f00, env->smbase);
1801#else
1802 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1803 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1804 stl_phys(sm_state + 0x7ff4, compute_eflags());
1805 stl_phys(sm_state + 0x7ff0, env->eip);
1806 stl_phys(sm_state + 0x7fec, EDI);
1807 stl_phys(sm_state + 0x7fe8, ESI);
1808 stl_phys(sm_state + 0x7fe4, EBP);
1809 stl_phys(sm_state + 0x7fe0, ESP);
1810 stl_phys(sm_state + 0x7fdc, EBX);
1811 stl_phys(sm_state + 0x7fd8, EDX);
1812 stl_phys(sm_state + 0x7fd4, ECX);
1813 stl_phys(sm_state + 0x7fd0, EAX);
1814 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1815 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1816
1817 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1818 stl_phys(sm_state + 0x7f64, env->tr.base);
1819 stl_phys(sm_state + 0x7f60, env->tr.limit);
1820 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1821
1822 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1823 stl_phys(sm_state + 0x7f80, env->ldt.base);
1824 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1825 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1826
1827 stl_phys(sm_state + 0x7f74, env->gdt.base);
1828 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1829
1830 stl_phys(sm_state + 0x7f58, env->idt.base);
1831 stl_phys(sm_state + 0x7f54, env->idt.limit);
1832
1833 for(i = 0; i < 6; i++) {
1834 dt = &env->segs[i];
1835 if (i < 3)
1836 offset = 0x7f84 + i * 12;
1837 else
1838 offset = 0x7f2c + (i - 3) * 12;
1839 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1840 stl_phys(sm_state + offset + 8, dt->base);
1841 stl_phys(sm_state + offset + 4, dt->limit);
1842 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1843 }
1844 stl_phys(sm_state + 0x7f14, env->cr[4]);
1845
1846 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1847 stl_phys(sm_state + 0x7ef8, env->smbase);
1848#endif
1849 /* init SMM cpu state */
1850
1851#ifdef TARGET_X86_64
1852 cpu_load_efer(env, 0);
1853#endif
1854 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1855 env->eip = 0x00008000;
1856 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1857 0xffffffff, 0);
1858 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1859 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1860 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1861 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1862 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1863
1864 cpu_x86_update_cr0(env,
1865 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1866 cpu_x86_update_cr4(env, 0);
1867 env->dr[7] = 0x00000400;
1868 CC_OP = CC_OP_EFLAGS;
1869}
1870
1871void helper_rsm(void)
1872{
1873#ifdef VBOX
1874 cpu_abort(env, "helper_rsm");
1875#else /* !VBOX */
1878 target_ulong sm_state;
1879 int i, offset;
1880 uint32_t val;
1881
1882 sm_state = env->smbase + 0x8000;
1883#ifdef TARGET_X86_64
1884 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1885
1886 for(i = 0; i < 6; i++) {
1887 offset = 0x7e00 + i * 16;
1888 cpu_x86_load_seg_cache(env, i,
1889 lduw_phys(sm_state + offset),
1890 ldq_phys(sm_state + offset + 8),
1891 ldl_phys(sm_state + offset + 4),
1892 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1893 }
1894
1895 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1896 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1897
1898 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1899 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1900 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1901 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1902
1903 env->idt.base = ldq_phys(sm_state + 0x7e88);
1904 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1905
1906 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1907 env->tr.base = ldq_phys(sm_state + 0x7e98);
1908 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1909 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1910
1911 EAX = ldq_phys(sm_state + 0x7ff8);
1912 ECX = ldq_phys(sm_state + 0x7ff0);
1913 EDX = ldq_phys(sm_state + 0x7fe8);
1914 EBX = ldq_phys(sm_state + 0x7fe0);
1915 ESP = ldq_phys(sm_state + 0x7fd8);
1916 EBP = ldq_phys(sm_state + 0x7fd0);
1917 ESI = ldq_phys(sm_state + 0x7fc8);
1918 EDI = ldq_phys(sm_state + 0x7fc0);
1919 for(i = 8; i < 16; i++)
1920 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1921 env->eip = ldq_phys(sm_state + 0x7f78);
1922 load_eflags(ldl_phys(sm_state + 0x7f70),
1923 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1924 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1925 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1926
1927 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1928 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1929 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1930
1931 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1932 if (val & 0x20000) {
1933 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1934 }
1935#else
1936 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1937 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1938 load_eflags(ldl_phys(sm_state + 0x7ff4),
1939 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1940 env->eip = ldl_phys(sm_state + 0x7ff0);
1941 EDI = ldl_phys(sm_state + 0x7fec);
1942 ESI = ldl_phys(sm_state + 0x7fe8);
1943 EBP = ldl_phys(sm_state + 0x7fe4);
1944 ESP = ldl_phys(sm_state + 0x7fe0);
1945 EBX = ldl_phys(sm_state + 0x7fdc);
1946 EDX = ldl_phys(sm_state + 0x7fd8);
1947 ECX = ldl_phys(sm_state + 0x7fd4);
1948 EAX = ldl_phys(sm_state + 0x7fd0);
1949 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1950 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1951
1952 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1953 env->tr.base = ldl_phys(sm_state + 0x7f64);
1954 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1955 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1956
1957 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1958 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1959 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1960 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1961
1962 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1963 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1964
1965 env->idt.base = ldl_phys(sm_state + 0x7f58);
1966 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1967
1968 for(i = 0; i < 6; i++) {
1969 if (i < 3)
1970 offset = 0x7f84 + i * 12;
1971 else
1972 offset = 0x7f2c + (i - 3) * 12;
1973 cpu_x86_load_seg_cache(env, i,
1974 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1975 ldl_phys(sm_state + offset + 8),
1976 ldl_phys(sm_state + offset + 4),
1977 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1978 }
1979 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1980
1981 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1982 if (val & 0x20000) {
1983 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1984 }
1985#endif
1986 CC_OP = CC_OP_EFLAGS;
1987 env->hflags &= ~HF_SMM_MASK;
1988 cpu_smm_update(env);
1989
1990 if (loglevel & CPU_LOG_INT) {
1991 fprintf(logfile, "SMM: after RSM\n");
1992 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1993 }
1994#endif /* !VBOX */
1995}
1996
1997#endif /* !CONFIG_USER_ONLY */
1998
1999
2000/* division, flags are undefined */
2001
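/*
 * Division helpers.  For the 8-bit forms the dividend is AX; the quotient is
 * returned in AL and the remainder in AH.  Division by zero and quotient
 * overflow both raise #DE (EXCP00_DIVZ), matching the hardware behaviour.
 * Worked example for DIV r/m8: AX = 0x0105 (261), divisor = 0x10 (16)
 * gives AL = 0x10 (quotient 16) and AH = 0x05 (remainder 5).
 */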
2002void helper_divb_AL(target_ulong t0)
2003{
2004 unsigned int num, den, q, r;
2005
2006 num = (EAX & 0xffff);
2007 den = (t0 & 0xff);
2008 if (den == 0) {
2009 raise_exception(EXCP00_DIVZ);
2010 }
2011 q = (num / den);
2012 if (q > 0xff)
2013 raise_exception(EXCP00_DIVZ);
2014 q &= 0xff;
2015 r = (num % den) & 0xff;
2016 EAX = (EAX & ~0xffff) | (r << 8) | q;
2017}
2018
2019void helper_idivb_AL(target_ulong t0)
2020{
2021 int num, den, q, r;
2022
2023 num = (int16_t)EAX;
2024 den = (int8_t)t0;
2025 if (den == 0) {
2026 raise_exception(EXCP00_DIVZ);
2027 }
2028 q = (num / den);
2029 if (q != (int8_t)q)
2030 raise_exception(EXCP00_DIVZ);
2031 q &= 0xff;
2032 r = (num % den) & 0xff;
2033 EAX = (EAX & ~0xffff) | (r << 8) | q;
2034}
2035
2036void helper_divw_AX(target_ulong t0)
2037{
2038 unsigned int num, den, q, r;
2039
2040 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2041 den = (t0 & 0xffff);
2042 if (den == 0) {
2043 raise_exception(EXCP00_DIVZ);
2044 }
2045 q = (num / den);
2046 if (q > 0xffff)
2047 raise_exception(EXCP00_DIVZ);
2048 q &= 0xffff;
2049 r = (num % den) & 0xffff;
2050 EAX = (EAX & ~0xffff) | q;
2051 EDX = (EDX & ~0xffff) | r;
2052}
2053
2054void helper_idivw_AX(target_ulong t0)
2055{
2056 int num, den, q, r;
2057
2058 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2059 den = (int16_t)t0;
2060 if (den == 0) {
2061 raise_exception(EXCP00_DIVZ);
2062 }
2063 q = (num / den);
2064 if (q != (int16_t)q)
2065 raise_exception(EXCP00_DIVZ);
2066 q &= 0xffff;
2067 r = (num % den) & 0xffff;
2068 EAX = (EAX & ~0xffff) | q;
2069 EDX = (EDX & ~0xffff) | r;
2070}
2071
2072void helper_divl_EAX(target_ulong t0)
2073{
2074 unsigned int den, r;
2075 uint64_t num, q;
2076
2077 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2078 den = t0;
2079 if (den == 0) {
2080 raise_exception(EXCP00_DIVZ);
2081 }
2082 q = (num / den);
2083 r = (num % den);
2084 if (q > 0xffffffff)
2085 raise_exception(EXCP00_DIVZ);
2086 EAX = (uint32_t)q;
2087 EDX = (uint32_t)r;
2088}
2089
2090void helper_idivl_EAX(target_ulong t0)
2091{
2092 int den, r;
2093 int64_t num, q;
2094
2095 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2096 den = t0;
2097 if (den == 0) {
2098 raise_exception(EXCP00_DIVZ);
2099 }
2100 q = (num / den);
2101 r = (num % den);
2102 if (q != (int32_t)q)
2103 raise_exception(EXCP00_DIVZ);
2104 EAX = (uint32_t)q;
2105 EDX = (uint32_t)r;
2106}
2107
2108/* bcd */
2109
2110/* XXX: exception */
2111void helper_aam(int base)
2112{
 2113    int al, ah;
         if (base == 0)
             raise_exception(EXCP00_DIVZ); /* AAM 0 raises #DE; addresses the XXX above */
 2114    al = EAX & 0xff;
 2115    ah = al / base;
2116 al = al % base;
2117 EAX = (EAX & ~0xffff) | al | (ah << 8);
2118 CC_DST = al;
2119}
2120
2121void helper_aad(int base)
2122{
2123 int al, ah;
2124 al = EAX & 0xff;
2125 ah = (EAX >> 8) & 0xff;
2126 al = ((ah * base) + al) & 0xff;
2127 EAX = (EAX & ~0xffff) | al;
2128 CC_DST = al;
2129}
2130
2131void helper_aaa(void)
2132{
2133 int icarry;
2134 int al, ah, af;
2135 int eflags;
2136
2137 eflags = cc_table[CC_OP].compute_all();
2138 af = eflags & CC_A;
2139 al = EAX & 0xff;
2140 ah = (EAX >> 8) & 0xff;
2141
2142 icarry = (al > 0xf9);
2143 if (((al & 0x0f) > 9 ) || af) {
2144 al = (al + 6) & 0x0f;
2145 ah = (ah + 1 + icarry) & 0xff;
2146 eflags |= CC_C | CC_A;
2147 } else {
2148 eflags &= ~(CC_C | CC_A);
2149 al &= 0x0f;
2150 }
2151 EAX = (EAX & ~0xffff) | al | (ah << 8);
2152 CC_SRC = eflags;
2153 FORCE_RET();
2154}
2155
2156void helper_aas(void)
2157{
2158 int icarry;
2159 int al, ah, af;
2160 int eflags;
2161
2162 eflags = cc_table[CC_OP].compute_all();
2163 af = eflags & CC_A;
2164 al = EAX & 0xff;
2165 ah = (EAX >> 8) & 0xff;
2166
2167 icarry = (al < 6);
2168 if (((al & 0x0f) > 9 ) || af) {
2169 al = (al - 6) & 0x0f;
2170 ah = (ah - 1 - icarry) & 0xff;
2171 eflags |= CC_C | CC_A;
2172 } else {
2173 eflags &= ~(CC_C | CC_A);
2174 al &= 0x0f;
2175 }
2176 EAX = (EAX & ~0xffff) | al | (ah << 8);
2177 CC_SRC = eflags;
2178 FORCE_RET();
2179}
2180
2181void helper_daa(void)
2182{
2183 int al, af, cf;
2184 int eflags;
2185
2186 eflags = cc_table[CC_OP].compute_all();
2187 cf = eflags & CC_C;
2188 af = eflags & CC_A;
2189 al = EAX & 0xff;
2190
2191 eflags = 0;
2192 if (((al & 0x0f) > 9 ) || af) {
2193 al = (al + 6) & 0xff;
2194 eflags |= CC_A;
2195 }
2196 if ((al > 0x9f) || cf) {
2197 al = (al + 0x60) & 0xff;
2198 eflags |= CC_C;
2199 }
2200 EAX = (EAX & ~0xff) | al;
2201 /* well, speed is not an issue here, so we compute the flags by hand */
2202 eflags |= (al == 0) << 6; /* zf */
2203 eflags |= parity_table[al]; /* pf */
2204 eflags |= (al & 0x80); /* sf */
2205 CC_SRC = eflags;
2206 FORCE_RET();
2207}
2208
2209void helper_das(void)
2210{
2211 int al, al1, af, cf;
2212 int eflags;
2213
2214 eflags = cc_table[CC_OP].compute_all();
2215 cf = eflags & CC_C;
2216 af = eflags & CC_A;
2217 al = EAX & 0xff;
2218
2219 eflags = 0;
2220 al1 = al;
2221 if (((al & 0x0f) > 9 ) || af) {
2222 eflags |= CC_A;
2223 if (al < 6 || cf)
2224 eflags |= CC_C;
2225 al = (al - 6) & 0xff;
2226 }
2227 if ((al1 > 0x99) || cf) {
2228 al = (al - 0x60) & 0xff;
2229 eflags |= CC_C;
2230 }
2231 EAX = (EAX & ~0xff) | al;
2232 /* well, speed is not an issue here, so we compute the flags by hand */
2233 eflags |= (al == 0) << 6; /* zf */
2234 eflags |= parity_table[al]; /* pf */
2235 eflags |= (al & 0x80); /* sf */
2236 CC_SRC = eflags;
2237 FORCE_RET();
2238}
2239
2240void helper_into(int next_eip_addend)
2241{
2242 int eflags;
2243 eflags = cc_table[CC_OP].compute_all();
2244 if (eflags & CC_O) {
2245 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2246 }
2247}
2248
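/*
 * CMPXCHG8B m64: compare EDX:EAX with the 64-bit memory operand.  If they
 * are equal, ZF is set and ECX:EBX is stored to memory; otherwise ZF is
 * cleared and the memory value is loaded into EDX:EAX.  The store in the
 * failure path mirrors real hardware, which performs the write cycle
 * unconditionally.
 */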
2249void helper_cmpxchg8b(target_ulong a0)
2250{
2251 uint64_t d;
2252 int eflags;
2253
2254 eflags = cc_table[CC_OP].compute_all();
2255 d = ldq(a0);
2256 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2257 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2258 eflags |= CC_Z;
2259 } else {
2260 /* always do the store */
2261 stq(a0, d);
2262 EDX = (uint32_t)(d >> 32);
2263 EAX = (uint32_t)d;
2264 eflags &= ~CC_Z;
2265 }
2266 CC_SRC = eflags;
2267}
2268
2269#ifdef TARGET_X86_64
2270void helper_cmpxchg16b(target_ulong a0)
2271{
2272 uint64_t d0, d1;
2273 int eflags;
2274
2275 if ((a0 & 0xf) != 0)
2276 raise_exception(EXCP0D_GPF);
2277 eflags = cc_table[CC_OP].compute_all();
2278 d0 = ldq(a0);
2279 d1 = ldq(a0 + 8);
2280 if (d0 == EAX && d1 == EDX) {
2281 stq(a0, EBX);
2282 stq(a0 + 8, ECX);
2283 eflags |= CC_Z;
2284 } else {
2285 /* always do the store */
2286 stq(a0, d0);
2287 stq(a0 + 8, d1);
2288 EDX = d1;
2289 EAX = d0;
2290 eflags &= ~CC_Z;
2291 }
2292 CC_SRC = eflags;
2293}
2294#endif
2295
2296void helper_single_step(void)
2297{
2298 env->dr[6] |= 0x4000;
2299 raise_exception(EXCP01_SSTP);
2300}
2301
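/*
 * CPUID.  A leaf above the announced maximum is clamped to the highest basic
 * leaf, which is the behaviour Intel documents for out-of-range requests.
 * In the VBOX build the whole instruction is delegated to remR3CpuId(), so
 * the reported values come from the VMM rather than from the static data
 * below.
 */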
2302void helper_cpuid(void)
2303{
2304#ifndef VBOX
2305 uint32_t index;
2306
2307 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2308
2309 index = (uint32_t)EAX;
2310 /* test if maximum index reached */
2311 if (index & 0x80000000) {
2312 if (index > env->cpuid_xlevel)
2313 index = env->cpuid_level;
2314 } else {
2315 if (index > env->cpuid_level)
2316 index = env->cpuid_level;
2317 }
2318
2319 switch(index) {
2320 case 0:
2321 EAX = env->cpuid_level;
2322 EBX = env->cpuid_vendor1;
2323 EDX = env->cpuid_vendor2;
2324 ECX = env->cpuid_vendor3;
2325 break;
2326 case 1:
2327 EAX = env->cpuid_version;
2328 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2329 ECX = env->cpuid_ext_features;
2330 EDX = env->cpuid_features;
2331 break;
2332 case 2:
2333 /* cache info: needed for Pentium Pro compatibility */
2334 EAX = 1;
2335 EBX = 0;
2336 ECX = 0;
2337 EDX = 0x2c307d;
2338 break;
2339 case 4:
2340 /* cache info: needed for Core compatibility */
2341 switch (ECX) {
2342 case 0: /* L1 dcache info */
2343 EAX = 0x0000121;
2344 EBX = 0x1c0003f;
2345 ECX = 0x000003f;
2346 EDX = 0x0000001;
2347 break;
2348 case 1: /* L1 icache info */
2349 EAX = 0x0000122;
2350 EBX = 0x1c0003f;
2351 ECX = 0x000003f;
2352 EDX = 0x0000001;
2353 break;
2354 case 2: /* L2 cache info */
2355 EAX = 0x0000143;
2356 EBX = 0x3c0003f;
2357 ECX = 0x0000fff;
2358 EDX = 0x0000001;
2359 break;
2360 default: /* end of info */
2361 EAX = 0;
2362 EBX = 0;
2363 ECX = 0;
2364 EDX = 0;
2365 break;
2366 }
2367
2368 break;
2369 case 5:
2370 /* mwait info: needed for Core compatibility */
2371 EAX = 0; /* Smallest monitor-line size in bytes */
2372 EBX = 0; /* Largest monitor-line size in bytes */
2373 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2374 EDX = 0;
2375 break;
2376 case 6:
2377 /* Thermal and Power Leaf */
2378 EAX = 0;
2379 EBX = 0;
2380 ECX = 0;
2381 EDX = 0;
2382 break;
2383 case 9:
2384 /* Direct Cache Access Information Leaf */
2385 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2386 EBX = 0;
2387 ECX = 0;
2388 EDX = 0;
2389 break;
2390 case 0xA:
2391 /* Architectural Performance Monitoring Leaf */
2392 EAX = 0;
2393 EBX = 0;
2394 ECX = 0;
2395 EDX = 0;
2396 break;
2397 case 0x80000000:
2398 EAX = env->cpuid_xlevel;
2399 EBX = env->cpuid_vendor1;
2400 EDX = env->cpuid_vendor2;
2401 ECX = env->cpuid_vendor3;
2402 break;
2403 case 0x80000001:
2404 EAX = env->cpuid_features;
2405 EBX = 0;
2406 ECX = env->cpuid_ext3_features;
2407 EDX = env->cpuid_ext2_features;
2408 break;
2409 case 0x80000002:
2410 case 0x80000003:
2411 case 0x80000004:
2412 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2413 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2414 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2415 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2416 break;
2417 case 0x80000005:
2418 /* cache info (L1 cache) */
2419 EAX = 0x01ff01ff;
2420 EBX = 0x01ff01ff;
2421 ECX = 0x40020140;
2422 EDX = 0x40020140;
2423 break;
2424 case 0x80000006:
2425 /* cache info (L2 cache) */
2426 EAX = 0;
2427 EBX = 0x42004200;
2428 ECX = 0x02008140;
2429 EDX = 0;
2430 break;
2431 case 0x80000008:
2432 /* virtual & phys address size in low 2 bytes. */
2433/* XXX: This value must match the one used in the MMU code. */
2434 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2435 /* 64 bit processor */
2436#if defined(USE_KQEMU)
2437 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2438#else
2439/* XXX: The physical address space is limited to 42 bits in exec.c. */
2440 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2441#endif
2442 } else {
2443#if defined(USE_KQEMU)
2444 EAX = 0x00000020; /* 32 bits physical */
2445#else
2446 if (env->cpuid_features & CPUID_PSE36)
2447 EAX = 0x00000024; /* 36 bits physical */
2448 else
2449 EAX = 0x00000020; /* 32 bits physical */
2450#endif
2451 }
2452 EBX = 0;
2453 ECX = 0;
2454 EDX = 0;
2455 break;
2456 case 0x8000000A:
2457 EAX = 0x00000001;
2458 EBX = 0;
2459 ECX = 0;
2460 EDX = 0;
2461 break;
2462 default:
2463 /* reserved values: zero */
2464 EAX = 0;
2465 EBX = 0;
2466 ECX = 0;
2467 EDX = 0;
2468 break;
2469 }
2470#else /* VBOX */
2471 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2472#endif /* VBOX */
2473}
2474
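/*
 * ENTER with a non-zero nesting level.  Architecturally, ENTER size,level
 * pushes EBP, copies level-1 frame pointers from the enclosing frames,
 * pushes the new frame pointer and finally reserves 'size' bytes of locals.
 * This helper only implements the display copy and the push of the new
 * frame pointer t1 (supplied by the translated ENTER code); the remaining
 * steps are emitted by the translator.
 */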
2475void helper_enter_level(int level, int data32, target_ulong t1)
2476{
2477 target_ulong ssp;
2478 uint32_t esp_mask, esp, ebp;
2479
2480 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2481 ssp = env->segs[R_SS].base;
2482 ebp = EBP;
2483 esp = ESP;
2484 if (data32) {
2485 /* 32 bit */
2486 esp -= 4;
2487 while (--level) {
2488 esp -= 4;
2489 ebp -= 4;
2490 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2491 }
2492 esp -= 4;
2493 stl(ssp + (esp & esp_mask), t1);
2494 } else {
2495 /* 16 bit */
2496 esp -= 2;
2497 while (--level) {
2498 esp -= 2;
2499 ebp -= 2;
2500 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2501 }
2502 esp -= 2;
2503 stw(ssp + (esp & esp_mask), t1);
2504 }
2505}
2506
2507#ifdef TARGET_X86_64
2508void helper_enter64_level(int level, int data64, target_ulong t1)
2509{
2510 target_ulong esp, ebp;
2511 ebp = EBP;
2512 esp = ESP;
2513
2514 if (data64) {
2515 /* 64 bit */
2516 esp -= 8;
2517 while (--level) {
2518 esp -= 8;
2519 ebp -= 8;
2520 stq(esp, ldq(ebp));
2521 }
2522 esp -= 8;
2523 stq(esp, t1);
2524 } else {
2525 /* 16 bit */
2526 esp -= 2;
2527 while (--level) {
2528 esp -= 2;
2529 ebp -= 2;
2530 stw(esp, lduw(ebp));
2531 }
2532 esp -= 2;
2533 stw(esp, t1);
2534 }
2535}
2536#endif
2537
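/*
 * LLDT: load the local descriptor table register.  A null selector leaves an
 * unusable (zero base/limit) LDT.  Otherwise the selector must refer to the
 * GDT (TI bit clear) and to a present LDT descriptor (system type 2); in
 * long mode the descriptor is 16 bytes and the upper 32 bits of the base
 * come from the third dword.
 */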
2538void helper_lldt(int selector)
2539{
2540 SegmentCache *dt;
2541 uint32_t e1, e2;
2542#ifndef VBOX
2543 int index, entry_limit;
2544#else
2545 unsigned int index, entry_limit;
2546#endif
2547 target_ulong ptr;
2548
2549#ifdef VBOX
2550 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2551 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2552#endif
2553
2554 selector &= 0xffff;
2555 if ((selector & 0xfffc) == 0) {
2556 /* XXX: NULL selector case: invalid LDT */
2557 env->ldt.base = 0;
2558 env->ldt.limit = 0;
2559 } else {
2560 if (selector & 0x4)
2561 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2562 dt = &env->gdt;
2563 index = selector & ~7;
2564#ifdef TARGET_X86_64
2565 if (env->hflags & HF_LMA_MASK)
2566 entry_limit = 15;
2567 else
2568#endif
2569 entry_limit = 7;
2570 if ((index + entry_limit) > dt->limit)
2571 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2572 ptr = dt->base + index;
2573 e1 = ldl_kernel(ptr);
2574 e2 = ldl_kernel(ptr + 4);
2575 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2576 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2577 if (!(e2 & DESC_P_MASK))
2578 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2579#ifdef TARGET_X86_64
2580 if (env->hflags & HF_LMA_MASK) {
2581 uint32_t e3;
2582 e3 = ldl_kernel(ptr + 8);
2583 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2584 env->ldt.base |= (target_ulong)e3 << 32;
2585 } else
2586#endif
2587 {
2588 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2589 }
2590 }
2591 env->ldt.selector = selector;
2592#ifdef VBOX
2593 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2594 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2595#endif
2596}
2597
2598void helper_ltr(int selector)
2599{
2600 SegmentCache *dt;
2601 uint32_t e1, e2;
2602#ifndef VBOX
2603 int index, type, entry_limit;
2604#else
2605 unsigned int index;
2606 int type, entry_limit;
2607#endif
2608 target_ulong ptr;
2609
2610#ifdef VBOX
2611 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2612 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2613 env->tr.flags, (RTSEL)(selector & 0xffff)));
2614#endif
2615 selector &= 0xffff;
2616 if ((selector & 0xfffc) == 0) {
2617 /* NULL selector case: invalid TR */
2618 env->tr.base = 0;
2619 env->tr.limit = 0;
2620 env->tr.flags = 0;
2621 } else {
2622 if (selector & 0x4)
2623 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2624 dt = &env->gdt;
2625 index = selector & ~7;
2626#ifdef TARGET_X86_64
2627 if (env->hflags & HF_LMA_MASK)
2628 entry_limit = 15;
2629 else
2630#endif
2631 entry_limit = 7;
2632 if ((index + entry_limit) > dt->limit)
2633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2634 ptr = dt->base + index;
2635 e1 = ldl_kernel(ptr);
2636 e2 = ldl_kernel(ptr + 4);
2637 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2638 if ((e2 & DESC_S_MASK) ||
2639 (type != 1 && type != 9))
2640 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2641 if (!(e2 & DESC_P_MASK))
2642 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2643#ifdef TARGET_X86_64
2644 if (env->hflags & HF_LMA_MASK) {
2645 uint32_t e3, e4;
2646 e3 = ldl_kernel(ptr + 8);
2647 e4 = ldl_kernel(ptr + 12);
2648 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2649 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2650 load_seg_cache_raw_dt(&env->tr, e1, e2);
2651 env->tr.base |= (target_ulong)e3 << 32;
2652 } else
2653#endif
2654 {
2655 load_seg_cache_raw_dt(&env->tr, e1, e2);
2656 }
2657 e2 |= DESC_TSS_BUSY_MASK;
2658 stl_kernel(ptr + 4, e2);
2659 }
2660 env->tr.selector = selector;
2661#ifdef VBOX
2662 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2663 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2664 env->tr.flags, (RTSEL)(selector & 0xffff)));
2665#endif
2666}
2667
2668/* only works if protected mode and not VM86. seg_reg must be != R_CS */
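/* For SS the descriptor must be a writable data segment with RPL == DPL ==
   CPL; for the other registers a data or readable code segment with
   DPL >= max(CPL, RPL) unless it is conforming code.  The accessed bit is
   set in the descriptor if it was clear. */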
2669void helper_load_seg(int seg_reg, int selector)
2670{
2671 uint32_t e1, e2;
2672 int cpl, dpl, rpl;
2673 SegmentCache *dt;
2674#ifndef VBOX
2675 int index;
2676#else
2677 unsigned int index;
2678#endif
2679 target_ulong ptr;
2680
2681 selector &= 0xffff;
2682 cpl = env->hflags & HF_CPL_MASK;
2683
2684#ifdef VBOX
 2685    /* Trying to load a selector with RPL=1 while the guest is in raw ring 0? */
2686 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2687 {
2688 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2689 selector = selector & 0xfffc;
2690 }
2691#endif
2692 if ((selector & 0xfffc) == 0) {
2693 /* null selector case */
2694 if (seg_reg == R_SS
2695#ifdef TARGET_X86_64
2696 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2697#endif
2698 )
2699 raise_exception_err(EXCP0D_GPF, 0);
2700 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2701 } else {
2702
2703 if (selector & 0x4)
2704 dt = &env->ldt;
2705 else
2706 dt = &env->gdt;
2707 index = selector & ~7;
2708 if ((index + 7) > dt->limit)
2709 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2710 ptr = dt->base + index;
2711 e1 = ldl_kernel(ptr);
2712 e2 = ldl_kernel(ptr + 4);
2713
2714 if (!(e2 & DESC_S_MASK))
2715 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2716 rpl = selector & 3;
2717 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2718 if (seg_reg == R_SS) {
2719 /* must be writable segment */
2720 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2721 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2722 if (rpl != cpl || dpl != cpl)
2723 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2724 } else {
2725 /* must be readable segment */
2726 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2727 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2728
2729 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2730 /* if not conforming code, test rights */
2731 if (dpl < cpl || dpl < rpl)
2732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2733 }
2734 }
2735
2736 if (!(e2 & DESC_P_MASK)) {
2737 if (seg_reg == R_SS)
2738 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2739 else
2740 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2741 }
2742
2743 /* set the access bit if not already set */
2744 if (!(e2 & DESC_A_MASK)) {
2745 e2 |= DESC_A_MASK;
2746 stl_kernel(ptr + 4, e2);
2747 }
2748
2749 cpu_x86_load_seg_cache(env, seg_reg, selector,
2750 get_seg_base(e1, e2),
2751 get_seg_limit(e1, e2),
2752 e2);
2753#if 0
2754 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2755 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2756#endif
2757 }
2758}
2759
2760/* protected mode jump */
2761void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2762 int next_eip_addend)
2763{
2764 int gate_cs, type;
2765 uint32_t e1, e2, cpl, dpl, rpl, limit;
2766 target_ulong next_eip;
2767
2768#ifdef VBOX
2769 e1 = e2 = 0;
2770#endif
2771 if ((new_cs & 0xfffc) == 0)
2772 raise_exception_err(EXCP0D_GPF, 0);
2773 if (load_segment(&e1, &e2, new_cs) != 0)
2774 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2775 cpl = env->hflags & HF_CPL_MASK;
2776 if (e2 & DESC_S_MASK) {
2777 if (!(e2 & DESC_CS_MASK))
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2780 if (e2 & DESC_C_MASK) {
2781 /* conforming code segment */
2782 if (dpl > cpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 } else {
2785 /* non conforming code segment */
2786 rpl = new_cs & 3;
2787 if (rpl > cpl)
2788 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2789 if (dpl != cpl)
2790 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2791 }
2792 if (!(e2 & DESC_P_MASK))
2793 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2794 limit = get_seg_limit(e1, e2);
2795 if (new_eip > limit &&
2796 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2797 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2798 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2799 get_seg_base(e1, e2), limit, e2);
2800 EIP = new_eip;
2801 } else {
2802 /* jump to call or task gate */
2803 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2804 rpl = new_cs & 3;
2805 cpl = env->hflags & HF_CPL_MASK;
2806 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2807 switch(type) {
2808 case 1: /* 286 TSS */
2809 case 9: /* 386 TSS */
2810 case 5: /* task gate */
2811 if (dpl < cpl || dpl < rpl)
2812 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2813 next_eip = env->eip + next_eip_addend;
2814 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2815 CC_OP = CC_OP_EFLAGS;
2816 break;
2817 case 4: /* 286 call gate */
2818 case 12: /* 386 call gate */
2819 if ((dpl < cpl) || (dpl < rpl))
2820 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2821 if (!(e2 & DESC_P_MASK))
2822 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2823 gate_cs = e1 >> 16;
2824 new_eip = (e1 & 0xffff);
2825 if (type == 12)
2826 new_eip |= (e2 & 0xffff0000);
2827 if (load_segment(&e1, &e2, gate_cs) != 0)
2828 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2829 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2830 /* must be code segment */
2831 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2832 (DESC_S_MASK | DESC_CS_MASK)))
2833 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2834 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2835 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2836 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2837 if (!(e2 & DESC_P_MASK))
2838#ifdef VBOX /* See page 3-514 of 253666.pdf */
2839 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2840#else
2841 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2842#endif
2843 limit = get_seg_limit(e1, e2);
2844 if (new_eip > limit)
2845 raise_exception_err(EXCP0D_GPF, 0);
2846 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2847 get_seg_base(e1, e2), limit, e2);
2848 EIP = new_eip;
2849 break;
2850 default:
2851 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2852 break;
2853 }
2854 }
2855}
2856
2857/* real mode call */
2858void helper_lcall_real(int new_cs, target_ulong new_eip1,
2859 int shift, int next_eip)
2860{
2861 int new_eip;
2862 uint32_t esp, esp_mask;
2863 target_ulong ssp;
2864
2865 new_eip = new_eip1;
2866 esp = ESP;
2867 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2868 ssp = env->segs[R_SS].base;
2869 if (shift) {
2870 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2871 PUSHL(ssp, esp, esp_mask, next_eip);
2872 } else {
2873 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2874 PUSHW(ssp, esp, esp_mask, next_eip);
2875 }
2876
2877 SET_ESP(esp, esp_mask);
2878 env->eip = new_eip;
2879 env->segs[R_CS].selector = new_cs;
2880 env->segs[R_CS].base = (new_cs << 4);
2881}
2882
2883/* protected mode call */
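/* A direct call to a code segment pushes CS:EIP on the current stack.  A
   call through a call gate may switch to an inner-privilege stack fetched
   from the TSS (get_ss_esp_from_tss) and copies param_count parameters from
   the caller's stack before pushing the return address. */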
2884void helper_lcall_protected(int new_cs, target_ulong new_eip,
2885 int shift, int next_eip_addend)
2886{
2887 int new_stack, i;
2888 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2889 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2890 uint32_t val, limit, old_sp_mask;
2891 target_ulong ssp, old_ssp, next_eip;
2892
2893#ifdef VBOX
2894 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2895#endif
2896 next_eip = env->eip + next_eip_addend;
2897#ifdef DEBUG_PCALL
2898 if (loglevel & CPU_LOG_PCALL) {
2899 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2900 new_cs, (uint32_t)new_eip, shift);
2901 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2902 }
2903#endif
2904 if ((new_cs & 0xfffc) == 0)
2905 raise_exception_err(EXCP0D_GPF, 0);
2906 if (load_segment(&e1, &e2, new_cs) != 0)
2907 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2908 cpl = env->hflags & HF_CPL_MASK;
2909#ifdef DEBUG_PCALL
2910 if (loglevel & CPU_LOG_PCALL) {
2911 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2912 }
2913#endif
2914 if (e2 & DESC_S_MASK) {
2915 if (!(e2 & DESC_CS_MASK))
2916 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2917 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2918 if (e2 & DESC_C_MASK) {
2919 /* conforming code segment */
2920 if (dpl > cpl)
2921 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2922 } else {
2923 /* non conforming code segment */
2924 rpl = new_cs & 3;
2925 if (rpl > cpl)
2926 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2927 if (dpl != cpl)
2928 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2929 }
2930 if (!(e2 & DESC_P_MASK))
2931 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2932
2933#ifdef TARGET_X86_64
2934 /* XXX: check 16/32 bit cases in long mode */
2935 if (shift == 2) {
2936 target_ulong rsp;
2937 /* 64 bit case */
2938 rsp = ESP;
2939 PUSHQ(rsp, env->segs[R_CS].selector);
2940 PUSHQ(rsp, next_eip);
2941 /* from this point, not restartable */
2942 ESP = rsp;
2943 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2944 get_seg_base(e1, e2),
2945 get_seg_limit(e1, e2), e2);
2946 EIP = new_eip;
2947 } else
2948#endif
2949 {
2950 sp = ESP;
2951 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2952 ssp = env->segs[R_SS].base;
2953 if (shift) {
2954 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2955 PUSHL(ssp, sp, sp_mask, next_eip);
2956 } else {
2957 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2958 PUSHW(ssp, sp, sp_mask, next_eip);
2959 }
2960
2961 limit = get_seg_limit(e1, e2);
2962 if (new_eip > limit)
2963 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2964 /* from this point, not restartable */
2965 SET_ESP(sp, sp_mask);
2966 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2967 get_seg_base(e1, e2), limit, e2);
2968 EIP = new_eip;
2969 }
2970 } else {
2971 /* check gate type */
2972 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2973 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2974 rpl = new_cs & 3;
2975 switch(type) {
2976 case 1: /* available 286 TSS */
2977 case 9: /* available 386 TSS */
2978 case 5: /* task gate */
2979 if (dpl < cpl || dpl < rpl)
2980 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2981 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2982 CC_OP = CC_OP_EFLAGS;
2983 return;
2984 case 4: /* 286 call gate */
2985 case 12: /* 386 call gate */
2986 break;
2987 default:
2988 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2989 break;
2990 }
2991 shift = type >> 3;
2992
2993 if (dpl < cpl || dpl < rpl)
2994 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2995 /* check valid bit */
2996 if (!(e2 & DESC_P_MASK))
2997 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2998 selector = e1 >> 16;
2999 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
3000 param_count = e2 & 0x1f;
3001 if ((selector & 0xfffc) == 0)
3002 raise_exception_err(EXCP0D_GPF, 0);
3003
3004 if (load_segment(&e1, &e2, selector) != 0)
3005 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3006 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3007 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3008 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3009 if (dpl > cpl)
3010 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3011 if (!(e2 & DESC_P_MASK))
3012 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3013
3014 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3015 /* to inner privilege */
3016 get_ss_esp_from_tss(&ss, &sp, dpl);
3017#ifdef DEBUG_PCALL
3018 if (loglevel & CPU_LOG_PCALL)
3019 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3020 ss, sp, param_count, ESP);
3021#endif
3022 if ((ss & 0xfffc) == 0)
3023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3024 if ((ss & 3) != dpl)
3025 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3026 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3027 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3028 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3029 if (ss_dpl != dpl)
3030 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3031 if (!(ss_e2 & DESC_S_MASK) ||
3032 (ss_e2 & DESC_CS_MASK) ||
3033 !(ss_e2 & DESC_W_MASK))
3034 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3035 if (!(ss_e2 & DESC_P_MASK))
3036#ifdef VBOX /* See page 3-99 of 253666.pdf */
3037 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3038#else
3039 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3040#endif
3041
3042 // push_size = ((param_count * 2) + 8) << shift;
3043
3044 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3045 old_ssp = env->segs[R_SS].base;
3046
3047 sp_mask = get_sp_mask(ss_e2);
3048 ssp = get_seg_base(ss_e1, ss_e2);
3049 if (shift) {
3050 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3051 PUSHL(ssp, sp, sp_mask, ESP);
3052 for(i = param_count - 1; i >= 0; i--) {
3053 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3054 PUSHL(ssp, sp, sp_mask, val);
3055 }
3056 } else {
3057 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3058 PUSHW(ssp, sp, sp_mask, ESP);
3059 for(i = param_count - 1; i >= 0; i--) {
3060 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3061 PUSHW(ssp, sp, sp_mask, val);
3062 }
3063 }
3064 new_stack = 1;
3065 } else {
3066 /* to same privilege */
3067 sp = ESP;
3068 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3069 ssp = env->segs[R_SS].base;
3070 // push_size = (4 << shift);
3071 new_stack = 0;
3072 }
3073
3074 if (shift) {
3075 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3076 PUSHL(ssp, sp, sp_mask, next_eip);
3077 } else {
3078 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3079 PUSHW(ssp, sp, sp_mask, next_eip);
3080 }
3081
3082 /* from this point, not restartable */
3083
3084 if (new_stack) {
3085 ss = (ss & ~3) | dpl;
3086 cpu_x86_load_seg_cache(env, R_SS, ss,
3087 ssp,
3088 get_seg_limit(ss_e1, ss_e2),
3089 ss_e2);
3090 }
3091
3092 selector = (selector & ~3) | dpl;
3093 cpu_x86_load_seg_cache(env, R_CS, selector,
3094 get_seg_base(e1, e2),
3095 get_seg_limit(e1, e2),
3096 e2);
3097 cpu_x86_set_cpl(env, dpl);
3098 SET_ESP(sp, sp_mask);
3099 EIP = offset;
3100 }
3101#ifdef USE_KQEMU
3102 if (kqemu_is_ok(env)) {
3103 env->exception_index = -1;
3104 cpu_loop_exit();
3105 }
3106#endif
3107}
3108
3109/* real and vm86 mode iret */
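/* The VBOX-only block below implements the VME (virtual-8086 mode
   extensions) rules: with CR4.VME set and IOPL < 3, IRET raises #GP if it
   would set TF or enable interrupts while a virtual interrupt is pending,
   and otherwise the popped IF bit is reflected into VIF instead of IF. */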
3110void helper_iret_real(int shift)
3111{
3112 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3113 target_ulong ssp;
3114 int eflags_mask;
3115#ifdef VBOX
3116 bool fVME = false;
3117
3118 remR3TrapClear(env->pVM);
3119#endif /* VBOX */
3120
3121 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3122 sp = ESP;
3123 ssp = env->segs[R_SS].base;
3124 if (shift == 1) {
3125 /* 32 bits */
3126 POPL(ssp, sp, sp_mask, new_eip);
3127 POPL(ssp, sp, sp_mask, new_cs);
3128 new_cs &= 0xffff;
3129 POPL(ssp, sp, sp_mask, new_eflags);
3130 } else {
3131 /* 16 bits */
3132 POPW(ssp, sp, sp_mask, new_eip);
3133 POPW(ssp, sp, sp_mask, new_cs);
3134 POPW(ssp, sp, sp_mask, new_eflags);
3135 }
3136#ifdef VBOX
3137 if ( (env->eflags & VM_MASK)
3138 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3139 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3140 {
3141 fVME = true;
3142 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3143 /* if TF will be set -> #GP */
3144 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3145 || (new_eflags & TF_MASK))
3146 raise_exception(EXCP0D_GPF);
3147 }
3148#endif /* VBOX */
3149 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3150 env->segs[R_CS].selector = new_cs;
3151 env->segs[R_CS].base = (new_cs << 4);
3152 env->eip = new_eip;
3153#ifdef VBOX
3154 if (fVME)
3155 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3156 else
3157#endif
3158 if (env->eflags & VM_MASK)
3159 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3160 else
3161 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3162 if (shift == 0)
3163 eflags_mask &= 0xffff;
3164 load_eflags(new_eflags, eflags_mask);
3165 env->hflags2 &= ~HF2_NMI_MASK;
3166#ifdef VBOX
3167 if (fVME)
3168 {
3169 if (new_eflags & IF_MASK)
3170 env->eflags |= VIF_MASK;
3171 else
3172 env->eflags &= ~VIF_MASK;
3173 }
3174#endif /* VBOX */
3175}
3176
3177#ifndef VBOX
3178static inline void validate_seg(int seg_reg, int cpl)
3179#else /* VBOX */
3180DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3181#endif /* VBOX */
3182{
3183 int dpl;
3184 uint32_t e2;
3185
3186 /* XXX: on x86_64, we do not want to nullify FS and GS because
3187 they may still contain a valid base. I would be interested to
3188 know how a real x86_64 CPU behaves */
3189 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3190 (env->segs[seg_reg].selector & 0xfffc) == 0)
3191 return;
3192
3193 e2 = env->segs[seg_reg].flags;
3194 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3195 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3196 /* data or non conforming code segment */
3197 if (dpl < cpl) {
3198 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3199 }
3200 }
3201}
3202
3203/* protected mode iret */
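/* Common worker for IRET and far RET.  'shift' selects the operand size
   (0 = 16-bit, 1 = 32-bit, 2 = 64-bit pops), 'is_iret' additionally pops
   EFLAGS and allows a return to VM86, and 'addend' is the immediate of
   RET imm16 that is added to the stack pointer after the pops. */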
3204#ifndef VBOX
3205static inline void helper_ret_protected(int shift, int is_iret, int addend)
3206#else /* VBOX */
3207DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3208#endif /* VBOX */
3209{
3210 uint32_t new_cs, new_eflags, new_ss;
3211 uint32_t new_es, new_ds, new_fs, new_gs;
3212 uint32_t e1, e2, ss_e1, ss_e2;
3213 int cpl, dpl, rpl, eflags_mask, iopl;
3214 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3215
3216#ifdef VBOX
3217 ss_e1 = ss_e2 = e1 = e2 = 0;
3218#endif
3219
3220#ifdef TARGET_X86_64
3221 if (shift == 2)
3222 sp_mask = -1;
3223 else
3224#endif
3225 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3226 sp = ESP;
3227 ssp = env->segs[R_SS].base;
3228 new_eflags = 0; /* avoid warning */
3229#ifdef TARGET_X86_64
3230 if (shift == 2) {
3231 POPQ(sp, new_eip);
3232 POPQ(sp, new_cs);
3233 new_cs &= 0xffff;
3234 if (is_iret) {
3235 POPQ(sp, new_eflags);
3236 }
3237 } else
3238#endif
3239 if (shift == 1) {
3240 /* 32 bits */
3241 POPL(ssp, sp, sp_mask, new_eip);
3242 POPL(ssp, sp, sp_mask, new_cs);
3243 new_cs &= 0xffff;
3244 if (is_iret) {
3245 POPL(ssp, sp, sp_mask, new_eflags);
3246#if defined(VBOX) && defined(DEBUG)
3247 printf("iret: new CS %04X\n", new_cs);
3248 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3249 printf("iret: new EFLAGS %08X\n", new_eflags);
3250 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3251#endif
3252 if (new_eflags & VM_MASK)
3253 goto return_to_vm86;
3254 }
3255#ifdef VBOX
3256 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3257 {
3258#ifdef DEBUG
3259 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3260#endif
3261 new_cs = new_cs & 0xfffc;
3262 }
3263#endif
3264 } else {
3265 /* 16 bits */
3266 POPW(ssp, sp, sp_mask, new_eip);
3267 POPW(ssp, sp, sp_mask, new_cs);
3268 if (is_iret)
3269 POPW(ssp, sp, sp_mask, new_eflags);
3270 }
3271#ifdef DEBUG_PCALL
3272 if (loglevel & CPU_LOG_PCALL) {
3273 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3274 new_cs, new_eip, shift, addend);
3275 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3276 }
3277#endif
3278 if ((new_cs & 0xfffc) == 0)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
 3281        printf("(new_cs & 0xfffc) == 0\n");
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 if (load_segment(&e1, &e2, new_cs) != 0)
3286 {
3287#if defined(VBOX) && defined(DEBUG)
3288 printf("load_segment failed\n");
3289#endif
3290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3291 }
3292 if (!(e2 & DESC_S_MASK) ||
3293 !(e2 & DESC_CS_MASK))
3294 {
3295#if defined(VBOX) && defined(DEBUG)
3296 printf("e2 mask %08x\n", e2);
3297#endif
3298 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3299 }
3300 cpl = env->hflags & HF_CPL_MASK;
3301 rpl = new_cs & 3;
3302 if (rpl < cpl)
3303 {
3304#if defined(VBOX) && defined(DEBUG)
3305 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3306#endif
3307 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3308 }
3309 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3310 if (e2 & DESC_C_MASK) {
3311 if (dpl > rpl)
3312 {
3313#if defined(VBOX) && defined(DEBUG)
3314 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3315#endif
3316 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3317 }
3318 } else {
3319 if (dpl != rpl)
3320 {
3321#if defined(VBOX) && defined(DEBUG)
3322 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3323#endif
3324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3325 }
3326 }
3327 if (!(e2 & DESC_P_MASK))
3328 {
3329#if defined(VBOX) && defined(DEBUG)
3330 printf("DESC_P_MASK e2=%08x\n", e2);
3331#endif
3332 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3333 }
3334
3335 sp += addend;
3336 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3337 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3338 /* return to same privilege level */
3339 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3340 get_seg_base(e1, e2),
3341 get_seg_limit(e1, e2),
3342 e2);
3343 } else {
3344 /* return to different privilege level */
3345#ifdef TARGET_X86_64
3346 if (shift == 2) {
3347 POPQ(sp, new_esp);
3348 POPQ(sp, new_ss);
3349 new_ss &= 0xffff;
3350 } else
3351#endif
3352 if (shift == 1) {
3353 /* 32 bits */
3354 POPL(ssp, sp, sp_mask, new_esp);
3355 POPL(ssp, sp, sp_mask, new_ss);
3356 new_ss &= 0xffff;
3357 } else {
3358 /* 16 bits */
3359 POPW(ssp, sp, sp_mask, new_esp);
3360 POPW(ssp, sp, sp_mask, new_ss);
3361 }
3362#ifdef DEBUG_PCALL
3363 if (loglevel & CPU_LOG_PCALL) {
3364 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3365 new_ss, new_esp);
3366 }
3367#endif
3368 if ((new_ss & 0xfffc) == 0) {
3369#ifdef TARGET_X86_64
 3370            /* NULL ss is allowed in long mode if cpl != 3 */
3371 /* XXX: test CS64 ? */
3372 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3373 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3374 0, 0xffffffff,
3375 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3376 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3377 DESC_W_MASK | DESC_A_MASK);
3378 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3379 } else
3380#endif
3381 {
3382 raise_exception_err(EXCP0D_GPF, 0);
3383 }
3384 } else {
3385 if ((new_ss & 3) != rpl)
3386 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3387 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3388 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3389 if (!(ss_e2 & DESC_S_MASK) ||
3390 (ss_e2 & DESC_CS_MASK) ||
3391 !(ss_e2 & DESC_W_MASK))
3392 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3393 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3394 if (dpl != rpl)
3395 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3396 if (!(ss_e2 & DESC_P_MASK))
3397 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3398 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3399 get_seg_base(ss_e1, ss_e2),
3400 get_seg_limit(ss_e1, ss_e2),
3401 ss_e2);
3402 }
3403
3404 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3405 get_seg_base(e1, e2),
3406 get_seg_limit(e1, e2),
3407 e2);
3408 cpu_x86_set_cpl(env, rpl);
3409 sp = new_esp;
3410#ifdef TARGET_X86_64
3411 if (env->hflags & HF_CS64_MASK)
3412 sp_mask = -1;
3413 else
3414#endif
3415 sp_mask = get_sp_mask(ss_e2);
3416
3417 /* validate data segments */
3418 validate_seg(R_ES, rpl);
3419 validate_seg(R_DS, rpl);
3420 validate_seg(R_FS, rpl);
3421 validate_seg(R_GS, rpl);
3422
3423 sp += addend;
3424 }
3425 SET_ESP(sp, sp_mask);
3426 env->eip = new_eip;
3427 if (is_iret) {
3428 /* NOTE: 'cpl' is the _old_ CPL */
3429 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3430 if (cpl == 0)
3431#ifdef VBOX
3432 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3433#else
3434 eflags_mask |= IOPL_MASK;
3435#endif
3436 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3437 if (cpl <= iopl)
3438 eflags_mask |= IF_MASK;
3439 if (shift == 0)
3440 eflags_mask &= 0xffff;
3441 load_eflags(new_eflags, eflags_mask);
3442 }
3443 return;
3444
3445 return_to_vm86:
3446 POPL(ssp, sp, sp_mask, new_esp);
3447 POPL(ssp, sp, sp_mask, new_ss);
3448 POPL(ssp, sp, sp_mask, new_es);
3449 POPL(ssp, sp, sp_mask, new_ds);
3450 POPL(ssp, sp, sp_mask, new_fs);
3451 POPL(ssp, sp, sp_mask, new_gs);
3452
3453 /* modify processor state */
3454 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3455 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3456 load_seg_vm(R_CS, new_cs & 0xffff);
3457 cpu_x86_set_cpl(env, 3);
3458 load_seg_vm(R_SS, new_ss & 0xffff);
3459 load_seg_vm(R_ES, new_es & 0xffff);
3460 load_seg_vm(R_DS, new_ds & 0xffff);
3461 load_seg_vm(R_FS, new_fs & 0xffff);
3462 load_seg_vm(R_GS, new_gs & 0xffff);
3463
3464 env->eip = new_eip & 0xffff;
3465 ESP = new_esp;
3466}
3467
3468void helper_iret_protected(int shift, int next_eip)
3469{
3470 int tss_selector, type;
3471 uint32_t e1, e2;
3472
3473#ifdef VBOX
3474 e1 = e2 = 0;
3475 remR3TrapClear(env->pVM);
3476#endif
3477
3478 /* specific case for TSS */
3479 if (env->eflags & NT_MASK) {
3480#ifdef TARGET_X86_64
3481 if (env->hflags & HF_LMA_MASK)
3482 raise_exception_err(EXCP0D_GPF, 0);
3483#endif
3484 tss_selector = lduw_kernel(env->tr.base + 0);
3485 if (tss_selector & 4)
3486 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3487 if (load_segment(&e1, &e2, tss_selector) != 0)
3488 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3489 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3490 /* NOTE: we check both segment and busy TSS */
3491 if (type != 3)
3492 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3493 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3494 } else {
3495 helper_ret_protected(shift, 1, 0);
3496 }
3497 env->hflags2 &= ~HF2_NMI_MASK;
3498#ifdef USE_KQEMU
3499 if (kqemu_is_ok(env)) {
3500 CC_OP = CC_OP_EFLAGS;
3501 env->exception_index = -1;
3502 cpu_loop_exit();
3503 }
3504#endif
3505}
3506
3507void helper_lret_protected(int shift, int addend)
3508{
3509 helper_ret_protected(shift, 0, addend);
3510#ifdef USE_KQEMU
3511 if (kqemu_is_ok(env)) {
3512 env->exception_index = -1;
3513 cpu_loop_exit();
3514 }
3515#endif
3516}
3517
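/*
 * SYSENTER: fast ring-0 entry.  CS and SS are loaded as flat segments
 * derived from IA32_SYSENTER_CS (SS selector = CS selector + 8), EIP and
 * ESP come from the SYSENTER_EIP/ESP MSRs, and VM/IF/RF are cleared.
 * A zero SYSENTER_CS raises #GP(0).
 */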
3518void helper_sysenter(void)
3519{
3520 if (env->sysenter_cs == 0) {
3521 raise_exception_err(EXCP0D_GPF, 0);
3522 }
3523 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3524 cpu_x86_set_cpl(env, 0);
3525
3526#ifdef TARGET_X86_64
3527 if (env->hflags & HF_LMA_MASK) {
3528 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3529 0, 0xffffffff,
3530 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3531 DESC_S_MASK |
3532 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3533 } else
3534#endif
3535 {
3536 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3537 0, 0xffffffff,
3538 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3539 DESC_S_MASK |
3540 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3541 }
3542 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3543 0, 0xffffffff,
3544 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3545 DESC_S_MASK |
3546 DESC_W_MASK | DESC_A_MASK);
3547 ESP = env->sysenter_esp;
3548 EIP = env->sysenter_eip;
3549}
3550
3551void helper_sysexit(int dflag)
3552{
3553 int cpl;
3554
3555 cpl = env->hflags & HF_CPL_MASK;
3556 if (env->sysenter_cs == 0 || cpl != 0) {
3557 raise_exception_err(EXCP0D_GPF, 0);
3558 }
3559 cpu_x86_set_cpl(env, 3);
3560#ifdef TARGET_X86_64
3561 if (dflag == 2) {
3562 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3563 0, 0xffffffff,
3564 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3565 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3566 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3567 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3568 0, 0xffffffff,
3569 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3570 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3571 DESC_W_MASK | DESC_A_MASK);
3572 } else
3573#endif
3574 {
3575 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3576 0, 0xffffffff,
3577 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3578 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3579 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3580 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3581 0, 0xffffffff,
3582 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3583 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3584 DESC_W_MASK | DESC_A_MASK);
3585 }
3586 ESP = ECX;
3587 EIP = EDX;
3588#ifdef USE_KQEMU
3589 if (kqemu_is_ok(env)) {
3590 env->exception_index = -1;
3591 cpu_loop_exit();
3592 }
3593#endif
3594}
3595
3596#if defined(CONFIG_USER_ONLY)
3597target_ulong helper_read_crN(int reg)
3598{
3599 return 0;
3600}
3601
3602void helper_write_crN(int reg, target_ulong t0)
3603{
3604}
3605#else
3606target_ulong helper_read_crN(int reg)
3607{
3608 target_ulong val;
3609
3610 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3611 switch(reg) {
3612 default:
3613 val = env->cr[reg];
3614 break;
3615 case 8:
3616 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3617 val = cpu_get_apic_tpr(env);
3618 } else {
3619 val = env->v_tpr;
3620 }
3621 break;
3622 }
3623 return val;
3624}
3625
3626void helper_write_crN(int reg, target_ulong t0)
3627{
3628 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3629 switch(reg) {
3630 case 0:
3631 cpu_x86_update_cr0(env, t0);
3632 break;
3633 case 3:
3634 cpu_x86_update_cr3(env, t0);
3635 break;
3636 case 4:
3637 cpu_x86_update_cr4(env, t0);
3638 break;
3639 case 8:
3640 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3641 cpu_set_apic_tpr(env, t0);
3642 }
3643 env->v_tpr = t0 & 0x0f;
3644 break;
3645 default:
3646 env->cr[reg] = t0;
3647 break;
3648 }
3649}
3650#endif
3651
3652void helper_lmsw(target_ulong t0)
3653{
3654 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3655 if already set to one. */
3656 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3657 helper_write_crN(0, t0);
3658}
3659
3660void helper_clts(void)
3661{
3662 env->cr[0] &= ~CR0_TS_MASK;
3663 env->hflags &= ~HF_TS_MASK;
3664}
3665
3666/* XXX: do more */
3667void helper_movl_drN_T0(int reg, target_ulong t0)
3668{
3669 env->dr[reg] = t0;
3670}
3671
3672void helper_invlpg(target_ulong addr)
3673{
3674 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3675 tlb_flush_page(env, addr);
3676}
3677
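/*
 * RDTSC returns the 64-bit time stamp counter in EDX:EAX (here including the
 * SVM tsc_offset).  If CR4.TSD is set, the instruction is privileged and
 * raises #GP when executed with CPL > 0.
 */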
3678void helper_rdtsc(void)
3679{
3680 uint64_t val;
3681
3682 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3683 raise_exception(EXCP0D_GPF);
3684 }
3685 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3686
3687 val = cpu_get_tsc(env) + env->tsc_offset;
3688 EAX = (uint32_t)(val);
3689 EDX = (uint32_t)(val >> 32);
3690}
3691
3692#ifdef VBOX
3693void helper_rdtscp(void)
3694{
3695 uint64_t val;
3696 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3697 raise_exception(EXCP0D_GPF);
3698 }
3699
3700 val = cpu_get_tsc(env);
3701 EAX = (uint32_t)(val);
3702 EDX = (uint32_t)(val >> 32);
3703 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3704}
3705#endif
3706
3707void helper_rdpmc(void)
3708{
3709#ifdef VBOX
3710 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3711 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3712 raise_exception(EXCP0D_GPF);
3713 }
3714 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3715 EAX = 0;
3716 EDX = 0;
3717#else
 3718    if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3719 raise_exception(EXCP0D_GPF);
3720 }
3721 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3722
3723 /* currently unimplemented */
3724 raise_exception_err(EXCP06_ILLOP, 0);
3725#endif
3726}
3727
3728#if defined(CONFIG_USER_ONLY)
3729void helper_wrmsr(void)
3730{
3731}
3732
3733void helper_rdmsr(void)
3734{
3735}
3736#else
3737void helper_wrmsr(void)
3738{
3739 uint64_t val;
3740
3741 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3742
3743 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3744
3745 switch((uint32_t)ECX) {
3746 case MSR_IA32_SYSENTER_CS:
3747 env->sysenter_cs = val & 0xffff;
3748 break;
3749 case MSR_IA32_SYSENTER_ESP:
3750 env->sysenter_esp = val;
3751 break;
3752 case MSR_IA32_SYSENTER_EIP:
3753 env->sysenter_eip = val;
3754 break;
3755 case MSR_IA32_APICBASE:
3756 cpu_set_apic_base(env, val);
3757 break;
3758 case MSR_EFER:
3759 {
3760 uint64_t update_mask;
3761 update_mask = 0;
3762 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3763 update_mask |= MSR_EFER_SCE;
3764 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3765 update_mask |= MSR_EFER_LME;
3766 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3767 update_mask |= MSR_EFER_FFXSR;
3768 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3769 update_mask |= MSR_EFER_NXE;
3770 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3771 update_mask |= MSR_EFER_SVME;
3772 cpu_load_efer(env, (env->efer & ~update_mask) |
3773 (val & update_mask));
3774 }
3775 break;
3776 case MSR_STAR:
3777 env->star = val;
3778 break;
3779 case MSR_PAT:
3780 env->pat = val;
3781 break;
3782 case MSR_VM_HSAVE_PA:
3783 env->vm_hsave = val;
3784 break;
3785#ifdef TARGET_X86_64
3786 case MSR_LSTAR:
3787 env->lstar = val;
3788 break;
3789 case MSR_CSTAR:
3790 env->cstar = val;
3791 break;
3792 case MSR_FMASK:
3793 env->fmask = val;
3794 break;
3795 case MSR_FSBASE:
3796 env->segs[R_FS].base = val;
3797 break;
3798 case MSR_GSBASE:
3799 env->segs[R_GS].base = val;
3800 break;
3801 case MSR_KERNELGSBASE:
3802 env->kernelgsbase = val;
3803 break;
3804#endif
3805 default:
3806#ifndef VBOX
3807 /* XXX: exception ? */
3808 break;
3809#else /* VBOX */
3810 {
3811 uint32_t ecx = (uint32_t)ECX;
3812 /* In X2APIC specification this range is reserved for APIC control. */
3813 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3814 cpu_apic_wrmsr(env, ecx, val);
3815 /** @todo else exception? */
3816 break;
3817 }
3818 case MSR_K8_TSC_AUX:
3819 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3820 break;
3821#endif /* VBOX */
3822 }
3823}
3824
3825void helper_rdmsr(void)
3826{
3827 uint64_t val;
3828 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3829
3830 switch((uint32_t)ECX) {
3831 case MSR_IA32_SYSENTER_CS:
3832 val = env->sysenter_cs;
3833 break;
3834 case MSR_IA32_SYSENTER_ESP:
3835 val = env->sysenter_esp;
3836 break;
3837 case MSR_IA32_SYSENTER_EIP:
3838 val = env->sysenter_eip;
3839 break;
3840 case MSR_IA32_APICBASE:
3841 val = cpu_get_apic_base(env);
3842 break;
3843 case MSR_EFER:
3844 val = env->efer;
3845 break;
3846 case MSR_STAR:
3847 val = env->star;
3848 break;
3849 case MSR_PAT:
3850 val = env->pat;
3851 break;
3852 case MSR_VM_HSAVE_PA:
3853 val = env->vm_hsave;
3854 break;
3855#ifdef VBOX
3856 case MSR_IA32_PERF_STATUS:
3857 case MSR_IA32_PLATFORM_INFO:
3858 val = CPUMGetGuestMsr(env->pVCpu, (uint32_t)ECX);
3859 break;
3860#else
3861 case MSR_IA32_PERF_STATUS:
3862 /* tsc_increment_by_tick */
3863 val = 1000ULL;
3864 /* CPU multiplier */
3865 val |= ((uint64_t)4ULL << 40);
3866 break;
3867#endif
3868#ifdef TARGET_X86_64
3869 case MSR_LSTAR:
3870 val = env->lstar;
3871 break;
3872 case MSR_CSTAR:
3873 val = env->cstar;
3874 break;
3875 case MSR_FMASK:
3876 val = env->fmask;
3877 break;
3878 case MSR_FSBASE:
3879 val = env->segs[R_FS].base;
3880 break;
3881 case MSR_GSBASE:
3882 val = env->segs[R_GS].base;
3883 break;
3884 case MSR_KERNELGSBASE:
3885 val = env->kernelgsbase;
3886 break;
3887#endif
3888#ifdef USE_KQEMU
3889 case MSR_QPI_COMMBASE:
3890 if (env->kqemu_enabled) {
3891 val = kqemu_comm_base;
3892 } else {
3893 val = 0;
3894 }
3895 break;
3896#endif
3897 default:
3898#ifndef VBOX
3899 /* XXX: exception ? */
3900 val = 0;
3901 break;
3902#else /* VBOX */
3903 {
3904 uint32_t ecx = (uint32_t)ECX;
3905 /* In X2APIC specification this range is reserved for APIC control. */
3906 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3907 val = cpu_apic_rdmsr(env, ecx);
3908 else
3909 val = 0; /** @todo else exception? */
3910 break;
3911 }
3912 case MSR_IA32_TSC:
3913 case MSR_K8_TSC_AUX:
3914 val = cpu_rdmsr(env, (uint32_t)ECX);
3915 break;
3916#endif /* VBOX */
3917 }
3918 EAX = (uint32_t)(val);
3919 EDX = (uint32_t)(val >> 32);
3920}
3921#endif
3922
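/*
 * LSL/LAR/VERR/VERW never fault on a bad or inaccessible selector; they
 * report success or failure through ZF, stored via CC_SRC.  LSL returns the
 * expanded segment limit, LAR the access-rights bytes of the descriptor.
 */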
3923target_ulong helper_lsl(target_ulong selector1)
3924{
3925 unsigned int limit;
3926 uint32_t e1, e2, eflags, selector;
3927 int rpl, dpl, cpl, type;
3928
3929 selector = selector1 & 0xffff;
3930 eflags = cc_table[CC_OP].compute_all();
3931 if (load_segment(&e1, &e2, selector) != 0)
3932 goto fail;
3933 rpl = selector & 3;
3934 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3935 cpl = env->hflags & HF_CPL_MASK;
3936 if (e2 & DESC_S_MASK) {
3937 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3938 /* conforming */
3939 } else {
3940 if (dpl < cpl || dpl < rpl)
3941 goto fail;
3942 }
3943 } else {
3944 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3945 switch(type) {
3946 case 1:
3947 case 2:
3948 case 3:
3949 case 9:
3950 case 11:
3951 break;
3952 default:
3953 goto fail;
3954 }
3955 if (dpl < cpl || dpl < rpl) {
3956 fail:
3957 CC_SRC = eflags & ~CC_Z;
3958 return 0;
3959 }
3960 }
3961 limit = get_seg_limit(e1, e2);
3962 CC_SRC = eflags | CC_Z;
3963 return limit;
3964}
3965
3966target_ulong helper_lar(target_ulong selector1)
3967{
3968 uint32_t e1, e2, eflags, selector;
3969 int rpl, dpl, cpl, type;
3970
3971 selector = selector1 & 0xffff;
3972 eflags = cc_table[CC_OP].compute_all();
3973 if ((selector & 0xfffc) == 0)
3974 goto fail;
3975 if (load_segment(&e1, &e2, selector) != 0)
3976 goto fail;
3977 rpl = selector & 3;
3978 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3979 cpl = env->hflags & HF_CPL_MASK;
3980 if (e2 & DESC_S_MASK) {
3981 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3982 /* conforming */
3983 } else {
3984 if (dpl < cpl || dpl < rpl)
3985 goto fail;
3986 }
3987 } else {
3988 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3989 switch(type) {
3990 case 1:
3991 case 2:
3992 case 3:
3993 case 4:
3994 case 5:
3995 case 9:
3996 case 11:
3997 case 12:
3998 break;
3999 default:
4000 goto fail;
4001 }
4002 if (dpl < cpl || dpl < rpl) {
4003 fail:
4004 CC_SRC = eflags & ~CC_Z;
4005 return 0;
4006 }
4007 }
4008 CC_SRC = eflags | CC_Z;
4009 return e2 & 0x00f0ff00;
4010}
4011
4012void helper_verr(target_ulong selector1)
4013{
4014 uint32_t e1, e2, eflags, selector;
4015 int rpl, dpl, cpl;
4016
4017 selector = selector1 & 0xffff;
4018 eflags = cc_table[CC_OP].compute_all();
4019 if ((selector & 0xfffc) == 0)
4020 goto fail;
4021 if (load_segment(&e1, &e2, selector) != 0)
4022 goto fail;
4023 if (!(e2 & DESC_S_MASK))
4024 goto fail;
4025 rpl = selector & 3;
4026 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4027 cpl = env->hflags & HF_CPL_MASK;
4028 if (e2 & DESC_CS_MASK) {
4029 if (!(e2 & DESC_R_MASK))
4030 goto fail;
4031 if (!(e2 & DESC_C_MASK)) {
4032 if (dpl < cpl || dpl < rpl)
4033 goto fail;
4034 }
4035 } else {
4036 if (dpl < cpl || dpl < rpl) {
4037 fail:
4038 CC_SRC = eflags & ~CC_Z;
4039 return;
4040 }
4041 }
4042 CC_SRC = eflags | CC_Z;
4043}
4044
4045void helper_verw(target_ulong selector1)
4046{
4047 uint32_t e1, e2, eflags, selector;
4048 int rpl, dpl, cpl;
4049
4050 selector = selector1 & 0xffff;
4051 eflags = cc_table[CC_OP].compute_all();
4052 if ((selector & 0xfffc) == 0)
4053 goto fail;
4054 if (load_segment(&e1, &e2, selector) != 0)
4055 goto fail;
4056 if (!(e2 & DESC_S_MASK))
4057 goto fail;
4058 rpl = selector & 3;
4059 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4060 cpl = env->hflags & HF_CPL_MASK;
4061 if (e2 & DESC_CS_MASK) {
4062 goto fail;
4063 } else {
4064 if (dpl < cpl || dpl < rpl)
4065 goto fail;
4066 if (!(e2 & DESC_W_MASK)) {
4067 fail:
4068 CC_SRC = eflags & ~CC_Z;
4069 return;
4070 }
4071 }
4072 CC_SRC = eflags | CC_Z;
4073}
4074
4075/* x87 FPU helpers */
4076
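/* Set the given exception bit(s) in the x87 status word; if any pending exception is
   unmasked in the control word, also raise the error-summary (ES) and busy (B) flags. */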
4077static void fpu_set_exception(int mask)
4078{
4079 env->fpus |= mask;
4080 if (env->fpus & (~env->fpuc & FPUC_EM))
4081 env->fpus |= FPUS_SE | FPUS_B;
4082}
4083
4084#ifndef VBOX
4085static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4086#else /* VBOX */
4087DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4088#endif /* VBOX */
4089{
4090 if (b == 0.0)
4091 fpu_set_exception(FPUS_ZE);
4092 return a / b;
4093}
4094
4095void fpu_raise_exception(void)
4096{
4097 if (env->cr[0] & CR0_NE_MASK) {
4098 raise_exception(EXCP10_COPR);
4099 }
4100#if !defined(CONFIG_USER_ONLY)
4101 else {
4102 cpu_set_ferr(env);
4103 }
4104#endif
4105}
4106
4107void helper_flds_FT0(uint32_t val)
4108{
4109 union {
4110 float32 f;
4111 uint32_t i;
4112 } u;
4113 u.i = val;
4114 FT0 = float32_to_floatx(u.f, &env->fp_status);
4115}
4116
4117void helper_fldl_FT0(uint64_t val)
4118{
4119 union {
4120 float64 f;
4121 uint64_t i;
4122 } u;
4123 u.i = val;
4124 FT0 = float64_to_floatx(u.f, &env->fp_status);
4125}
4126
4127void helper_fildl_FT0(int32_t val)
4128{
4129 FT0 = int32_to_floatx(val, &env->fp_status);
4130}
4131
4132void helper_flds_ST0(uint32_t val)
4133{
4134 int new_fpstt;
4135 union {
4136 float32 f;
4137 uint32_t i;
4138 } u;
4139 new_fpstt = (env->fpstt - 1) & 7;
4140 u.i = val;
4141 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4142 env->fpstt = new_fpstt;
4143 env->fptags[new_fpstt] = 0; /* validate stack entry */
4144}
4145
4146void helper_fldl_ST0(uint64_t val)
4147{
4148 int new_fpstt;
4149 union {
4150 float64 f;
4151 uint64_t i;
4152 } u;
4153 new_fpstt = (env->fpstt - 1) & 7;
4154 u.i = val;
4155 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4156 env->fpstt = new_fpstt;
4157 env->fptags[new_fpstt] = 0; /* validate stack entry */
4158}
4159
4160void helper_fildl_ST0(int32_t val)
4161{
4162 int new_fpstt;
4163 new_fpstt = (env->fpstt - 1) & 7;
4164 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4165 env->fpstt = new_fpstt;
4166 env->fptags[new_fpstt] = 0; /* validate stack entry */
4167}
4168
4169void helper_fildll_ST0(int64_t val)
4170{
4171 int new_fpstt;
4172 new_fpstt = (env->fpstt - 1) & 7;
4173 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4174 env->fpstt = new_fpstt;
4175 env->fptags[new_fpstt] = 0; /* validate stack entry */
4176}
4177
4178#ifndef VBOX
4179uint32_t helper_fsts_ST0(void)
4180#else
4181RTCCUINTREG helper_fsts_ST0(void)
4182#endif
4183{
4184 union {
4185 float32 f;
4186 uint32_t i;
4187 } u;
4188 u.f = floatx_to_float32(ST0, &env->fp_status);
4189 return u.i;
4190}
4191
4192uint64_t helper_fstl_ST0(void)
4193{
4194 union {
4195 float64 f;
4196 uint64_t i;
4197 } u;
4198 u.f = floatx_to_float64(ST0, &env->fp_status);
4199 return u.i;
4200}
4201#ifndef VBOX
4202int32_t helper_fist_ST0(void)
4203#else
4204RTCCINTREG helper_fist_ST0(void)
4205#endif
4206{
4207 int32_t val;
4208 val = floatx_to_int32(ST0, &env->fp_status);
4209 if (val != (int16_t)val)
4210 val = -32768;
4211 return val;
4212}
4213
4214#ifndef VBOX
4215int32_t helper_fistl_ST0(void)
4216#else
4217RTCCINTREG helper_fistl_ST0(void)
4218#endif
4219{
4220 int32_t val;
4221 val = floatx_to_int32(ST0, &env->fp_status);
4222 return val;
4223}
4224
4225int64_t helper_fistll_ST0(void)
4226{
4227 int64_t val;
4228 val = floatx_to_int64(ST0, &env->fp_status);
4229 return val;
4230}
4231
4232#ifndef VBOX
4233int32_t helper_fistt_ST0(void)
4234#else
4235RTCCINTREG helper_fistt_ST0(void)
4236#endif
4237{
4238 int32_t val;
4239 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4240 if (val != (int16_t)val)
4241 val = -32768;
4242 return val;
4243}
4244
4245#ifndef VBOX
4246int32_t helper_fisttl_ST0(void)
4247#else
4248RTCCINTREG helper_fisttl_ST0(void)
4249#endif
4250{
4251 int32_t val;
4252 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4253 return val;
4254}
4255
4256int64_t helper_fisttll_ST0(void)
4257{
4258 int64_t val;
4259 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4260 return val;
4261}
4262
4263void helper_fldt_ST0(target_ulong ptr)
4264{
4265 int new_fpstt;
4266 new_fpstt = (env->fpstt - 1) & 7;
4267 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4268 env->fpstt = new_fpstt;
4269 env->fptags[new_fpstt] = 0; /* validate stack entry */
4270}
4271
4272void helper_fstt_ST0(target_ulong ptr)
4273{
4274 helper_fstt(ST0, ptr);
4275}
4276
4277void helper_fpush(void)
4278{
4279 fpush();
4280}
4281
4282void helper_fpop(void)
4283{
4284 fpop();
4285}
4286
4287void helper_fdecstp(void)
4288{
4289 env->fpstt = (env->fpstt - 1) & 7;
4290 env->fpus &= (~0x4700);
4291}
4292
4293void helper_fincstp(void)
4294{
4295 env->fpstt = (env->fpstt + 1) & 7;
4296 env->fpus &= (~0x4700);
4297}
4298
4299/* FPU move */
4300
4301void helper_ffree_STN(int st_index)
4302{
4303 env->fptags[(env->fpstt + st_index) & 7] = 1;
4304}
4305
4306void helper_fmov_ST0_FT0(void)
4307{
4308 ST0 = FT0;
4309}
4310
4311void helper_fmov_FT0_STN(int st_index)
4312{
4313 FT0 = ST(st_index);
4314}
4315
4316void helper_fmov_ST0_STN(int st_index)
4317{
4318 ST0 = ST(st_index);
4319}
4320
4321void helper_fmov_STN_ST0(int st_index)
4322{
4323 ST(st_index) = ST0;
4324}
4325
4326void helper_fxchg_ST0_STN(int st_index)
4327{
4328 CPU86_LDouble tmp;
4329 tmp = ST(st_index);
4330 ST(st_index) = ST0;
4331 ST0 = tmp;
4332}
4333
4334/* FPU operations */
4335
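/* FCOM/FUCOM condition codes, indexed by (floatx compare result + 1):
   less -> C0, equal -> C3, greater -> none, unordered -> C3|C2|C0. */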
4336static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4337
4338void helper_fcom_ST0_FT0(void)
4339{
4340 int ret;
4341
4342 ret = floatx_compare(ST0, FT0, &env->fp_status);
4343 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4344 FORCE_RET();
4345}
4346
4347void helper_fucom_ST0_FT0(void)
4348{
4349 int ret;
4350
4351 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4352    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4353 FORCE_RET();
4354}
4355
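/* FCOMI/FUCOMI use the same indexing but set EFLAGS instead:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */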
4356static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4357
4358void helper_fcomi_ST0_FT0(void)
4359{
4360 int eflags;
4361 int ret;
4362
4363 ret = floatx_compare(ST0, FT0, &env->fp_status);
4364 eflags = cc_table[CC_OP].compute_all();
4365 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4366 CC_SRC = eflags;
4367 FORCE_RET();
4368}
4369
4370void helper_fucomi_ST0_FT0(void)
4371{
4372 int eflags;
4373 int ret;
4374
4375 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4376 eflags = cc_table[CC_OP].compute_all();
4377 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4378 CC_SRC = eflags;
4379 FORCE_RET();
4380}
4381
4382void helper_fadd_ST0_FT0(void)
4383{
4384 ST0 += FT0;
4385}
4386
4387void helper_fmul_ST0_FT0(void)
4388{
4389 ST0 *= FT0;
4390}
4391
4392void helper_fsub_ST0_FT0(void)
4393{
4394 ST0 -= FT0;
4395}
4396
4397void helper_fsubr_ST0_FT0(void)
4398{
4399 ST0 = FT0 - ST0;
4400}
4401
4402void helper_fdiv_ST0_FT0(void)
4403{
4404 ST0 = helper_fdiv(ST0, FT0);
4405}
4406
4407void helper_fdivr_ST0_FT0(void)
4408{
4409 ST0 = helper_fdiv(FT0, ST0);
4410}
4411
4412/* fp operations between STN and ST0 */
4413
4414void helper_fadd_STN_ST0(int st_index)
4415{
4416 ST(st_index) += ST0;
4417}
4418
4419void helper_fmul_STN_ST0(int st_index)
4420{
4421 ST(st_index) *= ST0;
4422}
4423
4424void helper_fsub_STN_ST0(int st_index)
4425{
4426 ST(st_index) -= ST0;
4427}
4428
4429void helper_fsubr_STN_ST0(int st_index)
4430{
4431 CPU86_LDouble *p;
4432 p = &ST(st_index);
4433 *p = ST0 - *p;
4434}
4435
4436void helper_fdiv_STN_ST0(int st_index)
4437{
4438 CPU86_LDouble *p;
4439 p = &ST(st_index);
4440 *p = helper_fdiv(*p, ST0);
4441}
4442
4443void helper_fdivr_STN_ST0(int st_index)
4444{
4445 CPU86_LDouble *p;
4446 p = &ST(st_index);
4447 *p = helper_fdiv(ST0, *p);
4448}
4449
4450/* misc FPU operations */
4451void helper_fchs_ST0(void)
4452{
4453 ST0 = floatx_chs(ST0);
4454}
4455
4456void helper_fabs_ST0(void)
4457{
4458 ST0 = floatx_abs(ST0);
4459}
4460
4461void helper_fld1_ST0(void)
4462{
4463 ST0 = f15rk[1];
4464}
4465
4466void helper_fldl2t_ST0(void)
4467{
4468 ST0 = f15rk[6];
4469}
4470
4471void helper_fldl2e_ST0(void)
4472{
4473 ST0 = f15rk[5];
4474}
4475
4476void helper_fldpi_ST0(void)
4477{
4478 ST0 = f15rk[2];
4479}
4480
4481void helper_fldlg2_ST0(void)
4482{
4483 ST0 = f15rk[3];
4484}
4485
4486void helper_fldln2_ST0(void)
4487{
4488 ST0 = f15rk[4];
4489}
4490
4491void helper_fldz_ST0(void)
4492{
4493 ST0 = f15rk[0];
4494}
4495
4496void helper_fldz_FT0(void)
4497{
4498 FT0 = f15rk[0];
4499}
4500
4501#ifndef VBOX
4502uint32_t helper_fnstsw(void)
4503#else
4504RTCCUINTREG helper_fnstsw(void)
4505#endif
4506{
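    /* merge the top-of-stack pointer (fpstt) into bits 13:11 of the status word */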
4507 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4508}
4509
4510#ifndef VBOX
4511uint32_t helper_fnstcw(void)
4512#else
4513RTCCUINTREG helper_fnstcw(void)
4514#endif
4515{
4516 return env->fpuc;
4517}
4518
4519static void update_fp_status(void)
4520{
4521 int rnd_type;
4522
4523 /* set rounding mode */
4524 switch(env->fpuc & RC_MASK) {
4525 default:
4526 case RC_NEAR:
4527 rnd_type = float_round_nearest_even;
4528 break;
4529 case RC_DOWN:
4530 rnd_type = float_round_down;
4531 break;
4532 case RC_UP:
4533 rnd_type = float_round_up;
4534 break;
4535 case RC_CHOP:
4536 rnd_type = float_round_to_zero;
4537 break;
4538 }
4539 set_float_rounding_mode(rnd_type, &env->fp_status);
4540#ifdef FLOATX80
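    /* map the precision-control field (FPUC bits 9:8) to softfloat's rounding precision:
       0 = single -> 32 bits, 2 = double -> 64 bits, 3 (and the reserved value 1) -> 80 bits */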
4541 switch((env->fpuc >> 8) & 3) {
4542 case 0:
4543 rnd_type = 32;
4544 break;
4545 case 2:
4546 rnd_type = 64;
4547 break;
4548 case 3:
4549 default:
4550 rnd_type = 80;
4551 break;
4552 }
4553 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4554#endif
4555}
4556
4557void helper_fldcw(uint32_t val)
4558{
4559 env->fpuc = val;
4560 update_fp_status();
4561}
4562
4563void helper_fclex(void)
4564{
4565 env->fpus &= 0x7f00;
4566}
4567
4568void helper_fwait(void)
4569{
4570 if (env->fpus & FPUS_SE)
4571 fpu_raise_exception();
4572 FORCE_RET();
4573}
4574
4575void helper_fninit(void)
4576{
4577 env->fpus = 0;
4578 env->fpstt = 0;
4579 env->fpuc = 0x37f;
4580 env->fptags[0] = 1;
4581 env->fptags[1] = 1;
4582 env->fptags[2] = 1;
4583 env->fptags[3] = 1;
4584 env->fptags[4] = 1;
4585 env->fptags[5] = 1;
4586 env->fptags[6] = 1;
4587 env->fptags[7] = 1;
4588}
4589
4590/* BCD ops */
4591
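/* The x87 packed BCD format stores 18 decimal digits in bytes 0..8 (two digits per byte,
   least significant byte first) with the sign in bit 7 of byte 9; e.g. +1234 is stored as
   bytes 0x34 0x12 0x00 ... with byte 9 = 0x00. */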
4592void helper_fbld_ST0(target_ulong ptr)
4593{
4594 CPU86_LDouble tmp;
4595 uint64_t val;
4596 unsigned int v;
4597 int i;
4598
4599 val = 0;
4600 for(i = 8; i >= 0; i--) {
4601 v = ldub(ptr + i);
4602 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4603 }
4604 tmp = val;
4605 if (ldub(ptr + 9) & 0x80)
4606 tmp = -tmp;
4607 fpush();
4608 ST0 = tmp;
4609}
4610
4611void helper_fbst_ST0(target_ulong ptr)
4612{
4613 int v;
4614 target_ulong mem_ref, mem_end;
4615 int64_t val;
4616
4617 val = floatx_to_int64(ST0, &env->fp_status);
4618 mem_ref = ptr;
4619 mem_end = mem_ref + 9;
4620 if (val < 0) {
4621 stb(mem_end, 0x80);
4622 val = -val;
4623 } else {
4624 stb(mem_end, 0x00);
4625 }
4626 while (mem_ref < mem_end) {
4627 if (val == 0)
4628 break;
4629 v = val % 100;
4630 val = val / 100;
4631 v = ((v / 10) << 4) | (v % 10);
4632 stb(mem_ref++, v);
4633 }
4634 while (mem_ref < mem_end) {
4635 stb(mem_ref++, 0);
4636 }
4637}
4638
4639void helper_f2xm1(void)
4640{
4641 ST0 = pow(2.0,ST0) - 1.0;
4642}
4643
4644void helper_fyl2x(void)
4645{
4646 CPU86_LDouble fptemp;
4647
4648 fptemp = ST0;
4649 if (fptemp>0.0){
4650 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4651 ST1 *= fptemp;
4652 fpop();
4653 } else {
4654 env->fpus &= (~0x4700);
4655 env->fpus |= 0x400;
4656 }
4657}
4658
4659void helper_fptan(void)
4660{
4661 CPU86_LDouble fptemp;
4662
4663 fptemp = ST0;
4664 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4665 env->fpus |= 0x400;
4666 } else {
4667 ST0 = tan(fptemp);
4668 fpush();
4669 ST0 = 1.0;
4670 env->fpus &= (~0x400); /* C2 <-- 0 */
4671 /* the above code is for |arg| < 2**52 only */
4672 }
4673}
4674
4675void helper_fpatan(void)
4676{
4677 CPU86_LDouble fptemp, fpsrcop;
4678
4679 fpsrcop = ST1;
4680 fptemp = ST0;
4681 ST1 = atan2(fpsrcop,fptemp);
4682 fpop();
4683}
4684
4685void helper_fxtract(void)
4686{
4687 CPU86_LDoubleU temp;
4688 unsigned int expdif;
4689
4690 temp.d = ST0;
4691 expdif = EXPD(temp) - EXPBIAS;
4692    /* EXPBIAS is the double-precision exponent bias */
4693 ST0 = expdif;
4694 fpush();
4695 BIASEXPONENT(temp);
4696 ST0 = temp.d;
4697}
4698
4699#ifdef VBOX
4700#ifdef _MSC_VER
4701/* MSC rejects a constant division by zero, so the NaN value is provided externally. */
4702extern double _Nan;
4703#define NaN _Nan
4704#else
4705#define NaN (0.0 / 0.0)
4706#endif
4707#endif /* VBOX */
4708
4709void helper_fprem1(void)
4710{
4711 CPU86_LDouble dblq, fpsrcop, fptemp;
4712 CPU86_LDoubleU fpsrcop1, fptemp1;
4713 int expdif;
4714 signed long long int q;
4715
4716#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4717 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4718#else
4719 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4720#endif
4721 ST0 = 0.0 / 0.0; /* NaN */
4722 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4723 return;
4724 }
4725
4726 fpsrcop = ST0;
4727 fptemp = ST1;
4728 fpsrcop1.d = fpsrcop;
4729 fptemp1.d = fptemp;
4730 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4731
4732 if (expdif < 0) {
4733 /* optimisation? taken from the AMD docs */
4734 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4735 /* ST0 is unchanged */
4736 return;
4737 }
4738
4739 if (expdif < 53) {
4740 dblq = fpsrcop / fptemp;
4741 /* round dblq towards nearest integer */
4742 dblq = rint(dblq);
4743 ST0 = fpsrcop - fptemp * dblq;
4744
4745 /* convert dblq to q by truncating towards zero */
4746 if (dblq < 0.0)
4747 q = (signed long long int)(-dblq);
4748 else
4749 q = (signed long long int)dblq;
4750
4751 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4752 /* (C0,C3,C1) <-- (q2,q1,q0) */
4753 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4754 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4755 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4756 } else {
4757 env->fpus |= 0x400; /* C2 <-- 1 */
4758 fptemp = pow(2.0, expdif - 50);
4759 fpsrcop = (ST0 / ST1) / fptemp;
4760 /* fpsrcop = integer obtained by chopping */
4761 fpsrcop = (fpsrcop < 0.0) ?
4762 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4763 ST0 -= (ST1 * fpsrcop * fptemp);
4764 }
4765}
4766
4767void helper_fprem(void)
4768{
4769 CPU86_LDouble dblq, fpsrcop, fptemp;
4770 CPU86_LDoubleU fpsrcop1, fptemp1;
4771 int expdif;
4772 signed long long int q;
4773
4774#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4775 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4776#else
4777 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4778#endif
4779 ST0 = 0.0 / 0.0; /* NaN */
4780 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4781 return;
4782 }
4783
4784 fpsrcop = (CPU86_LDouble)ST0;
4785 fptemp = (CPU86_LDouble)ST1;
4786 fpsrcop1.d = fpsrcop;
4787 fptemp1.d = fptemp;
4788 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4789
4790 if (expdif < 0) {
4791 /* optimisation? taken from the AMD docs */
4792 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4793 /* ST0 is unchanged */
4794 return;
4795 }
4796
4797 if ( expdif < 53 ) {
4798 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4799 /* round dblq towards zero */
4800 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4801 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4802
4803 /* convert dblq to q by truncating towards zero */
4804 if (dblq < 0.0)
4805 q = (signed long long int)(-dblq);
4806 else
4807 q = (signed long long int)dblq;
4808
4809 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4810 /* (C0,C3,C1) <-- (q2,q1,q0) */
4811 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4812 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4813 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4814 } else {
4815 int N = 32 + (expdif % 32); /* as per AMD docs */
4816 env->fpus |= 0x400; /* C2 <-- 1 */
4817 fptemp = pow(2.0, (double)(expdif - N));
4818 fpsrcop = (ST0 / ST1) / fptemp;
4819 /* fpsrcop = integer obtained by chopping */
4820 fpsrcop = (fpsrcop < 0.0) ?
4821 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4822 ST0 -= (ST1 * fpsrcop * fptemp);
4823 }
4824}
4825
4826void helper_fyl2xp1(void)
4827{
4828 CPU86_LDouble fptemp;
4829
4830 fptemp = ST0;
4831 if ((fptemp+1.0)>0.0) {
4832 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4833 ST1 *= fptemp;
4834 fpop();
4835 } else {
4836 env->fpus &= (~0x4700);
4837 env->fpus |= 0x400;
4838 }
4839}
4840
4841void helper_fsqrt(void)
4842{
4843 CPU86_LDouble fptemp;
4844
4845 fptemp = ST0;
4846 if (fptemp<0.0) {
4847 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4848 env->fpus |= 0x400;
4849 }
4850 ST0 = sqrt(fptemp);
4851}
4852
4853void helper_fsincos(void)
4854{
4855 CPU86_LDouble fptemp;
4856
4857 fptemp = ST0;
4858 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4859 env->fpus |= 0x400;
4860 } else {
4861 ST0 = sin(fptemp);
4862 fpush();
4863 ST0 = cos(fptemp);
4864 env->fpus &= (~0x400); /* C2 <-- 0 */
4865 /* the above code is for |arg| < 2**63 only */
4866 }
4867}
4868
4869void helper_frndint(void)
4870{
4871 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4872}
4873
4874void helper_fscale(void)
4875{
4876 ST0 = ldexp (ST0, (int)(ST1));
4877}
4878
4879void helper_fsin(void)
4880{
4881 CPU86_LDouble fptemp;
4882
4883 fptemp = ST0;
4884 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4885 env->fpus |= 0x400;
4886 } else {
4887 ST0 = sin(fptemp);
4888 env->fpus &= (~0x400); /* C2 <-- 0 */
4889 /* the above code is for |arg| < 2**53 only */
4890 }
4891}
4892
4893void helper_fcos(void)
4894{
4895 CPU86_LDouble fptemp;
4896
4897 fptemp = ST0;
4898 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4899 env->fpus |= 0x400;
4900 } else {
4901 ST0 = cos(fptemp);
4902 env->fpus &= (~0x400); /* C2 <-- 0 */
4903        /* the above code is for |arg| < 2**63 only */
4904 }
4905}
4906
4907void helper_fxam_ST0(void)
4908{
4909 CPU86_LDoubleU temp;
4910 int expdif;
4911
4912 temp.d = ST0;
4913
4914 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4915 if (SIGND(temp))
4916 env->fpus |= 0x200; /* C1 <-- 1 */
4917
4918 /* XXX: test fptags too */
4919 expdif = EXPD(temp);
4920 if (expdif == MAXEXPD) {
4921#ifdef USE_X86LDOUBLE
4922 if (MANTD(temp) == 0x8000000000000000ULL)
4923#else
4924 if (MANTD(temp) == 0)
4925#endif
4926 env->fpus |= 0x500 /*Infinity*/;
4927 else
4928 env->fpus |= 0x100 /*NaN*/;
4929 } else if (expdif == 0) {
4930 if (MANTD(temp) == 0)
4931 env->fpus |= 0x4000 /*Zero*/;
4932 else
4933 env->fpus |= 0x4400 /*Denormal*/;
4934 } else {
4935 env->fpus |= 0x400;
4936 }
4937}
4938
4939void helper_fstenv(target_ulong ptr, int data32)
4940{
4941 int fpus, fptag, exp, i;
4942 uint64_t mant;
4943 CPU86_LDoubleU tmp;
4944
4945 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4946 fptag = 0;
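    /* build the 16-bit tag word: 00 = valid, 01 = zero, 10 = special (NaN/infinity/denormal), 11 = empty */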
4947 for (i=7; i>=0; i--) {
4948 fptag <<= 2;
4949 if (env->fptags[i]) {
4950 fptag |= 3;
4951 } else {
4952 tmp.d = env->fpregs[i].d;
4953 exp = EXPD(tmp);
4954 mant = MANTD(tmp);
4955 if (exp == 0 && mant == 0) {
4956 /* zero */
4957 fptag |= 1;
4958 } else if (exp == 0 || exp == MAXEXPD
4959#ifdef USE_X86LDOUBLE
4960 || (mant & (1LL << 63)) == 0
4961#endif
4962 ) {
4963 /* NaNs, infinity, denormal */
4964 fptag |= 2;
4965 }
4966 }
4967 }
4968 if (data32) {
4969 /* 32 bit */
4970 stl(ptr, env->fpuc);
4971 stl(ptr + 4, fpus);
4972 stl(ptr + 8, fptag);
4973 stl(ptr + 12, 0); /* fpip */
4974 stl(ptr + 16, 0); /* fpcs */
4975 stl(ptr + 20, 0); /* fpoo */
4976 stl(ptr + 24, 0); /* fpos */
4977 } else {
4978 /* 16 bit */
4979 stw(ptr, env->fpuc);
4980 stw(ptr + 2, fpus);
4981 stw(ptr + 4, fptag);
4982 stw(ptr + 6, 0);
4983 stw(ptr + 8, 0);
4984 stw(ptr + 10, 0);
4985 stw(ptr + 12, 0);
4986 }
4987}
4988
4989void helper_fldenv(target_ulong ptr, int data32)
4990{
4991 int i, fpus, fptag;
4992
4993 if (data32) {
4994 env->fpuc = lduw(ptr);
4995 fpus = lduw(ptr + 4);
4996 fptag = lduw(ptr + 8);
4997 }
4998 else {
4999 env->fpuc = lduw(ptr);
5000 fpus = lduw(ptr + 2);
5001 fptag = lduw(ptr + 4);
5002 }
5003 env->fpstt = (fpus >> 11) & 7;
5004 env->fpus = fpus & ~0x3800;
5005 for(i = 0;i < 8; i++) {
5006 env->fptags[i] = ((fptag & 3) == 3);
5007 fptag >>= 2;
5008 }
5009}
5010
5011void helper_fsave(target_ulong ptr, int data32)
5012{
5013 CPU86_LDouble tmp;
5014 int i;
5015
5016 helper_fstenv(ptr, data32);
5017
5018 ptr += (14 << data32);
5019 for(i = 0;i < 8; i++) {
5020 tmp = ST(i);
5021 helper_fstt(tmp, ptr);
5022 ptr += 10;
5023 }
5024
5025 /* fninit */
5026 env->fpus = 0;
5027 env->fpstt = 0;
5028 env->fpuc = 0x37f;
5029 env->fptags[0] = 1;
5030 env->fptags[1] = 1;
5031 env->fptags[2] = 1;
5032 env->fptags[3] = 1;
5033 env->fptags[4] = 1;
5034 env->fptags[5] = 1;
5035 env->fptags[6] = 1;
5036 env->fptags[7] = 1;
5037}
5038
5039void helper_frstor(target_ulong ptr, int data32)
5040{
5041 CPU86_LDouble tmp;
5042 int i;
5043
5044 helper_fldenv(ptr, data32);
5045 ptr += (14 << data32);
5046
5047 for(i = 0;i < 8; i++) {
5048 tmp = helper_fldt(ptr);
5049 ST(i) = tmp;
5050 ptr += 10;
5051 }
5052}
5053
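/* FXSAVE image layout as used below: FCW at +0x00, FSW at +0x02, abridged tag word at +0x04
   (one bit per register, inverted empty flags), MXCSR at +0x18, the eight FPU/MMX registers
   in 16-byte slots from +0x20, and the XMM registers in 16-byte slots from +0xa0. */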
5054void helper_fxsave(target_ulong ptr, int data64)
5055{
5056 int fpus, fptag, i, nb_xmm_regs;
5057 CPU86_LDouble tmp;
5058 target_ulong addr;
5059
5060 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5061 fptag = 0;
5062 for(i = 0; i < 8; i++) {
5063 fptag |= (env->fptags[i] << i);
5064 }
5065 stw(ptr, env->fpuc);
5066 stw(ptr + 2, fpus);
5067 stw(ptr + 4, fptag ^ 0xff);
5068#ifdef TARGET_X86_64
5069 if (data64) {
5070 stq(ptr + 0x08, 0); /* rip */
5071 stq(ptr + 0x10, 0); /* rdp */
5072 } else
5073#endif
5074 {
5075 stl(ptr + 0x08, 0); /* eip */
5076 stl(ptr + 0x0c, 0); /* sel */
5077 stl(ptr + 0x10, 0); /* dp */
5078 stl(ptr + 0x14, 0); /* sel */
5079 }
5080
5081 addr = ptr + 0x20;
5082 for(i = 0;i < 8; i++) {
5083 tmp = ST(i);
5084 helper_fstt(tmp, addr);
5085 addr += 16;
5086 }
5087
5088 if (env->cr[4] & CR4_OSFXSR_MASK) {
5089 /* XXX: finish it */
5090 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5091 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5092 if (env->hflags & HF_CS64_MASK)
5093 nb_xmm_regs = 16;
5094 else
5095 nb_xmm_regs = 8;
5096 addr = ptr + 0xa0;
5097 for(i = 0; i < nb_xmm_regs; i++) {
5098 stq(addr, env->xmm_regs[i].XMM_Q(0));
5099 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5100 addr += 16;
5101 }
5102 }
5103}
5104
5105void helper_fxrstor(target_ulong ptr, int data64)
5106{
5107 int i, fpus, fptag, nb_xmm_regs;
5108 CPU86_LDouble tmp;
5109 target_ulong addr;
5110
5111 env->fpuc = lduw(ptr);
5112 fpus = lduw(ptr + 2);
5113 fptag = lduw(ptr + 4);
5114 env->fpstt = (fpus >> 11) & 7;
5115 env->fpus = fpus & ~0x3800;
5116 fptag ^= 0xff;
5117 for(i = 0;i < 8; i++) {
5118 env->fptags[i] = ((fptag >> i) & 1);
5119 }
5120
5121 addr = ptr + 0x20;
5122 for(i = 0;i < 8; i++) {
5123 tmp = helper_fldt(addr);
5124 ST(i) = tmp;
5125 addr += 16;
5126 }
5127
5128 if (env->cr[4] & CR4_OSFXSR_MASK) {
5129 /* XXX: finish it */
5130 env->mxcsr = ldl(ptr + 0x18);
5131 //ldl(ptr + 0x1c);
5132 if (env->hflags & HF_CS64_MASK)
5133 nb_xmm_regs = 16;
5134 else
5135 nb_xmm_regs = 8;
5136 addr = ptr + 0xa0;
5137 for(i = 0; i < nb_xmm_regs; i++) {
5138#if !defined(VBOX) || __GNUC__ < 4
5139 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5140 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5141#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5142# if 1
5143 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5144 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5145 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5146 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5147# else
5148 /* this works fine on Mac OS X, gcc 4.0.1 */
5149 uint64_t u64 = ldq(addr);
5150            env->xmm_regs[i].XMM_Q(0) = u64;
5151            u64 = ldq(addr + 8);
5152 env->xmm_regs[i].XMM_Q(1) = u64;
5153# endif
5154#endif
5155 addr += 16;
5156 }
5157 }
5158}
5159
5160#ifndef USE_X86LDOUBLE
5161
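/* Without native 80-bit long doubles, conversions between the host 64-bit double and the
   x87 80-bit format shift the 52-bit mantissa up, make the integer bit explicit and rebias
   the exponent from 1023 to 16383 (and the reverse in cpu_set_fp80). */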
5162void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5163{
5164 CPU86_LDoubleU temp;
5165 int e;
5166
5167 temp.d = f;
5168 /* mantissa */
5169 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5170 /* exponent + sign */
5171 e = EXPD(temp) - EXPBIAS + 16383;
5172 e |= SIGND(temp) >> 16;
5173 *pexp = e;
5174}
5175
5176CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5177{
5178 CPU86_LDoubleU temp;
5179 int e;
5180 uint64_t ll;
5181
5182 /* XXX: handle overflow ? */
5183 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5184 e |= (upper >> 4) & 0x800; /* sign */
5185 ll = (mant >> 11) & ((1LL << 52) - 1);
5186#ifdef __arm__
5187 temp.l.upper = (e << 20) | (ll >> 32);
5188 temp.l.lower = ll;
5189#else
5190 temp.ll = ll | ((uint64_t)e << 52);
5191#endif
5192 return temp.d;
5193}
5194
5195#else
5196
5197void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5198{
5199 CPU86_LDoubleU temp;
5200
5201 temp.d = f;
5202 *pmant = temp.l.lower;
5203 *pexp = temp.l.upper;
5204}
5205
5206CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5207{
5208 CPU86_LDoubleU temp;
5209
5210 temp.l.upper = upper;
5211 temp.l.lower = mant;
5212 return temp.d;
5213}
5214#endif
5215
5216#ifdef TARGET_X86_64
5217
5218//#define DEBUG_MULDIV
5219
5220static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5221{
5222 *plow += a;
5223 /* carry test */
5224 if (*plow < a)
5225 (*phigh)++;
5226 *phigh += b;
5227}
5228
5229static void neg128(uint64_t *plow, uint64_t *phigh)
5230{
5231 *plow = ~ *plow;
5232 *phigh = ~ *phigh;
5233 add128(plow, phigh, 1, 0);
5234}
5235
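/* 128-by-64 unsigned division using a simple restoring shift-and-subtract loop (one quotient
   bit per iteration); the quotient overflows when the high half of the dividend is >= the
   divisor. */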
5236/* return TRUE if overflow */
5237static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5238{
5239 uint64_t q, r, a1, a0;
5240 int i, qb, ab;
5241
5242 a0 = *plow;
5243 a1 = *phigh;
5244 if (a1 == 0) {
5245 q = a0 / b;
5246 r = a0 % b;
5247 *plow = q;
5248 *phigh = r;
5249 } else {
5250 if (a1 >= b)
5251 return 1;
5252 /* XXX: use a better algorithm */
5253 for(i = 0; i < 64; i++) {
5254 ab = a1 >> 63;
5255 a1 = (a1 << 1) | (a0 >> 63);
5256 if (ab || a1 >= b) {
5257 a1 -= b;
5258 qb = 1;
5259 } else {
5260 qb = 0;
5261 }
5262 a0 = (a0 << 1) | qb;
5263 }
5264#if defined(DEBUG_MULDIV)
5265 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5266 *phigh, *plow, b, a0, a1);
5267#endif
5268 *plow = a0;
5269 *phigh = a1;
5270 }
5271 return 0;
5272}
5273
5274/* return TRUE if overflow */
5275static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5276{
5277 int sa, sb;
5278 sa = ((int64_t)*phigh < 0);
5279 if (sa)
5280 neg128(plow, phigh);
5281 sb = (b < 0);
5282 if (sb)
5283 b = -b;
5284 if (div64(plow, phigh, b) != 0)
5285 return 1;
5286 if (sa ^ sb) {
5287 if (*plow > (1ULL << 63))
5288 return 1;
5289 *plow = - *plow;
5290 } else {
5291 if (*plow >= (1ULL << 63))
5292 return 1;
5293 }
5294 if (sa)
5295 *phigh = - *phigh;
5296 return 0;
5297}
5298
5299void helper_mulq_EAX_T0(target_ulong t0)
5300{
5301 uint64_t r0, r1;
5302
5303 mulu64(&r0, &r1, EAX, t0);
5304 EAX = r0;
5305 EDX = r1;
5306 CC_DST = r0;
5307 CC_SRC = r1;
5308}
5309
5310void helper_imulq_EAX_T0(target_ulong t0)
5311{
5312 uint64_t r0, r1;
5313
5314 muls64(&r0, &r1, EAX, t0);
5315 EAX = r0;
5316 EDX = r1;
5317 CC_DST = r0;
5318 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5319}
5320
5321target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5322{
5323 uint64_t r0, r1;
5324
5325 muls64(&r0, &r1, t0, t1);
5326 CC_DST = r0;
5327 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5328 return r0;
5329}
5330
5331void helper_divq_EAX(target_ulong t0)
5332{
5333 uint64_t r0, r1;
5334 if (t0 == 0) {
5335 raise_exception(EXCP00_DIVZ);
5336 }
5337 r0 = EAX;
5338 r1 = EDX;
5339 if (div64(&r0, &r1, t0))
5340 raise_exception(EXCP00_DIVZ);
5341 EAX = r0;
5342 EDX = r1;
5343}
5344
5345void helper_idivq_EAX(target_ulong t0)
5346{
5347 uint64_t r0, r1;
5348 if (t0 == 0) {
5349 raise_exception(EXCP00_DIVZ);
5350 }
5351 r0 = EAX;
5352 r1 = EDX;
5353 if (idiv64(&r0, &r1, t0))
5354 raise_exception(EXCP00_DIVZ);
5355 EAX = r0;
5356 EDX = r1;
5357}
5358#endif
5359
5360static void do_hlt(void)
5361{
5362 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5363 env->halted = 1;
5364 env->exception_index = EXCP_HLT;
5365 cpu_loop_exit();
5366}
5367
5368void helper_hlt(int next_eip_addend)
5369{
5370 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5371 EIP += next_eip_addend;
5372
5373 do_hlt();
5374}
5375
5376void helper_monitor(target_ulong ptr)
5377{
5378 if ((uint32_t)ECX != 0)
5379 raise_exception(EXCP0D_GPF);
5380 /* XXX: store address ? */
5381 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5382}
5383
5384void helper_mwait(int next_eip_addend)
5385{
5386 if ((uint32_t)ECX != 0)
5387 raise_exception(EXCP0D_GPF);
5388#ifdef VBOX
5389 helper_hlt(next_eip_addend);
5390#else
5391 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5392 EIP += next_eip_addend;
5393
5394 /* XXX: not complete but not completely erroneous */
5395 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5396 /* more than one CPU: do not sleep because another CPU may
5397 wake this one */
5398 } else {
5399 do_hlt();
5400 }
5401#endif
5402}
5403
5404void helper_debug(void)
5405{
5406 env->exception_index = EXCP_DEBUG;
5407 cpu_loop_exit();
5408}
5409
5410void helper_raise_interrupt(int intno, int next_eip_addend)
5411{
5412 raise_interrupt(intno, 1, 0, next_eip_addend);
5413}
5414
5415void helper_raise_exception(int exception_index)
5416{
5417 raise_exception(exception_index);
5418}
5419
5420void helper_cli(void)
5421{
5422 env->eflags &= ~IF_MASK;
5423}
5424
5425void helper_sti(void)
5426{
5427 env->eflags |= IF_MASK;
5428}
5429
5430#ifdef VBOX
5431void helper_cli_vme(void)
5432{
5433 env->eflags &= ~VIF_MASK;
5434}
5435
5436void helper_sti_vme(void)
5437{
5438 /* First check, then change eflags according to the AMD manual */
5439 if (env->eflags & VIP_MASK) {
5440 raise_exception(EXCP0D_GPF);
5441 }
5442 env->eflags |= VIF_MASK;
5443}
5444#endif
5445
5446#if 0
5447/* vm86plus instructions */
5448void helper_cli_vm(void)
5449{
5450 env->eflags &= ~VIF_MASK;
5451}
5452
5453void helper_sti_vm(void)
5454{
5455 env->eflags |= VIF_MASK;
5456 if (env->eflags & VIP_MASK) {
5457 raise_exception(EXCP0D_GPF);
5458 }
5459}
5460#endif
5461
5462void helper_set_inhibit_irq(void)
5463{
5464 env->hflags |= HF_INHIBIT_IRQ_MASK;
5465}
5466
5467void helper_reset_inhibit_irq(void)
5468{
5469 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5470}
5471
5472void helper_boundw(target_ulong a0, int v)
5473{
5474 int low, high;
5475 low = ldsw(a0);
5476 high = ldsw(a0 + 2);
5477 v = (int16_t)v;
5478 if (v < low || v > high) {
5479 raise_exception(EXCP05_BOUND);
5480 }
5481 FORCE_RET();
5482}
5483
5484void helper_boundl(target_ulong a0, int v)
5485{
5486 int low, high;
5487 low = ldl(a0);
5488 high = ldl(a0 + 4);
5489 if (v < low || v > high) {
5490 raise_exception(EXCP05_BOUND);
5491 }
5492 FORCE_RET();
5493}
5494
5495static float approx_rsqrt(float a)
5496{
5497 return 1.0 / sqrt(a);
5498}
5499
5500static float approx_rcp(float a)
5501{
5502 return 1.0 / a;
5503}
5504
5505#if !defined(CONFIG_USER_ONLY)
5506
5507#define MMUSUFFIX _mmu
5508
5509#define SHIFT 0
5510#include "softmmu_template.h"
5511
5512#define SHIFT 1
5513#include "softmmu_template.h"
5514
5515#define SHIFT 2
5516#include "softmmu_template.h"
5517
5518#define SHIFT 3
5519#include "softmmu_template.h"
5520
5521#endif
5522
5523#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5524/* This code assumes a real physical address always fits into a host CPU register,
5525   which is wrong in general but true for our current use cases. */
5526RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5527{
5528 return remR3PhysReadS8(addr);
5529}
5530RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5531{
5532 return remR3PhysReadU8(addr);
5533}
5534void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5535{
5536 remR3PhysWriteU8(addr, val);
5537}
5538RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5539{
5540 return remR3PhysReadS16(addr);
5541}
5542RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5543{
5544 return remR3PhysReadU16(addr);
5545}
5546void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5547{
5548 remR3PhysWriteU16(addr, val);
5549}
5550RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5551{
5552 return remR3PhysReadS32(addr);
5553}
5554RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5555{
5556 return remR3PhysReadU32(addr);
5557}
5558void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5559{
5560 remR3PhysWriteU32(addr, val);
5561}
5562uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5563{
5564 return remR3PhysReadU64(addr);
5565}
5566void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5567{
5568 remR3PhysWriteU64(addr, val);
5569}
5570#endif
5571
5572/* Try to fill the TLB; raise an exception on error. If retaddr is
5573   NULL, it means that the function was called from C code (i.e. not
5574   from generated code or from helper.c). */
5575/* XXX: fix it to restore all registers */
5576void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5577{
5578 TranslationBlock *tb;
5579 int ret;
5580 unsigned long pc;
5581 CPUX86State *saved_env;
5582
5583 /* XXX: hack to restore env in all cases, even if not called from
5584 generated code */
5585 saved_env = env;
5586 env = cpu_single_env;
5587
5588 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5589 if (ret) {
5590 if (retaddr) {
5591 /* now we have a real cpu fault */
5592 pc = (unsigned long)retaddr;
5593 tb = tb_find_pc(pc);
5594 if (tb) {
5595 /* the PC is inside the translated code. It means that we have
5596 a virtual CPU fault */
5597 cpu_restore_state(tb, env, pc, NULL);
5598 }
5599 }
5600 raise_exception_err(env->exception_index, env->error_code);
5601 }
5602 env = saved_env;
5603}
5604
5605#ifdef VBOX
5606
5607/**
5608 * Correctly computes the eflags.
5609 * @returns eflags.
5610 * @param env1 CPU environment.
5611 */
5612uint32_t raw_compute_eflags(CPUX86State *env1)
5613{
5614 CPUX86State *savedenv = env;
5615 uint32_t efl;
5616 env = env1;
5617 efl = compute_eflags();
5618 env = savedenv;
5619 return efl;
5620}
5621
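/* The accessor helpers below all follow the same pattern: temporarily switch the global 'env'
   pointer to the caller-supplied state, perform the access with the regular softmmu helpers,
   then restore 'env'. */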
5622/**
5623 * Reads a byte from a virtual address in the guest memory area.
5624 * XXX: does it work for any address? swapped out pages?
5625 * @returns the data byte read.
5626 * @param env1 CPU environment.
5627 * @param pvAddr GC Virtual address.
5628 */
5629uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5630{
5631 CPUX86State *savedenv = env;
5632 uint8_t u8;
5633 env = env1;
5634 u8 = ldub_kernel(addr);
5635 env = savedenv;
5636 return u8;
5637}
5638
5639/**
5640 * Reads a 16-bit word from a virtual address in the guest memory area.
5641 * XXX: does it work for any address? swapped out pages?
5642 * @returns the data word read.
5643 * @param env1 CPU environment.
5644 * @param pvAddr GC Virtual address.
5645 */
5646uint16_t read_word(CPUX86State *env1, target_ulong addr)
5647{
5648 CPUX86State *savedenv = env;
5649 uint16_t u16;
5650 env = env1;
5651 u16 = lduw_kernel(addr);
5652 env = savedenv;
5653 return u16;
5654}
5655
5656/**
5657 * Reads a 32-bit dword from a virtual address in the guest memory area.
5658 * XXX: does it work for any address? swapped out pages?
5659 * @returns the data dword read.
5660 * @param env1 CPU environment.
5661 * @param pvAddr GC Virtual address.
5662 */
5663uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5664{
5665 CPUX86State *savedenv = env;
5666 uint32_t u32;
5667 env = env1;
5668 u32 = ldl_kernel(addr);
5669 env = savedenv;
5670 return u32;
5671}
5672
5673/**
5674 * Writes a byte to a virtual address in the guest memory area.
5675 * XXX: does it work for any address? swapped out pages?
5677 * @param env1 CPU environment.
5678 * @param pvAddr GC Virtual address.
5679 * @param val byte value
5680 */
5681void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5682{
5683 CPUX86State *savedenv = env;
5684 env = env1;
5685 stb(addr, val);
5686 env = savedenv;
5687}
5688
5689void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5690{
5691 CPUX86State *savedenv = env;
5692 env = env1;
5693 stw(addr, val);
5694 env = savedenv;
5695}
5696
5697void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5698{
5699 CPUX86State *savedenv = env;
5700 env = env1;
5701 stl(addr, val);
5702 env = savedenv;
5703}
5704
5705/**
5706 * Correctly loads a selector into a segment register, updating the internal
5707 * qemu data/caches.
5708 * @param env1 CPU environment.
5709 * @param seg_reg Segment register.
5710 * @param selector Selector to load.
5711 */
5712void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5713{
5714 CPUX86State *savedenv = env;
5715#ifdef FORCE_SEGMENT_SYNC
5716 jmp_buf old_buf;
5717#endif
5718
5719 env = env1;
5720
5721 if ( env->eflags & X86_EFL_VM
5722 || !(env->cr[0] & X86_CR0_PE))
5723 {
5724 load_seg_vm(seg_reg, selector);
5725
5726 env = savedenv;
5727
5728 /* Successful sync. */
5729 env1->segs[seg_reg].newselector = 0;
5730 }
5731 else
5732 {
5733        /* For some reason this works even without saving/restoring the jump buffer, and since
5734           this code is time critical, we skip that step. */
5735#ifdef FORCE_SEGMENT_SYNC
5736 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5737#endif
5738 if (setjmp(env1->jmp_env) == 0)
5739 {
5740 if (seg_reg == R_CS)
5741 {
5742 uint32_t e1, e2;
5743 e1 = e2 = 0;
5744 load_segment(&e1, &e2, selector);
5745 cpu_x86_load_seg_cache(env, R_CS, selector,
5746 get_seg_base(e1, e2),
5747 get_seg_limit(e1, e2),
5748 e2);
5749 }
5750 else
5751 helper_load_seg(seg_reg, selector);
5752            /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5753               loading 0 selectors, which in turn led to subtle problems like #3588. */
5754
5755 env = savedenv;
5756
5757 /* Successful sync. */
5758 env1->segs[seg_reg].newselector = 0;
5759 }
5760 else
5761 {
5762 env = savedenv;
5763
5764 /* Postpone sync until the guest uses the selector. */
5765 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5766 env1->segs[seg_reg].newselector = selector;
5767 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5768 env1->exception_index = -1;
5769 env1->error_code = 0;
5770 env1->old_exception = -1;
5771 }
5772#ifdef FORCE_SEGMENT_SYNC
5773 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5774#endif
5775 }
5776
5777}
5778
5779DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5780{
5781 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5782}
5783
5784
5785int emulate_single_instr(CPUX86State *env1)
5786{
5787 TranslationBlock *tb;
5788 TranslationBlock *current;
5789 int flags;
5790 uint8_t *tc_ptr;
5791 target_ulong old_eip;
5792
5793 /* ensures env is loaded! */
5794 CPUX86State *savedenv = env;
5795 env = env1;
5796
5797 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5798
5799 current = env->current_tb;
5800 env->current_tb = NULL;
5801 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5802
5803 /*
5804 * Translate only one instruction.
5805 */
5806 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5807 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5808 env->segs[R_CS].base, flags, 0);
5809
5810 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5811
5812
5813 /* tb_link_phys: */
5814 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5815 tb->jmp_next[0] = NULL;
5816 tb->jmp_next[1] = NULL;
5817 Assert(tb->jmp_next[0] == NULL);
5818 Assert(tb->jmp_next[1] == NULL);
5819 if (tb->tb_next_offset[0] != 0xffff)
5820 tb_reset_jump(tb, 0);
5821 if (tb->tb_next_offset[1] != 0xffff)
5822 tb_reset_jump(tb, 1);
5823
5824 /*
5825 * Execute it using emulation
5826 */
5827 old_eip = env->eip;
5828 env->current_tb = tb;
5829
5830 /*
5831     * eip remains the same for repeated instructions; it is unclear why qemu does not jump
5832     * inside the generated code. This is perhaps not a very safe hack.
5833 */
5834 while(old_eip == env->eip)
5835 {
5836 tc_ptr = tb->tc_ptr;
5837
5838#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5839 int fake_ret;
5840 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5841#else
5842 tcg_qemu_tb_exec(tc_ptr);
5843#endif
5844 /*
5845 * Exit once we detect an external interrupt and interrupts are enabled
5846 */
5847 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5848 ( (env->eflags & IF_MASK) &&
5849 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5850 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5851 {
5852 break;
5853 }
5854 }
5855 env->current_tb = current;
5856
5857 tb_phys_invalidate(tb, -1);
5858 tb_free(tb);
5859/*
5860 Assert(tb->tb_next_offset[0] == 0xffff);
5861 Assert(tb->tb_next_offset[1] == 0xffff);
5862 Assert(tb->tb_next[0] == 0xffff);
5863 Assert(tb->tb_next[1] == 0xffff);
5864 Assert(tb->jmp_next[0] == NULL);
5865 Assert(tb->jmp_next[1] == NULL);
5866 Assert(tb->jmp_first == NULL); */
5867
5868 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5869
5870 /*
5871 * Execute the next instruction when we encounter instruction fusing.
5872 */
5873 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5874 {
5875 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5876 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5877 emulate_single_instr(env);
5878 }
5879
5880 env = savedenv;
5881 return 0;
5882}
5883
5884/**
5885 * Correctly loads a new ldtr selector.
5886 *
5887 * @param env1 CPU environment.
5888 * @param selector Selector to load.
5889 */
5890void sync_ldtr(CPUX86State *env1, int selector)
5891{
5892 CPUX86State *saved_env = env;
5893 if (setjmp(env1->jmp_env) == 0)
5894 {
5895 env = env1;
5896 helper_lldt(selector);
5897 env = saved_env;
5898 }
5899 else
5900 {
5901 env = saved_env;
5902#ifdef VBOX_STRICT
5903 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5904#endif
5905 }
5906}
5907
5908int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5909 uint32_t *esp_ptr, int dpl)
5910{
5911 int type, index, shift;
5912
5913 CPUX86State *savedenv = env;
5914 env = env1;
5915
5916 if (!(env->tr.flags & DESC_P_MASK))
5917 cpu_abort(env, "invalid tss");
5918 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5919 if ((type & 7) != 1)
5920 cpu_abort(env, "invalid tss type %d", type);
5921 shift = type >> 3;
5922 index = (dpl * 4 + 2) << shift;
5923 if (index + (4 << shift) - 1 > env->tr.limit)
5924 {
5925 env = savedenv;
5926 return 0;
5927 }
5928 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5929
5930 if (shift == 0) {
5931 *esp_ptr = lduw_kernel(env->tr.base + index);
5932 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5933 } else {
5934 *esp_ptr = ldl_kernel(env->tr.base + index);
5935 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5936 }
5937
5938 env = savedenv;
5939 return 1;
5940}
5941
5942//*****************************************************************************
5943// Needs to be at the bottom of the file (overriding macros)
5944
5945#ifndef VBOX
5946static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5947#else /* VBOX */
5948DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5949#endif /* VBOX */
5950{
5951 return *(CPU86_LDouble *)ptr;
5952}
5953
5954#ifndef VBOX
5955static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5956#else /* VBOX */
5957DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5958#endif /* VBOX */
5959{
5960 *(CPU86_LDouble *)ptr = f;
5961}
5962
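/* From here on the guest-memory store/load macros are redefined as raw host-pointer accesses,
   since the raw FP state helpers below operate on a host buffer rather than on guest memory. */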
5963#undef stw
5964#undef stl
5965#undef stq
5966#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5967#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5968#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5969
5970//*****************************************************************************
5971void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5972{
5973 int fpus, fptag, i, nb_xmm_regs;
5974 CPU86_LDouble tmp;
5975 uint8_t *addr;
5976 int data64 = !!(env->hflags & HF_LMA_MASK);
5977
5978 if (env->cpuid_features & CPUID_FXSR)
5979 {
5980 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5981 fptag = 0;
5982 for(i = 0; i < 8; i++) {
5983 fptag |= (env->fptags[i] << i);
5984 }
5985 stw(ptr, env->fpuc);
5986 stw(ptr + 2, fpus);
5987 stw(ptr + 4, fptag ^ 0xff);
5988
5989 addr = ptr + 0x20;
5990 for(i = 0;i < 8; i++) {
5991 tmp = ST(i);
5992 helper_fstt_raw(tmp, addr);
5993 addr += 16;
5994 }
5995
5996 if (env->cr[4] & CR4_OSFXSR_MASK) {
5997 /* XXX: finish it */
5998 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5999 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6000 nb_xmm_regs = 8 << data64;
6001 addr = ptr + 0xa0;
6002 for(i = 0; i < nb_xmm_regs; i++) {
6003#if __GNUC__ < 4
6004 stq(addr, env->xmm_regs[i].XMM_Q(0));
6005 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6006#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6007 stl(addr, env->xmm_regs[i].XMM_L(0));
6008 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6009 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6010 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6011#endif
6012 addr += 16;
6013 }
6014 }
6015 }
6016 else
6017 {
6018 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6019 int fptag;
6020
6021 fp->FCW = env->fpuc;
6022 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6023 fptag = 0;
6024 for (i=7; i>=0; i--) {
6025 fptag <<= 2;
6026 if (env->fptags[i]) {
6027 fptag |= 3;
6028 } else {
6029 /* the FPU automatically computes it */
6030 }
6031 }
6032 fp->FTW = fptag;
6033
6034 for(i = 0;i < 8; i++) {
6035 tmp = ST(i);
6036 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6037 }
6038 }
6039}
6040
6041//*****************************************************************************
6042#undef lduw
6043#undef ldl
6044#undef ldq
6045#define lduw(a) *(uint16_t *)(a)
6046#define ldl(a) *(uint32_t *)(a)
6047#define ldq(a) *(uint64_t *)(a)
6048//*****************************************************************************
6049void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6050{
6051 int i, fpus, fptag, nb_xmm_regs;
6052 CPU86_LDouble tmp;
6053 uint8_t *addr;
6054 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6055
6056 if (env->cpuid_features & CPUID_FXSR)
6057 {
6058 env->fpuc = lduw(ptr);
6059 fpus = lduw(ptr + 2);
6060 fptag = lduw(ptr + 4);
6061 env->fpstt = (fpus >> 11) & 7;
6062 env->fpus = fpus & ~0x3800;
6063 fptag ^= 0xff;
6064 for(i = 0;i < 8; i++) {
6065 env->fptags[i] = ((fptag >> i) & 1);
6066 }
6067
6068 addr = ptr + 0x20;
6069 for(i = 0;i < 8; i++) {
6070 tmp = helper_fldt_raw(addr);
6071 ST(i) = tmp;
6072 addr += 16;
6073 }
6074
6075 if (env->cr[4] & CR4_OSFXSR_MASK) {
6076 /* XXX: finish it, endianness */
6077 env->mxcsr = ldl(ptr + 0x18);
6078 //ldl(ptr + 0x1c);
6079 nb_xmm_regs = 8 << data64;
6080 addr = ptr + 0xa0;
6081 for(i = 0; i < nb_xmm_regs; i++) {
6082#if HC_ARCH_BITS == 32
6083 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6084 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6085 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6086 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6087 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6088#else
6089 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6090 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6091#endif
6092 addr += 16;
6093 }
6094 }
6095 }
6096 else
6097 {
6098 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6099 int fptag, j;
6100
6101 env->fpuc = fp->FCW;
6102 env->fpstt = (fp->FSW >> 11) & 7;
6103 env->fpus = fp->FSW & ~0x3800;
6104 fptag = fp->FTW;
6105 for(i = 0;i < 8; i++) {
6106 env->fptags[i] = ((fptag & 3) == 3);
6107 fptag >>= 2;
6108 }
6109 j = env->fpstt;
6110 for(i = 0;i < 8; i++) {
6111 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6112 ST(i) = tmp;
6113 }
6114 }
6115}
6116//*****************************************************************************
6117//*****************************************************************************
6118
6119#endif /* VBOX */
6120
6121/* Secure Virtual Machine helpers */
6122
6123#if defined(CONFIG_USER_ONLY)
6124
6125void helper_vmrun(int aflag, int next_eip_addend)
6126{
6127}
6128void helper_vmmcall(void)
6129{
6130}
6131void helper_vmload(int aflag)
6132{
6133}
6134void helper_vmsave(int aflag)
6135{
6136}
6137void helper_stgi(void)
6138{
6139}
6140void helper_clgi(void)
6141{
6142}
6143void helper_skinit(void)
6144{
6145}
6146void helper_invlpga(int aflag)
6147{
6148}
6149void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6150{
6151}
6152void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6153{
6154}
6155
6156void helper_svm_check_io(uint32_t port, uint32_t param,
6157 uint32_t next_eip_addend)
6158{
6159}
6160#else
6161
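/* QEMU keeps segment attributes in bits 8..15 (type/S/DPL/P) and 20..23 (AVL/L/DB/G) of the
   cached 'flags' field; the VMCB 'attrib' field packs the same bits into positions 0..7 and
   8..11, hence the shifting below and the reverse in svm_load_seg. */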
6162#ifndef VBOX
6163static inline void svm_save_seg(target_phys_addr_t addr,
6164#else /* VBOX */
6165DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6166#endif /* VBOX */
6167 const SegmentCache *sc)
6168{
6169 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6170 sc->selector);
6171 stq_phys(addr + offsetof(struct vmcb_seg, base),
6172 sc->base);
6173 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6174 sc->limit);
6175 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6176 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6177}
6178
6179#ifndef VBOX
6180static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6181#else /* VBOX */
6182DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6183#endif /* VBOX */
6184{
6185 unsigned int flags;
6186
6187 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6188 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6189 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6190 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6191 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6192}
6193
6194#ifndef VBOX
6195static inline void svm_load_seg_cache(target_phys_addr_t addr,
6196#else /* VBOX */
6197DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6198#endif /* VBOX */
6199 CPUState *env, int seg_reg)
6200{
6201 SegmentCache sc1, *sc = &sc1;
6202 svm_load_seg(addr, sc);
6203 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6204 sc->base, sc->limit, sc->flags);
6205}
6206
6207void helper_vmrun(int aflag, int next_eip_addend)
6208{
6209 target_ulong addr;
6210 uint32_t event_inj;
6211 uint32_t int_ctl;
6212
6213 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6214
6215 if (aflag == 2)
6216 addr = EAX;
6217 else
6218 addr = (uint32_t)EAX;
6219
6220 if (loglevel & CPU_LOG_TB_IN_ASM)
6221 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6222
6223 env->vm_vmcb = addr;
6224
6225 /* save the current CPU state in the hsave page */
6226 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6227 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6228
6229 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6230 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6231
6232 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6233 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6234 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6235 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6236 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6237 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6238
6239 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6240 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6241
6242 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6243 &env->segs[R_ES]);
6244 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6245 &env->segs[R_CS]);
6246 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6247 &env->segs[R_SS]);
6248 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6249 &env->segs[R_DS]);
6250
6251 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6252 EIP + next_eip_addend);
6253 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6254 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6255
6256 /* load the interception bitmaps so we do not need to access the
6257 vmcb in svm mode */
6258 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6259 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6260 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6261 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6262 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6263 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6264
6265 /* enable intercepts */
6266 env->hflags |= HF_SVMI_MASK;
6267
6268 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6269
6270 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6271 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6272
6273 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6274 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6275
6276 /* clear exit_info_2 so we behave like the real hardware */
6277 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6278
6279 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6280 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6281 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6282 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6283 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6284 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6285 if (int_ctl & V_INTR_MASKING_MASK) {
6286 env->v_tpr = int_ctl & V_TPR_MASK;
6287 env->hflags2 |= HF2_VINTR_MASK;
6288 if (env->eflags & IF_MASK)
6289 env->hflags2 |= HF2_HIF_MASK;
6290 }
6291
6292 cpu_load_efer(env,
6293 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6294 env->eflags = 0;
6295 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6296 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6297 CC_OP = CC_OP_EFLAGS;
6298
6299 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6300 env, R_ES);
6301 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6302 env, R_CS);
6303 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6304 env, R_SS);
6305 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6306 env, R_DS);
6307
6308 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6309 env->eip = EIP;
6310 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6311 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6312 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6313 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6314 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6315
6316 /* FIXME: guest state consistency checks */
6317
6318 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6319 case TLB_CONTROL_DO_NOTHING:
6320 break;
6321 case TLB_CONTROL_FLUSH_ALL_ASID:
6322 /* FIXME: this is not 100% correct but should work for now */
6323 tlb_flush(env, 1);
6324 break;
6325 }
6326
6327 env->hflags2 |= HF2_GIF_MASK;
6328
6329 if (int_ctl & V_IRQ_MASK) {
6330 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6331 }
6332
6333 /* maybe we need to inject an event */
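    /* EVENTINJ layout: bits 7:0 vector, 10:8 type, bit 11 error-code-valid,
       bit 31 valid; the error code itself is held in EVENTINJ.ERRORCODE. */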
6334 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6335 if (event_inj & SVM_EVTINJ_VALID) {
6336 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6337 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6338 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6339 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6340
6341 if (loglevel & CPU_LOG_TB_IN_ASM)
6342 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6343 /* FIXME: need to implement valid_err */
6344 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6345 case SVM_EVTINJ_TYPE_INTR:
6346 env->exception_index = vector;
6347 env->error_code = event_inj_err;
6348 env->exception_is_int = 0;
6349 env->exception_next_eip = -1;
6350 if (loglevel & CPU_LOG_TB_IN_ASM)
6351 fprintf(logfile, "INTR");
6352 /* XXX: is this always correct? */
6353 do_interrupt(vector, 0, 0, 0, 1);
6354 break;
6355 case SVM_EVTINJ_TYPE_NMI:
6356 env->exception_index = EXCP02_NMI;
6357 env->error_code = event_inj_err;
6358 env->exception_is_int = 0;
6359 env->exception_next_eip = EIP;
6360 if (loglevel & CPU_LOG_TB_IN_ASM)
6361 fprintf(logfile, "NMI");
6362 cpu_loop_exit();
6363 break;
6364 case SVM_EVTINJ_TYPE_EXEPT:
6365 env->exception_index = vector;
6366 env->error_code = event_inj_err;
6367 env->exception_is_int = 0;
6368 env->exception_next_eip = -1;
6369 if (loglevel & CPU_LOG_TB_IN_ASM)
6370 fprintf(logfile, "EXEPT");
6371 cpu_loop_exit();
6372 break;
6373 case SVM_EVTINJ_TYPE_SOFT:
6374 env->exception_index = vector;
6375 env->error_code = event_inj_err;
6376 env->exception_is_int = 1;
6377 env->exception_next_eip = EIP;
6378 if (loglevel & CPU_LOG_TB_IN_ASM)
6379 fprintf(logfile, "SOFT");
6380 cpu_loop_exit();
6381 break;
6382 }
6383 if (loglevel & CPU_LOG_TB_IN_ASM)
6384 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6385 }
6386}
6387
6388void helper_vmmcall(void)
6389{
6390 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6391 raise_exception(EXCP06_ILLOP);
6392}
6393
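/* VMLOAD/VMSAVE transfer the extra guest state that VMRUN and #VMEXIT do not
   switch themselves: FS, GS, TR and LDTR (including their hidden descriptor
   state), KernelGSBase, the STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the
   SYSENTER MSRs. */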
6394void helper_vmload(int aflag)
6395{
6396 target_ulong addr;
6397 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6398
6399 if (aflag == 2)
6400 addr = EAX;
6401 else
6402 addr = (uint32_t)EAX;
6403
6404 if (loglevel & CPU_LOG_TB_IN_ASM)
6405 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6406 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6407 env->segs[R_FS].base);
6408
6409 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6410 env, R_FS);
6411 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6412 env, R_GS);
6413 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6414 &env->tr);
6415 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6416 &env->ldt);
6417
6418#ifdef TARGET_X86_64
6419 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6420 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6421 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6422 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6423#endif
6424 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6425 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6426 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6427 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6428}
6429
6430void helper_vmsave(int aflag)
6431{
6432 target_ulong addr;
6433 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6434
6435 if (aflag == 2)
6436 addr = EAX;
6437 else
6438 addr = (uint32_t)EAX;
6439
6440 if (loglevel & CPU_LOG_TB_IN_ASM)
6441 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6442 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6443 env->segs[R_FS].base);
6444
6445 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6446 &env->segs[R_FS]);
6447 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6448 &env->segs[R_GS]);
6449 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6450 &env->tr);
6451 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6452 &env->ldt);
6453
6454#ifdef TARGET_X86_64
6455 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6456 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6457 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6458 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6459#endif
6460 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6461 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6462 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6463 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6464}
6465
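/* STGI sets and CLGI clears the global interrupt flag (GIF); while GIF is
   clear, interrupts and most other events are held pending. */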
6466void helper_stgi(void)
6467{
6468 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6469 env->hflags2 |= HF2_GIF_MASK;
6470}
6471
6472void helper_clgi(void)
6473{
6474 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6475 env->hflags2 &= ~HF2_GIF_MASK;
6476}
6477
6478void helper_skinit(void)
6479{
6480 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6481 /* XXX: not implemented */
6482 raise_exception(EXCP06_ILLOP);
6483}
6484
6485void helper_invlpga(int aflag)
6486{
6487 target_ulong addr;
6488 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6489
6490 if (aflag == 2)
6491 addr = EAX;
6492 else
6493 addr = (uint32_t)EAX;
6494
6495 /* XXX: could use the ASID to decide whether the
6496    flush is actually needed */
6497 tlb_flush_page(env, addr);
6498}
6499
6500void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6501{
6502 if (likely(!(env->hflags & HF_SVMI_MASK)))
6503 return;
6504#ifndef VBOX
6505 switch(type) {
6506#ifndef VBOX
6507 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6508#else
6509 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6510 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6511 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6512#endif
6513 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6514 helper_vmexit(type, param);
6515 }
6516 break;
6517#ifndef VBOX
6518 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6519#else
6520 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6521 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6522 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6523#endif
6524 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6525 helper_vmexit(type, param);
6526 }
6527 break;
6528 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6529 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6530 helper_vmexit(type, param);
6531 }
6532 break;
6533 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6534 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6535 helper_vmexit(type, param);
6536 }
6537 break;
6538 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6539 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6540 helper_vmexit(type, param);
6541 }
6542 break;
6543 case SVM_EXIT_MSR:
6544 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6545 /* FIXME: this should be read in at vmrun (faster this way?) */
6546 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6547 uint32_t t0, t1;
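            /* The MSR permission map uses two bits (read, write) per MSR and
               is split into 2K regions covering the three architected MSR
               ranges; t1 is the byte offset into the map, t0 the bit offset
               within that byte. */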
6548 switch((uint32_t)ECX) {
6549 case 0 ... 0x1fff:
6550 t0 = (ECX * 2) % 8;
6551 t1 = (ECX * 2) / 8;
6552 break;
6553 case 0xc0000000 ... 0xc0001fff:
6554 t0 = (8192 + ECX - 0xc0000000) * 2;
6555 t1 = (t0 / 8);
6556 t0 %= 8;
6557 break;
6558 case 0xc0010000 ... 0xc0011fff:
6559 t0 = (16384 + ECX - 0xc0010000) * 2;
6560 t1 = (t0 / 8);
6561 t0 %= 8;
6562 break;
6563 default:
6564 helper_vmexit(type, param);
6565 t0 = 0;
6566 t1 = 0;
6567 break;
6568 }
6569 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6570 helper_vmexit(type, param);
6571 }
6572 break;
6573 default:
6574 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6575 helper_vmexit(type, param);
6576 }
6577 break;
6578 }
6579#else
6580 AssertMsgFailed(("We shouldn't be here; HWACCM handles SVM differently!"));
6581#endif
6582}
6583
6584void helper_svm_check_io(uint32_t port, uint32_t param,
6585 uint32_t next_eip_addend)
6586{
6587 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6588 /* FIXME: this should be read in at vmrun (faster this way?) */
6589 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
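        /* The I/O permission map holds one bit per port; bits 6:4 of param
           give the access size in bytes, so the mask covers every byte of
           the access starting at the given port. */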
6590 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6591 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6592 /* next EIP */
6593 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6594 env->eip + next_eip_addend);
6595 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6596 }
6597 }
6598}
6599
6600/* Note: currently only 32 bits of exit_code are used */
6601void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6602{
6603 uint32_t int_ctl;
6604
6605 if (loglevel & CPU_LOG_TB_IN_ASM)
6606 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6607 exit_code, exit_info_1,
6608 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6609 EIP);
6610
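    /* Record whether the guest was in an interrupt shadow (e.g. right after
       STI or MOV SS) in the VMCB's interrupt-state field. */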
6611 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6612 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6613 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6614 } else {
6615 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6616 }
6617
6618 /* Save the VM state in the vmcb */
6619 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6620 &env->segs[R_ES]);
6621 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6622 &env->segs[R_CS]);
6623 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6624 &env->segs[R_SS]);
6625 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6626 &env->segs[R_DS]);
6627
6628 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6629 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6630
6631 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6632 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6633
6634 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6635 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6636 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6637 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6638 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6639
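    /* Reflect the virtual interrupt state back into the guest VMCB: keep the
       guest's V_TPR and re-assert V_IRQ if a virtual interrupt is still pending. */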
6640 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6641 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6642 int_ctl |= env->v_tpr & V_TPR_MASK;
6643 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6644 int_ctl |= V_IRQ_MASK;
6645 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6646
6647 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6649 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6650 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6651 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6652 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6653 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6654
6655 /* Reload the host state from vm_hsave */
6656 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6657 env->hflags &= ~HF_SVMI_MASK;
6658 env->intercept = 0;
6659 env->intercept_exceptions = 0;
6660 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6661 env->tsc_offset = 0;
6662
6663 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6664 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6665
6666 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6667 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6668
6669 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6670 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6671 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6672 /* we need to set the efer after the crs so the hidden flags get
6673 set properly */
6674 cpu_load_efer(env,
6675 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6676 env->eflags = 0;
6677 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6678 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6679 CC_OP = CC_OP_EFLAGS;
6680
6681 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6682 env, R_ES);
6683 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6684 env, R_CS);
6685 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6686 env, R_SS);
6687 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6688 env, R_DS);
6689
6690 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6691 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6692 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6693
6694 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6695 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6696
6697 /* other setups */
6698 cpu_x86_set_cpl(env, 0);
6699 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6700 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6701
6702 env->hflags2 &= ~HF2_GIF_MASK;
6703 /* FIXME: Resets the current ASID register to zero (host ASID). */
6704
6705 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6706
6707 /* Clears the TSC_OFFSET inside the processor. */
6708
6709 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6710    from the page table indicated by the host's CR3. If the PDPEs contain
6711    illegal state, the processor causes a shutdown. */
6712
6713 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6714 env->cr[0] |= CR0_PE_MASK;
6715 env->eflags &= ~VM_MASK;
6716
6717 /* Disables all breakpoints in the host DR7 register. */
6718
6719 /* Checks the reloaded host state for consistency. */
6720
6721 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6722    host's code segment or non-canonical (in the case of long mode), a
6723    #GP fault is delivered inside the host. */
6724
6725 /* remove any pending exception */
6726 env->exception_index = -1;
6727 env->error_code = 0;
6728 env->old_exception = -1;
6729
6730 cpu_loop_exit();
6731}
6732
6733#endif
6734
6735/* MMX/SSE */
6736/* XXX: optimize by storing fptt and fptags in the static cpu state */
6737void helper_enter_mmx(void)
6738{
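    /* Executing any MMX instruction resets the FPU top-of-stack to 0 and
       marks every FP/MMX register tag as valid (in use). */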
6739 env->fpstt = 0;
6740 *(uint32_t *)(env->fptags) = 0;
6741 *(uint32_t *)(env->fptags + 4) = 0;
6742}
6743
6744void helper_emms(void)
6745{
6746 /* set to empty state */
6747 *(uint32_t *)(env->fptags) = 0x01010101;
6748 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6749}
6750
6751/* XXX: suppress */
6752void helper_movq(uint64_t *d, uint64_t *s)
6753{
6754 *d = *s;
6755}
6756
6757#define SHIFT 0
6758#include "ops_sse.h"
6759
6760#define SHIFT 1
6761#include "ops_sse.h"
6762
6763#define SHIFT 0
6764#include "helper_template.h"
6765#undef SHIFT
6766
6767#define SHIFT 1
6768#include "helper_template.h"
6769#undef SHIFT
6770
6771#define SHIFT 2
6772#include "helper_template.h"
6773#undef SHIFT
6774
6775#ifdef TARGET_X86_64
6776
6777#define SHIFT 3
6778#include "helper_template.h"
6779#undef SHIFT
6780
6781#endif
6782
6783/* bit operations */
6784target_ulong helper_bsf(target_ulong t0)
6785{
6786 int count;
6787 target_ulong res;
6788
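    /* Note: a zero source would make this loop (and the one in helper_bsr)
       spin forever; the translator is expected to handle the zero case
       before calling these helpers, since BSF/BSR leave the destination
       undefined for a zero operand. */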
6789 res = t0;
6790 count = 0;
6791 while ((res & 1) == 0) {
6792 count++;
6793 res >>= 1;
6794 }
6795 return count;
6796}
6797
6798target_ulong helper_bsr(target_ulong t0)
6799{
6800 int count;
6801 target_ulong res, mask;
6802
6803 res = t0;
6804 count = TARGET_LONG_BITS - 1;
6805 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6806 while ((res & mask) == 0) {
6807 count--;
6808 res <<= 1;
6809 }
6810 return count;
6811}
6812
6813
6814static int compute_all_eflags(void)
6815{
6816 return CC_SRC;
6817}
6818
6819static int compute_c_eflags(void)
6820{
6821 return CC_SRC & CC_C;
6822}
6823
6824#ifndef VBOX
6825CCTable cc_table[CC_OP_NB] = {
6826 [CC_OP_DYNAMIC] = { /* should never happen */ },
6827
6828 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6829
6830 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6831 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6832 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6833
6834 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6835 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6836 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6837
6838 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6839 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6840 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6841
6842 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6843 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6844 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6845
6846 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6847 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6848 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6849
6850 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6851 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6852 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6853
6854 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6855 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6856 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6857
6858 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6859 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6860 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6861
6862 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6863 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6864 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6865
6866 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6867 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6868 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6869
6870#ifdef TARGET_X86_64
6871 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6872
6873 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6874
6875 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6876
6877 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6878
6879 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6880
6881 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6882
6883 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6884
6885 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6886
6887 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6888
6889 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6890#endif
6891};
6892#else /* VBOX */
6893/* Sync carefully with cpu.h */
6894CCTable cc_table[CC_OP_NB] = {
6895 /* CC_OP_DYNAMIC */ { 0, 0 },
6896
6897 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6898
6899 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6900 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6901 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6902#ifdef TARGET_X86_64
6903 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6904#else
6905 /* CC_OP_MULQ */ { 0, 0 },
6906#endif
6907
6908 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6909 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6910 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6911#ifdef TARGET_X86_64
6912 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6913#else
6914 /* CC_OP_ADDQ */ { 0, 0 },
6915#endif
6916
6917 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6918 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6919 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6920#ifdef TARGET_X86_64
6921 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6922#else
6923 /* CC_OP_ADCQ */ { 0, 0 },
6924#endif
6925
6926 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6927 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6928 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6929#ifdef TARGET_X86_64
6930 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6931#else
6932 /* CC_OP_SUBQ */ { 0, 0 },
6933#endif
6934
6935 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6936 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6937 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6938#ifdef TARGET_X86_64
6939 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6940#else
6941 /* CC_OP_SBBQ */ { 0, 0 },
6942#endif
6943
6944 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6945 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6946 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6947#ifdef TARGET_X86_64
6948 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6949#else
6950 /* CC_OP_LOGICQ */ { 0, 0 },
6951#endif
6952
6953 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6954 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6955 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6956#ifdef TARGET_X86_64
6957 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6958#else
6959 /* CC_OP_INCQ */ { 0, 0 },
6960#endif
6961
6962 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6963 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6964 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6965#ifdef TARGET_X86_64
6966 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6967#else
6968 /* CC_OP_DECQ */ { 0, 0 },
6969#endif
6970
6971 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6972 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6973 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6974#ifdef TARGET_X86_64
6975 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6976#else
6977 /* CC_OP_SHLQ */ { 0, 0 },
6978#endif
6979
6980 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6981 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6982 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6983#ifdef TARGET_X86_64
6984 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6985#else
6986 /* CC_OP_SARQ */ { 0, 0 },
6987#endif
6988};
6989#endif /* VBOX */