VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@28288

Last change on this file since 28288 was 28030, checked in by vboxsync, 15 years ago

VMM: SpeedStep and related MSRs

  • Property svn:eol-style set to native
File size: 195.2 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
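/* The i386 parity flag (PF) is set when the low 8 bits of a result contain
   an even number of 1 bits.  parity_table[] is indexed by that low byte and
   yields CC_P (PF set) or 0, e.g. parity_table[0x03] == CC_P (two bits set)
   while parity_table[0x07] == 0 (three bits set). */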
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
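/* RCL/RCR rotate through the carry flag, so a byte rotate effectively works
   on 9 bits (8 data bits + CF) and a word rotate on 17 bits.  These tables
   reduce a masked shift count (0..31) modulo 9 resp. 17, e.g.
   rclb_table[10] == 1 and rclw_table[18] == 1. */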
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
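/* Constants used by the x87 constant-load instructions: the entries above
   correspond to FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T. */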
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to AMD manual, should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
 180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
 196/* return non-zero if error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with CPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
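/* Example: with the granularity bit set (DESC_G_MASK) a raw 20-bit limit of
   0xFFFFF becomes (0xFFFFF << 12) | 0xFFF = 0xFFFFFFFF, i.e. a 4GB segment;
   without it the limit is interpreted in bytes. */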
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
273 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
274 flags |= (3 << DESC_DPL_SHIFT);
275
276 cpu_x86_load_seg_cache(env, seg, selector,
277 (selector << 4), 0xffff, flags);
278#else
279 cpu_x86_load_seg_cache(env, seg, selector,
280 (selector << 4), 0xffff, 0);
281#endif
282}
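/* In vm86 mode a segment register simply holds a paragraph number: the base
   is selector * 16 and the limit is 64KB.  The VBOX variant additionally
   fills in present/writable/DPL=3 descriptor attributes, presumably because
   hardware-assisted execution (VT-x) rejects segment caches whose attribute
   bits are not well formed. */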
283
284#ifndef VBOX
285static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
286#else /* VBOX */
287DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
288#endif /* VBOX */
289 uint32_t *esp_ptr, int dpl)
290{
291#ifndef VBOX
292 int type, index, shift;
293#else
294 unsigned int type, index, shift;
295#endif
296
297#if 0
298 {
299 int i;
300 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
301 for(i=0;i<env->tr.limit;i++) {
302 printf("%02x ", env->tr.base[i]);
303 if ((i & 7) == 7) printf("\n");
304 }
305 printf("\n");
306 }
307#endif
308
309 if (!(env->tr.flags & DESC_P_MASK))
310 cpu_abort(env, "invalid tss");
311 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
312 if ((type & 7) != 1)
313 cpu_abort(env, "invalid tss type");
314 shift = type >> 3;
315 index = (dpl * 4 + 2) << shift;
316 if (index + (4 << shift) - 1 > env->tr.limit)
317 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
318 if (shift == 0) {
319 *esp_ptr = lduw_kernel(env->tr.base + index);
320 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
321 } else {
322 *esp_ptr = ldl_kernel(env->tr.base + index);
323 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
324 }
325}
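/* TSS layout note: in a 32-bit TSS the {ESPn, SSn} pairs for privilege
   levels 0..2 start at offset 4 and are 8 bytes apart; in a 16-bit TSS the
   {SPn, SSn} word pairs start at offset 2 and are 4 bytes apart.  Both cases
   are covered by index = (dpl * 4 + 2) << shift above. */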
326
327/* XXX: merge with load_seg() */
328static void tss_load_seg(int seg_reg, int selector)
329{
330 uint32_t e1, e2;
331 int rpl, dpl, cpl;
332
333#ifdef VBOX
334 e1 = e2 = 0;
335 cpl = env->hflags & HF_CPL_MASK;
336 /* Trying to load a selector with CPL=1? */
337 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
338 {
339 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
340 selector = selector & 0xfffc;
341 }
342#endif
343
344 if ((selector & 0xfffc) != 0) {
345 if (load_segment(&e1, &e2, selector) != 0)
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 if (!(e2 & DESC_S_MASK))
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 rpl = selector & 3;
350 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
351 cpl = env->hflags & HF_CPL_MASK;
352 if (seg_reg == R_CS) {
353 if (!(e2 & DESC_CS_MASK))
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 /* XXX: is it correct ? */
356 if (dpl != rpl)
357 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
358 if ((e2 & DESC_C_MASK) && dpl > rpl)
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 } else if (seg_reg == R_SS) {
361 /* SS must be writable data */
362 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
363 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
364 if (dpl != cpl || dpl != rpl)
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 } else {
367 /* not readable code */
368 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
 370 /* if data or non-conforming code, check the rights */
371 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
372 if (dpl < cpl || dpl < rpl)
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 }
375 }
376 if (!(e2 & DESC_P_MASK))
377 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
378 cpu_x86_load_seg_cache(env, seg_reg, selector,
379 get_seg_base(e1, e2),
380 get_seg_limit(e1, e2),
381 e2);
382 } else {
383 if (seg_reg == R_SS || seg_reg == R_CS)
384 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
385#ifdef VBOX
386#if 0
387 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 0, 0, 0);
390#endif
391#endif
392 }
393}
394
395#define SWITCH_TSS_JMP 0
396#define SWITCH_TSS_IRET 1
397#define SWITCH_TSS_CALL 2
398
399/* XXX: restore CPU state in registers (PowerPC case) */
400static void switch_tss(int tss_selector,
401 uint32_t e1, uint32_t e2, int source,
402 uint32_t next_eip)
403{
404 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
405 target_ulong tss_base;
406 uint32_t new_regs[8], new_segs[6];
407 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
408 uint32_t old_eflags, eflags_mask;
409 SegmentCache *dt;
410#ifndef VBOX
411 int index;
412#else
413 unsigned int index;
414#endif
415 target_ulong ptr;
416
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418#ifdef DEBUG_PCALL
419 if (loglevel & CPU_LOG_PCALL)
420 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
421#endif
422
423#if defined(VBOX) && defined(DEBUG)
424 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
425#endif
426
427 /* if task gate, we read the TSS segment and we load it */
428 if (type == 5) {
429 if (!(e2 & DESC_P_MASK))
430 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
431 tss_selector = e1 >> 16;
432 if (tss_selector & 4)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 if (load_segment(&e1, &e2, tss_selector) != 0)
435 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
436 if (e2 & DESC_S_MASK)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
439 if ((type & 7) != 1)
440 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
441 }
442
443 if (!(e2 & DESC_P_MASK))
444 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
445
446 if (type & 8)
447 tss_limit_max = 103;
448 else
449 tss_limit_max = 43;
450 tss_limit = get_seg_limit(e1, e2);
451 tss_base = get_seg_base(e1, e2);
452 if ((tss_selector & 4) != 0 ||
453 tss_limit < tss_limit_max)
454 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
455 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
456 if (old_type & 8)
457 old_tss_limit_max = 103;
458 else
459 old_tss_limit_max = 43;
460
461 /* read all the registers from the new TSS */
462 if (type & 8) {
463 /* 32 bit */
464 new_cr3 = ldl_kernel(tss_base + 0x1c);
465 new_eip = ldl_kernel(tss_base + 0x20);
466 new_eflags = ldl_kernel(tss_base + 0x24);
467 for(i = 0; i < 8; i++)
468 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
469 for(i = 0; i < 6; i++)
470 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
471 new_ldt = lduw_kernel(tss_base + 0x60);
472 new_trap = ldl_kernel(tss_base + 0x64);
473 } else {
474 /* 16 bit */
475 new_cr3 = 0;
476 new_eip = lduw_kernel(tss_base + 0x0e);
477 new_eflags = lduw_kernel(tss_base + 0x10);
478 for(i = 0; i < 8; i++)
479 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
480 for(i = 0; i < 4; i++)
481 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
482 new_ldt = lduw_kernel(tss_base + 0x2a);
483 new_segs[R_FS] = 0;
484 new_segs[R_GS] = 0;
485 new_trap = 0;
486 }
487
488 /* NOTE: we must avoid memory exceptions during the task switch,
489 so we make dummy accesses before */
490 /* XXX: it can still fail in some cases, so a bigger hack is
 491 necessary to validate the TLB after having done the accesses */
492
493 v1 = ldub_kernel(env->tr.base);
494 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
495 stb_kernel(env->tr.base, v1);
496 stb_kernel(env->tr.base + old_tss_limit_max, v2);
497
498 /* clear busy bit (it is restartable) */
499 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
500 target_ulong ptr;
501 uint32_t e2;
502 ptr = env->gdt.base + (env->tr.selector & ~7);
503 e2 = ldl_kernel(ptr + 4);
504 e2 &= ~DESC_TSS_BUSY_MASK;
505 stl_kernel(ptr + 4, e2);
506 }
507 old_eflags = compute_eflags();
508 if (source == SWITCH_TSS_IRET)
509 old_eflags &= ~NT_MASK;
510
511 /* save the current state in the old TSS */
512 if (type & 8) {
513 /* 32 bit */
514 stl_kernel(env->tr.base + 0x20, next_eip);
515 stl_kernel(env->tr.base + 0x24, old_eflags);
516 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
517 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
518 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
519 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
520 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
521 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
522 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
523 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
524 for(i = 0; i < 6; i++)
525 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
526#ifdef VBOX
527 /* Must store the ldt as it gets reloaded and might have been changed. */
528 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
529#endif
530#if defined(VBOX) && defined(DEBUG)
531 printf("TSS 32 bits switch\n");
532 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
533#endif
534 } else {
535 /* 16 bit */
536 stw_kernel(env->tr.base + 0x0e, next_eip);
537 stw_kernel(env->tr.base + 0x10, old_eflags);
538 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
539 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
540 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
541 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
542 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
543 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
544 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
545 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
546 for(i = 0; i < 4; i++)
547 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
548#ifdef VBOX
549 /* Must store the ldt as it gets reloaded and might have been changed. */
550 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
551#endif
552 }
553
 554 /* now if an exception occurs, it will occur in the next task
555 context */
556
557 if (source == SWITCH_TSS_CALL) {
558 stw_kernel(tss_base, env->tr.selector);
559 new_eflags |= NT_MASK;
560 }
561
562 /* set busy bit */
563 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
564 target_ulong ptr;
565 uint32_t e2;
566 ptr = env->gdt.base + (tss_selector & ~7);
567 e2 = ldl_kernel(ptr + 4);
568 e2 |= DESC_TSS_BUSY_MASK;
569 stl_kernel(ptr + 4, e2);
570 }
571
572 /* set the new CPU state */
573 /* from this point, any exception which occurs can give problems */
574 env->cr[0] |= CR0_TS_MASK;
575 env->hflags |= HF_TS_MASK;
576 env->tr.selector = tss_selector;
577 env->tr.base = tss_base;
578 env->tr.limit = tss_limit;
579 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
580
581 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
582 cpu_x86_update_cr3(env, new_cr3);
583 }
584
585 /* load all registers without an exception, then reload them with
586 possible exception */
587 env->eip = new_eip;
588 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
589 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
590 if (!(type & 8))
591 eflags_mask &= 0xffff;
592 load_eflags(new_eflags, eflags_mask);
593 /* XXX: what to do in 16 bit case ? */
594 EAX = new_regs[0];
595 ECX = new_regs[1];
596 EDX = new_regs[2];
597 EBX = new_regs[3];
598 ESP = new_regs[4];
599 EBP = new_regs[5];
600 ESI = new_regs[6];
601 EDI = new_regs[7];
602 if (new_eflags & VM_MASK) {
603 for(i = 0; i < 6; i++)
604 load_seg_vm(i, new_segs[i]);
605 /* in vm86, CPL is always 3 */
606 cpu_x86_set_cpl(env, 3);
607 } else {
 608 /* CPL is set to the RPL of CS */
609 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
610 /* first just selectors as the rest may trigger exceptions */
611 for(i = 0; i < 6; i++)
612 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
613 }
614
615 env->ldt.selector = new_ldt & ~4;
616 env->ldt.base = 0;
617 env->ldt.limit = 0;
618 env->ldt.flags = 0;
619
620 /* load the LDT */
621 if (new_ldt & 4)
622 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
623
624 if ((new_ldt & 0xfffc) != 0) {
625 dt = &env->gdt;
626 index = new_ldt & ~7;
627 if ((index + 7) > dt->limit)
628 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
629 ptr = dt->base + index;
630 e1 = ldl_kernel(ptr);
631 e2 = ldl_kernel(ptr + 4);
632 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
633 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
634 if (!(e2 & DESC_P_MASK))
635 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
636 load_seg_cache_raw_dt(&env->ldt, e1, e2);
637 }
638
639 /* load the segments */
640 if (!(new_eflags & VM_MASK)) {
641 tss_load_seg(R_CS, new_segs[R_CS]);
642 tss_load_seg(R_SS, new_segs[R_SS]);
643 tss_load_seg(R_ES, new_segs[R_ES]);
644 tss_load_seg(R_DS, new_segs[R_DS]);
645 tss_load_seg(R_FS, new_segs[R_FS]);
646 tss_load_seg(R_GS, new_segs[R_GS]);
647 }
648
649 /* check that EIP is in the CS segment limits */
650 if (new_eip > env->segs[R_CS].limit) {
651 /* XXX: different exception if CALL ? */
652 raise_exception_err(EXCP0D_GPF, 0);
653 }
654}
655
656/* check if Port I/O is allowed in TSS */
657#ifndef VBOX
658static inline void check_io(int addr, int size)
659{
660 int io_offset, val, mask;
661
662#else /* VBOX */
663DECLINLINE(void) check_io(int addr, int size)
664{
665 int val, mask;
666 unsigned int io_offset;
667#endif /* VBOX */
668 /* TSS must be a valid 32 bit one */
669 if (!(env->tr.flags & DESC_P_MASK) ||
670 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
671 env->tr.limit < 103)
672 goto fail;
673 io_offset = lduw_kernel(env->tr.base + 0x66);
674 io_offset += (addr >> 3);
675 /* Note: the check needs two bytes */
676 if ((io_offset + 1) > env->tr.limit)
677 goto fail;
678 val = lduw_kernel(env->tr.base + io_offset);
679 val >>= (addr & 7);
680 mask = (1 << size) - 1;
681 /* all bits must be zero to allow the I/O */
682 if ((val & mask) != 0) {
683 fail:
684 raise_exception_err(EXCP0D_GPF, 0);
685 }
686}
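/* Example: an "outb" to port 0x3F8 reads the 16-bit word at
   io_offset + (0x3F8 >> 3) in the TSS I/O permission bitmap, shifts it right
   by (0x3F8 & 7) and tests it against mask 0x1; any set bit in the tested
   range raises #GP(0). */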
687
688#ifdef VBOX
689/* Keep in sync with gen_check_external_event() */
690void helper_check_external_event()
691{
692 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
693 | CPU_INTERRUPT_EXTERNAL_TIMER
694 | CPU_INTERRUPT_EXTERNAL_DMA))
695 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
696 && (env->eflags & IF_MASK)
697 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
698 {
699 helper_external_event();
700 }
701
702}
703
704void helper_sync_seg(uint32_t reg)
705{
706 if (env->segs[reg].newselector)
707 sync_seg(env, reg, env->segs[reg].newselector);
708}
709#endif
710
711void helper_check_iob(uint32_t t0)
712{
713 check_io(t0, 1);
714}
715
716void helper_check_iow(uint32_t t0)
717{
718 check_io(t0, 2);
719}
720
721void helper_check_iol(uint32_t t0)
722{
723 check_io(t0, 4);
724}
725
726void helper_outb(uint32_t port, uint32_t data)
727{
728 cpu_outb(env, port, data & 0xff);
729}
730
731target_ulong helper_inb(uint32_t port)
732{
733 return cpu_inb(env, port);
734}
735
736void helper_outw(uint32_t port, uint32_t data)
737{
738 cpu_outw(env, port, data & 0xffff);
739}
740
741target_ulong helper_inw(uint32_t port)
742{
743 return cpu_inw(env, port);
744}
745
746void helper_outl(uint32_t port, uint32_t data)
747{
748 cpu_outl(env, port, data);
749}
750
751target_ulong helper_inl(uint32_t port)
752{
753 return cpu_inl(env, port);
754}
755
756#ifndef VBOX
757static inline unsigned int get_sp_mask(unsigned int e2)
758#else /* VBOX */
759DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
760#endif /* VBOX */
761{
762 if (e2 & DESC_B_MASK)
763 return 0xffffffff;
764 else
765 return 0xffff;
766}
767
768#ifdef TARGET_X86_64
769#define SET_ESP(val, sp_mask)\
770do {\
771 if ((sp_mask) == 0xffff)\
772 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
773 else if ((sp_mask) == 0xffffffffLL)\
774 ESP = (uint32_t)(val);\
775 else\
776 ESP = (val);\
777} while (0)
778#else
779#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
780#endif
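/* SET_ESP only updates the part of ESP selected by the stack mask: with a
   16-bit stack segment (sp_mask == 0xffff) the upper bits of ESP/RSP are
   preserved, while a 32-bit stack zero-extends the new value on x86_64. */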
781
782/* in 64-bit machines, this can overflow. So this segment addition macro
783 * can be used to trim the value to 32-bit whenever needed */
784#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
785
786/* XXX: add a is_user flag to have proper security support */
787#define PUSHW(ssp, sp, sp_mask, val)\
788{\
789 sp -= 2;\
790 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
791}
792
793#define PUSHL(ssp, sp, sp_mask, val)\
794{\
795 sp -= 4;\
796 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
797}
798
799#define POPW(ssp, sp, sp_mask, val)\
800{\
801 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
802 sp += 2;\
803}
804
805#define POPL(ssp, sp, sp_mask, val)\
806{\
807 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
808 sp += 4;\
809}
810
811/* protected mode interrupt */
812static void do_interrupt_protected(int intno, int is_int, int error_code,
813 unsigned int next_eip, int is_hw)
814{
815 SegmentCache *dt;
816 target_ulong ptr, ssp;
817 int type, dpl, selector, ss_dpl, cpl;
818 int has_error_code, new_stack, shift;
819 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
820 uint32_t old_eip, sp_mask;
821
822#ifdef VBOX
823 ss = ss_e1 = ss_e2 = 0;
824# ifdef VBOX_WITH_VMI
825 if ( intno == 6
826 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
827 {
828 env->exception_index = EXCP_PARAV_CALL;
829 cpu_loop_exit();
830 }
831# endif
832 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
833 cpu_loop_exit();
834#endif
835
836 has_error_code = 0;
837 if (!is_int && !is_hw) {
838 switch(intno) {
839 case 8:
840 case 10:
841 case 11:
842 case 12:
843 case 13:
844 case 14:
845 case 17:
846 has_error_code = 1;
847 break;
848 }
849 }
850 if (is_int)
851 old_eip = next_eip;
852 else
853 old_eip = env->eip;
854
855 dt = &env->idt;
856#ifndef VBOX
857 if (intno * 8 + 7 > dt->limit)
858#else
859 if ((unsigned)intno * 8 + 7 > dt->limit)
860#endif
861 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
862 ptr = dt->base + intno * 8;
863 e1 = ldl_kernel(ptr);
864 e2 = ldl_kernel(ptr + 4);
865 /* check gate type */
866 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
867 switch(type) {
868 case 5: /* task gate */
869 /* must do that check here to return the correct error code */
870 if (!(e2 & DESC_P_MASK))
871 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
872 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
873 if (has_error_code) {
874 int type;
875 uint32_t mask;
876 /* push the error code */
877 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
878 shift = type >> 3;
879 if (env->segs[R_SS].flags & DESC_B_MASK)
880 mask = 0xffffffff;
881 else
882 mask = 0xffff;
883 esp = (ESP - (2 << shift)) & mask;
884 ssp = env->segs[R_SS].base + esp;
885 if (shift)
886 stl_kernel(ssp, error_code);
887 else
888 stw_kernel(ssp, error_code);
889 SET_ESP(esp, mask);
890 }
891 return;
892 case 6: /* 286 interrupt gate */
893 case 7: /* 286 trap gate */
894 case 14: /* 386 interrupt gate */
895 case 15: /* 386 trap gate */
896 break;
897 default:
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 break;
900 }
901 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
902 cpl = env->hflags & HF_CPL_MASK;
903 /* check privilege if software int */
904 if (is_int && dpl < cpl)
905 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
906 /* check valid bit */
907 if (!(e2 & DESC_P_MASK))
908 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
909 selector = e1 >> 16;
910 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
911 if ((selector & 0xfffc) == 0)
912 raise_exception_err(EXCP0D_GPF, 0);
913
914 if (load_segment(&e1, &e2, selector) != 0)
915 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
916 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
917 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
918 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
919 if (dpl > cpl)
920 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
921 if (!(e2 & DESC_P_MASK))
922 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
923 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
924 /* to inner privilege */
925 get_ss_esp_from_tss(&ss, &esp, dpl);
926 if ((ss & 0xfffc) == 0)
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if ((ss & 3) != dpl)
929 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
930 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
931 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
932 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
933 if (ss_dpl != dpl)
934 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
935 if (!(ss_e2 & DESC_S_MASK) ||
936 (ss_e2 & DESC_CS_MASK) ||
937 !(ss_e2 & DESC_W_MASK))
938 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
939 if (!(ss_e2 & DESC_P_MASK))
940#ifdef VBOX /* See page 3-477 of 253666.pdf */
941 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
942#else
943 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
944#endif
945 new_stack = 1;
946 sp_mask = get_sp_mask(ss_e2);
947 ssp = get_seg_base(ss_e1, ss_e2);
948#if defined(VBOX) && defined(DEBUG)
949 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
950#endif
951 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
952 /* to same privilege */
953 if (env->eflags & VM_MASK)
954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
955 new_stack = 0;
956 sp_mask = get_sp_mask(env->segs[R_SS].flags);
957 ssp = env->segs[R_SS].base;
958 esp = ESP;
959 dpl = cpl;
960 } else {
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962 new_stack = 0; /* avoid warning */
963 sp_mask = 0; /* avoid warning */
964 ssp = 0; /* avoid warning */
965 esp = 0; /* avoid warning */
966 }
967
968 shift = type >> 3;
969
970#if 0
971 /* XXX: check that enough room is available */
972 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
973 if (env->eflags & VM_MASK)
974 push_size += 8;
975 push_size <<= shift;
976#endif
977 if (shift == 1) {
978 if (new_stack) {
979 if (env->eflags & VM_MASK) {
980 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
981 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
982 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
983 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
984 }
985 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
986 PUSHL(ssp, esp, sp_mask, ESP);
987 }
988 PUSHL(ssp, esp, sp_mask, compute_eflags());
989 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
990 PUSHL(ssp, esp, sp_mask, old_eip);
991 if (has_error_code) {
992 PUSHL(ssp, esp, sp_mask, error_code);
993 }
994 } else {
995 if (new_stack) {
996 if (env->eflags & VM_MASK) {
997 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
998 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
999 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1000 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1001 }
1002 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1003 PUSHW(ssp, esp, sp_mask, ESP);
1004 }
1005 PUSHW(ssp, esp, sp_mask, compute_eflags());
1006 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1007 PUSHW(ssp, esp, sp_mask, old_eip);
1008 if (has_error_code) {
1009 PUSHW(ssp, esp, sp_mask, error_code);
1010 }
1011 }
1012
1013 if (new_stack) {
1014 if (env->eflags & VM_MASK) {
1015 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1016 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1017 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1018 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1019 }
1020 ss = (ss & ~3) | dpl;
1021 cpu_x86_load_seg_cache(env, R_SS, ss,
1022 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1023 }
1024 SET_ESP(esp, sp_mask);
1025
1026 selector = (selector & ~3) | dpl;
1027 cpu_x86_load_seg_cache(env, R_CS, selector,
1028 get_seg_base(e1, e2),
1029 get_seg_limit(e1, e2),
1030 e2);
1031 cpu_x86_set_cpl(env, dpl);
1032 env->eip = offset;
1033
1034 /* interrupt gate clear IF mask */
1035 if ((type & 1) == 0) {
1036 env->eflags &= ~IF_MASK;
1037 }
1038#ifndef VBOX
1039 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1040#else
1041 /*
1042 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
 1043 * gets confused by seemingly changed EFLAGS. See #3491 and
1044 * public bug #2341.
1045 */
1046 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1047#endif
1048}
1049#ifdef VBOX
1050
1051/* check if VME interrupt redirection is enabled in TSS */
1052DECLINLINE(bool) is_vme_irq_redirected(int intno)
1053{
1054 unsigned int io_offset, intredir_offset;
1055 unsigned char val, mask;
1056
1057 /* TSS must be a valid 32 bit one */
1058 if (!(env->tr.flags & DESC_P_MASK) ||
1059 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1060 env->tr.limit < 103)
1061 goto fail;
1062 io_offset = lduw_kernel(env->tr.base + 0x66);
1063 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1064 if (io_offset < 0x68 + 0x20)
1065 io_offset = 0x68 + 0x20;
1066 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1067 intredir_offset = io_offset - 0x20;
1068
1069 intredir_offset += (intno >> 3);
1070 if ((intredir_offset) > env->tr.limit)
1071 goto fail;
1072
1073 val = ldub_kernel(env->tr.base + intredir_offset);
1074 mask = 1 << (unsigned char)(intno & 7);
1075
1076 /* bit set means no redirection. */
1077 if ((val & mask) != 0) {
1078 return false;
1079 }
1080 return true;
1081
1082fail:
1083 raise_exception_err(EXCP0D_GPF, 0);
1084 return true;
1085}
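/* Example: for "int 0x21" the byte at intredir_offset + (0x21 >> 3) is read
   and tested against mask 1 << (0x21 & 7) == 0x02; a clear bit means the
   interrupt is redirected to the vm86 interrupt vector table. */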
1086
1087/* V86 mode software interrupt with CR4.VME=1 */
1088static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1089{
1090 target_ulong ptr, ssp;
1091 int selector;
1092 uint32_t offset, esp;
1093 uint32_t old_cs, old_eflags;
1094 uint32_t iopl;
1095
1096 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1097
1098 if (!is_vme_irq_redirected(intno))
1099 {
1100 if (iopl == 3)
1101 {
1102 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1103 return;
1104 }
1105 else
1106 raise_exception_err(EXCP0D_GPF, 0);
1107 }
1108
1109 /* virtual mode idt is at linear address 0 */
1110 ptr = 0 + intno * 4;
1111 offset = lduw_kernel(ptr);
1112 selector = lduw_kernel(ptr + 2);
1113 esp = ESP;
1114 ssp = env->segs[R_SS].base;
1115 old_cs = env->segs[R_CS].selector;
1116
1117 old_eflags = compute_eflags();
1118 if (iopl < 3)
1119 {
1120 /* copy VIF into IF and set IOPL to 3 */
1121 if (env->eflags & VIF_MASK)
1122 old_eflags |= IF_MASK;
1123 else
1124 old_eflags &= ~IF_MASK;
1125
1126 old_eflags |= (3 << IOPL_SHIFT);
1127 }
1128
1129 /* XXX: use SS segment size ? */
1130 PUSHW(ssp, esp, 0xffff, old_eflags);
1131 PUSHW(ssp, esp, 0xffff, old_cs);
1132 PUSHW(ssp, esp, 0xffff, next_eip);
1133
1134 /* update processor state */
1135 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1136 env->eip = offset;
1137 env->segs[R_CS].selector = selector;
1138 env->segs[R_CS].base = (selector << 4);
1139 env->eflags &= ~(TF_MASK | RF_MASK);
1140
1141 if (iopl < 3)
1142 env->eflags &= ~VIF_MASK;
1143 else
1144 env->eflags &= ~IF_MASK;
1145}
1146#endif /* VBOX */
1147
1148#ifdef TARGET_X86_64
1149
1150#define PUSHQ(sp, val)\
1151{\
1152 sp -= 8;\
1153 stq_kernel(sp, (val));\
1154}
1155
1156#define POPQ(sp, val)\
1157{\
1158 val = ldq_kernel(sp);\
1159 sp += 8;\
1160}
1161
1162#ifndef VBOX
1163static inline target_ulong get_rsp_from_tss(int level)
1164#else /* VBOX */
1165DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1166#endif /* VBOX */
1167{
1168 int index;
1169
1170#if 0
1171 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1172 env->tr.base, env->tr.limit);
1173#endif
1174
1175 if (!(env->tr.flags & DESC_P_MASK))
1176 cpu_abort(env, "invalid tss");
1177 index = 8 * level + 4;
1178 if ((index + 7) > env->tr.limit)
1179 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1180 return ldq_kernel(env->tr.base + index);
1181}
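/* 64-bit TSS layout: RSP0..RSP2 live at offsets 4, 12 and 20 and IST1..IST7
   at offsets 36..84, so index = 8 * level + 4 covers both the privilege
   level case (0..2) and the IST case, where callers pass ist + 3. */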
1182
1183/* 64 bit interrupt */
1184static void do_interrupt64(int intno, int is_int, int error_code,
1185 target_ulong next_eip, int is_hw)
1186{
1187 SegmentCache *dt;
1188 target_ulong ptr;
1189 int type, dpl, selector, cpl, ist;
1190 int has_error_code, new_stack;
1191 uint32_t e1, e2, e3, ss;
1192 target_ulong old_eip, esp, offset;
1193
1194#ifdef VBOX
1195 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1196 cpu_loop_exit();
1197#endif
1198
1199 has_error_code = 0;
1200 if (!is_int && !is_hw) {
1201 switch(intno) {
1202 case 8:
1203 case 10:
1204 case 11:
1205 case 12:
1206 case 13:
1207 case 14:
1208 case 17:
1209 has_error_code = 1;
1210 break;
1211 }
1212 }
1213 if (is_int)
1214 old_eip = next_eip;
1215 else
1216 old_eip = env->eip;
1217
1218 dt = &env->idt;
1219 if (intno * 16 + 15 > dt->limit)
1220 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1221 ptr = dt->base + intno * 16;
1222 e1 = ldl_kernel(ptr);
1223 e2 = ldl_kernel(ptr + 4);
1224 e3 = ldl_kernel(ptr + 8);
1225 /* check gate type */
1226 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1227 switch(type) {
1228 case 14: /* 386 interrupt gate */
1229 case 15: /* 386 trap gate */
1230 break;
1231 default:
1232 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1233 break;
1234 }
1235 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1236 cpl = env->hflags & HF_CPL_MASK;
1237 /* check privilege if software int */
1238 if (is_int && dpl < cpl)
1239 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1240 /* check valid bit */
1241 if (!(e2 & DESC_P_MASK))
1242 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1243 selector = e1 >> 16;
1244 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1245 ist = e2 & 7;
1246 if ((selector & 0xfffc) == 0)
1247 raise_exception_err(EXCP0D_GPF, 0);
1248
1249 if (load_segment(&e1, &e2, selector) != 0)
1250 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1251 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1252 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1253 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1254 if (dpl > cpl)
1255 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1256 if (!(e2 & DESC_P_MASK))
1257 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1258 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1259 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1260 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1261 /* to inner privilege */
1262 if (ist != 0)
1263 esp = get_rsp_from_tss(ist + 3);
1264 else
1265 esp = get_rsp_from_tss(dpl);
1266 esp &= ~0xfLL; /* align stack */
1267 ss = 0;
1268 new_stack = 1;
1269 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1270 /* to same privilege */
1271 if (env->eflags & VM_MASK)
1272 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1273 new_stack = 0;
1274 if (ist != 0)
1275 esp = get_rsp_from_tss(ist + 3);
1276 else
1277 esp = ESP;
1278 esp &= ~0xfLL; /* align stack */
1279 dpl = cpl;
1280 } else {
1281 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1282 new_stack = 0; /* avoid warning */
1283 esp = 0; /* avoid warning */
1284 }
1285
1286 PUSHQ(esp, env->segs[R_SS].selector);
1287 PUSHQ(esp, ESP);
1288 PUSHQ(esp, compute_eflags());
1289 PUSHQ(esp, env->segs[R_CS].selector);
1290 PUSHQ(esp, old_eip);
1291 if (has_error_code) {
1292 PUSHQ(esp, error_code);
1293 }
1294
1295 if (new_stack) {
1296 ss = 0 | dpl;
1297 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1298 }
1299 ESP = esp;
1300
1301 selector = (selector & ~3) | dpl;
1302 cpu_x86_load_seg_cache(env, R_CS, selector,
1303 get_seg_base(e1, e2),
1304 get_seg_limit(e1, e2),
1305 e2);
1306 cpu_x86_set_cpl(env, dpl);
1307 env->eip = offset;
1308
1309 /* interrupt gate clear IF mask */
1310 if ((type & 1) == 0) {
1311 env->eflags &= ~IF_MASK;
1312 }
1313
1314#ifndef VBOX
1315 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1316#else
1317 /*
1318 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
 1319 * gets confused by seemingly changed EFLAGS. See #3491 and
1320 * public bug #2341.
1321 */
1322 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1323#endif
1324}
1325#endif
1326
1327#if defined(CONFIG_USER_ONLY)
1328void helper_syscall(int next_eip_addend)
1329{
1330 env->exception_index = EXCP_SYSCALL;
1331 env->exception_next_eip = env->eip + next_eip_addend;
1332 cpu_loop_exit();
1333}
1334#else
1335void helper_syscall(int next_eip_addend)
1336{
1337 int selector;
1338
1339 if (!(env->efer & MSR_EFER_SCE)) {
1340 raise_exception_err(EXCP06_ILLOP, 0);
1341 }
1342 selector = (env->star >> 32) & 0xffff;
1343#ifdef TARGET_X86_64
1344 if (env->hflags & HF_LMA_MASK) {
1345 int code64;
1346
1347 ECX = env->eip + next_eip_addend;
1348 env->regs[11] = compute_eflags();
1349
1350 code64 = env->hflags & HF_CS64_MASK;
1351
1352 cpu_x86_set_cpl(env, 0);
1353 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1354 0, 0xffffffff,
1355 DESC_G_MASK | DESC_P_MASK |
1356 DESC_S_MASK |
1357 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1358 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1359 0, 0xffffffff,
1360 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1361 DESC_S_MASK |
1362 DESC_W_MASK | DESC_A_MASK);
1363 env->eflags &= ~env->fmask;
1364 load_eflags(env->eflags, 0);
1365 if (code64)
1366 env->eip = env->lstar;
1367 else
1368 env->eip = env->cstar;
1369 } else
1370#endif
1371 {
1372 ECX = (uint32_t)(env->eip + next_eip_addend);
1373
1374 cpu_x86_set_cpl(env, 0);
1375 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1376 0, 0xffffffff,
1377 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1378 DESC_S_MASK |
1379 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1380 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1381 0, 0xffffffff,
1382 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1383 DESC_S_MASK |
1384 DESC_W_MASK | DESC_A_MASK);
1385 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1386 env->eip = (uint32_t)env->star;
1387 }
1388}
1389#endif
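/* SYSCALL summary (long mode): CS is loaded from STAR[47:32] and SS from
   STAR[47:32] + 8, RCX receives the return RIP and R11 the RFLAGS image,
   RFLAGS is masked with SFMASK (env->fmask) and RIP is taken from LSTAR
   (64-bit code) or CSTAR (compatibility mode), as implemented above. */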
1390
1391void helper_sysret(int dflag)
1392{
1393 int cpl, selector;
1394
1395 if (!(env->efer & MSR_EFER_SCE)) {
1396 raise_exception_err(EXCP06_ILLOP, 0);
1397 }
1398 cpl = env->hflags & HF_CPL_MASK;
1399 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1400 raise_exception_err(EXCP0D_GPF, 0);
1401 }
1402 selector = (env->star >> 48) & 0xffff;
1403#ifdef TARGET_X86_64
1404 if (env->hflags & HF_LMA_MASK) {
1405 if (dflag == 2) {
1406 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1407 0, 0xffffffff,
1408 DESC_G_MASK | DESC_P_MASK |
1409 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1410 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1411 DESC_L_MASK);
1412 env->eip = ECX;
1413 } else {
1414 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1415 0, 0xffffffff,
1416 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1417 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1418 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1419 env->eip = (uint32_t)ECX;
1420 }
1421 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1422 0, 0xffffffff,
1423 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1424 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1425 DESC_W_MASK | DESC_A_MASK);
1426 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1427 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1428 cpu_x86_set_cpl(env, 3);
1429 } else
1430#endif
1431 {
1432 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1433 0, 0xffffffff,
1434 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1435 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1436 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1437 env->eip = (uint32_t)ECX;
1438 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1439 0, 0xffffffff,
1440 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1441 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1442 DESC_W_MASK | DESC_A_MASK);
1443 env->eflags |= IF_MASK;
1444 cpu_x86_set_cpl(env, 3);
1445 }
1446#ifdef USE_KQEMU
1447 if (kqemu_is_ok(env)) {
1448 if (env->hflags & HF_LMA_MASK)
1449 CC_OP = CC_OP_EFLAGS;
1450 env->exception_index = -1;
1451 cpu_loop_exit();
1452 }
1453#endif
1454}
1455
1456#ifdef VBOX
1457/**
1458 * Checks and processes external VMM events.
1459 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1460 */
1461void helper_external_event(void)
1462{
1463#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1464 uintptr_t uSP;
1465# ifdef RT_ARCH_AMD64
1466 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1467# else
1468 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1469# endif
1470 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1471#endif
1472 /* Keep in sync with flags checked by gen_check_external_event() */
1473 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1474 {
1475 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1476 ~CPU_INTERRUPT_EXTERNAL_HARD);
1477 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1478 }
1479 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1480 {
1481 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1482 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1483 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1484 }
1485 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1486 {
1487 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1488 ~CPU_INTERRUPT_EXTERNAL_DMA);
1489 remR3DmaRun(env);
1490 }
1491 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1492 {
1493 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1494 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1495 remR3TimersRun(env);
1496 }
1497}
1498/* helper for recording call instruction addresses for later scanning */
1499void helper_record_call()
1500{
1501 if ( !(env->state & CPU_RAW_RING0)
1502 && (env->cr[0] & CR0_PG_MASK)
1503 && !(env->eflags & X86_EFL_IF))
1504 remR3RecordCall(env);
1505}
1506#endif /* VBOX */
1507
1508/* real mode interrupt */
1509static void do_interrupt_real(int intno, int is_int, int error_code,
1510 unsigned int next_eip)
1511{
1512 SegmentCache *dt;
1513 target_ulong ptr, ssp;
1514 int selector;
1515 uint32_t offset, esp;
1516 uint32_t old_cs, old_eip;
1517
1518 /* real mode (simpler !) */
1519 dt = &env->idt;
1520#ifndef VBOX
1521 if (intno * 4 + 3 > dt->limit)
1522#else
1523 if ((unsigned)intno * 4 + 3 > dt->limit)
1524#endif
1525 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1526 ptr = dt->base + intno * 4;
1527 offset = lduw_kernel(ptr);
1528 selector = lduw_kernel(ptr + 2);
1529 esp = ESP;
1530 ssp = env->segs[R_SS].base;
1531 if (is_int)
1532 old_eip = next_eip;
1533 else
1534 old_eip = env->eip;
1535 old_cs = env->segs[R_CS].selector;
1536 /* XXX: use SS segment size ? */
1537 PUSHW(ssp, esp, 0xffff, compute_eflags());
1538 PUSHW(ssp, esp, 0xffff, old_cs);
1539 PUSHW(ssp, esp, 0xffff, old_eip);
1540
1541 /* update processor state */
1542 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1543 env->eip = offset;
1544 env->segs[R_CS].selector = selector;
1545 env->segs[R_CS].base = (selector << 4);
1546 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1547}
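/* Real-mode interrupts use the IVT at linear address 0 with 4 bytes per
   vector (offset:segment); FLAGS, CS and IP are pushed as 16-bit values and
   IF, TF, AC and RF are cleared before jumping to the handler. */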
1548
1549/* fake user mode interrupt */
1550void do_interrupt_user(int intno, int is_int, int error_code,
1551 target_ulong next_eip)
1552{
1553 SegmentCache *dt;
1554 target_ulong ptr;
1555 int dpl, cpl, shift;
1556 uint32_t e2;
1557
1558 dt = &env->idt;
1559 if (env->hflags & HF_LMA_MASK) {
1560 shift = 4;
1561 } else {
1562 shift = 3;
1563 }
1564 ptr = dt->base + (intno << shift);
1565 e2 = ldl_kernel(ptr + 4);
1566
1567 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1568 cpl = env->hflags & HF_CPL_MASK;
1569 /* check privilege if software int */
1570 if (is_int && dpl < cpl)
1571 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1572
1573 /* Since we emulate only user space, we cannot do more than
1574 exiting the emulation with the suitable exception and error
1575 code */
1576 if (is_int)
1577 EIP = next_eip;
1578}
1579
1580/*
1581 * Begin execution of an interruption. is_int is TRUE if coming from
1582 * the int instruction. next_eip is the EIP value AFTER the interrupt
1583 * instruction. It is only relevant if is_int is TRUE.
1584 */
1585void do_interrupt(int intno, int is_int, int error_code,
1586 target_ulong next_eip, int is_hw)
1587{
1588 if (loglevel & CPU_LOG_INT) {
1589 if ((env->cr[0] & CR0_PE_MASK)) {
1590 static int count;
1591 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1592 count, intno, error_code, is_int,
1593 env->hflags & HF_CPL_MASK,
1594 env->segs[R_CS].selector, EIP,
1595 (int)env->segs[R_CS].base + EIP,
1596 env->segs[R_SS].selector, ESP);
1597 if (intno == 0x0e) {
1598 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1599 } else {
1600 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1601 }
1602 fprintf(logfile, "\n");
1603 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1604#if 0
1605 {
1606 int i;
1607 uint8_t *ptr;
1608 fprintf(logfile, " code=");
1609 ptr = env->segs[R_CS].base + env->eip;
1610 for(i = 0; i < 16; i++) {
1611 fprintf(logfile, " %02x", ldub(ptr + i));
1612 }
1613 fprintf(logfile, "\n");
1614 }
1615#endif
1616 count++;
1617 }
1618 }
1619 if (env->cr[0] & CR0_PE_MASK) {
1620#ifdef TARGET_X86_64
1621 if (env->hflags & HF_LMA_MASK) {
1622 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1623 } else
1624#endif
1625 {
1626#ifdef VBOX
1627 /* int xx *, v86 code and VME enabled? */
1628 if ( (env->eflags & VM_MASK)
1629 && (env->cr[4] & CR4_VME_MASK)
1630 && is_int
1631 && !is_hw
1632 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1633 )
1634 do_soft_interrupt_vme(intno, error_code, next_eip);
1635 else
1636#endif /* VBOX */
1637 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1638 }
1639 } else {
1640 do_interrupt_real(intno, is_int, error_code, next_eip);
1641 }
1642}
1643
1644/*
1645 * Check nested exceptions and change to double or triple fault if
1646 * needed. It should only be called, if this is not an interrupt.
1647 * Returns the new exception number.
1648 */
1649static int check_exception(int intno, int *error_code)
1650{
1651 int first_contributory = env->old_exception == 0 ||
1652 (env->old_exception >= 10 &&
1653 env->old_exception <= 13);
1654 int second_contributory = intno == 0 ||
1655 (intno >= 10 && intno <= 13);
1656
1657 if (loglevel & CPU_LOG_INT)
1658 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1659 env->old_exception, intno);
1660
1661 if (env->old_exception == EXCP08_DBLE)
1662 cpu_abort(env, "triple fault");
1663
1664 if ((first_contributory && second_contributory)
1665 || (env->old_exception == EXCP0E_PAGE &&
1666 (second_contributory || (intno == EXCP0E_PAGE)))) {
1667 intno = EXCP08_DBLE;
1668 *error_code = 0;
1669 }
1670
1671 if (second_contributory || (intno == EXCP0E_PAGE) ||
1672 (intno == EXCP08_DBLE))
1673 env->old_exception = intno;
1674
1675 return intno;
1676}
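/* Exception classes as implemented above: vectors 0 and 10-13 are
   "contributory".  Two contributory faults, or a page fault followed by a
   contributory fault or another page fault, are promoted to #DF (vector 8);
   a further fault while delivering #DF is a triple fault, modelled here
   with cpu_abort(). */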
1677
1678/*
1679 * Signal an interruption. It is executed in the main CPU loop.
1680 * is_int is TRUE if coming from the int instruction. next_eip is the
1681 * EIP value AFTER the interrupt instruction. It is only relevant if
1682 * is_int is TRUE.
1683 */
1684void raise_interrupt(int intno, int is_int, int error_code,
1685 int next_eip_addend)
1686{
1687#if defined(VBOX) && defined(DEBUG)
1688 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend));
1689#endif
1690 if (!is_int) {
1691 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1692 intno = check_exception(intno, &error_code);
1693 } else {
1694 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1695 }
1696
1697 env->exception_index = intno;
1698 env->error_code = error_code;
1699 env->exception_is_int = is_int;
1700 env->exception_next_eip = env->eip + next_eip_addend;
1701 cpu_loop_exit();
1702}
1703
1704/* shortcuts to generate exceptions */
1705
1706void (raise_exception_err)(int exception_index, int error_code)
1707{
1708 raise_interrupt(exception_index, 0, error_code, 0);
1709}
1710
1711void raise_exception(int exception_index)
1712{
1713 raise_interrupt(exception_index, 0, 0, 0);
1714}
1715
1716/* SMM support */
1717
1718#if defined(CONFIG_USER_ONLY)
1719
1720void do_smm_enter(void)
1721{
1722}
1723
1724void helper_rsm(void)
1725{
1726}
1727
1728#else
1729
1730#ifdef TARGET_X86_64
1731#define SMM_REVISION_ID 0x00020064
1732#else
1733#define SMM_REVISION_ID 0x00020000
1734#endif
1735
1736void do_smm_enter(void)
1737{
1738 target_ulong sm_state;
1739 SegmentCache *dt;
1740 int i, offset;
1741
1742 if (loglevel & CPU_LOG_INT) {
1743 fprintf(logfile, "SMM: enter\n");
1744 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1745 }
1746
1747 env->hflags |= HF_SMM_MASK;
1748 cpu_smm_update(env);
1749
1750 sm_state = env->smbase + 0x8000;
1751
1752#ifdef TARGET_X86_64
1753 for(i = 0; i < 6; i++) {
1754 dt = &env->segs[i];
1755 offset = 0x7e00 + i * 16;
1756 stw_phys(sm_state + offset, dt->selector);
1757 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1758 stl_phys(sm_state + offset + 4, dt->limit);
1759 stq_phys(sm_state + offset + 8, dt->base);
1760 }
1761
1762 stq_phys(sm_state + 0x7e68, env->gdt.base);
1763 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1764
1765 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1766 stq_phys(sm_state + 0x7e78, env->ldt.base);
1767 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1768 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1769
1770 stq_phys(sm_state + 0x7e88, env->idt.base);
1771 stl_phys(sm_state + 0x7e84, env->idt.limit);
1772
1773 stw_phys(sm_state + 0x7e90, env->tr.selector);
1774 stq_phys(sm_state + 0x7e98, env->tr.base);
1775 stl_phys(sm_state + 0x7e94, env->tr.limit);
1776 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1777
1778 stq_phys(sm_state + 0x7ed0, env->efer);
1779
1780 stq_phys(sm_state + 0x7ff8, EAX);
1781 stq_phys(sm_state + 0x7ff0, ECX);
1782 stq_phys(sm_state + 0x7fe8, EDX);
1783 stq_phys(sm_state + 0x7fe0, EBX);
1784 stq_phys(sm_state + 0x7fd8, ESP);
1785 stq_phys(sm_state + 0x7fd0, EBP);
1786 stq_phys(sm_state + 0x7fc8, ESI);
1787 stq_phys(sm_state + 0x7fc0, EDI);
1788 for(i = 8; i < 16; i++)
1789 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1790 stq_phys(sm_state + 0x7f78, env->eip);
1791 stl_phys(sm_state + 0x7f70, compute_eflags());
1792 stl_phys(sm_state + 0x7f68, env->dr[6]);
1793 stl_phys(sm_state + 0x7f60, env->dr[7]);
1794
1795 stl_phys(sm_state + 0x7f48, env->cr[4]);
1796 stl_phys(sm_state + 0x7f50, env->cr[3]);
1797 stl_phys(sm_state + 0x7f58, env->cr[0]);
1798
1799 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1800 stl_phys(sm_state + 0x7f00, env->smbase);
1801#else
1802 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1803 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1804 stl_phys(sm_state + 0x7ff4, compute_eflags());
1805 stl_phys(sm_state + 0x7ff0, env->eip);
1806 stl_phys(sm_state + 0x7fec, EDI);
1807 stl_phys(sm_state + 0x7fe8, ESI);
1808 stl_phys(sm_state + 0x7fe4, EBP);
1809 stl_phys(sm_state + 0x7fe0, ESP);
1810 stl_phys(sm_state + 0x7fdc, EBX);
1811 stl_phys(sm_state + 0x7fd8, EDX);
1812 stl_phys(sm_state + 0x7fd4, ECX);
1813 stl_phys(sm_state + 0x7fd0, EAX);
1814 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1815 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1816
1817 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1818 stl_phys(sm_state + 0x7f64, env->tr.base);
1819 stl_phys(sm_state + 0x7f60, env->tr.limit);
1820 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1821
1822 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1823 stl_phys(sm_state + 0x7f80, env->ldt.base);
1824 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1825 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1826
1827 stl_phys(sm_state + 0x7f74, env->gdt.base);
1828 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1829
1830 stl_phys(sm_state + 0x7f58, env->idt.base);
1831 stl_phys(sm_state + 0x7f54, env->idt.limit);
1832
1833 for(i = 0; i < 6; i++) {
1834 dt = &env->segs[i];
1835 if (i < 3)
1836 offset = 0x7f84 + i * 12;
1837 else
1838 offset = 0x7f2c + (i - 3) * 12;
1839 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1840 stl_phys(sm_state + offset + 8, dt->base);
1841 stl_phys(sm_state + offset + 4, dt->limit);
1842 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1843 }
1844 stl_phys(sm_state + 0x7f14, env->cr[4]);
1845
1846 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1847 stl_phys(sm_state + 0x7ef8, env->smbase);
1848#endif
1849 /* init SMM cpu state */
1850
1851#ifdef TARGET_X86_64
1852 cpu_load_efer(env, 0);
1853#endif
1854 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1855 env->eip = 0x00008000;
1856 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1857 0xffffffff, 0);
1858 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1859 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1860 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1861 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1862 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1863
1864 cpu_x86_update_cr0(env,
1865 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1866 cpu_x86_update_cr4(env, 0);
1867 env->dr[7] = 0x00000400;
1868 CC_OP = CC_OP_EFLAGS;
1869}
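/* On SMM entry the register state is saved in the save area at
   SMBASE + 0x8000 (the layout differs between the 32-bit and 64-bit SMRAM
   formats selected above), and execution resumes in a real-mode-like
   environment: CS base = SMBASE, EIP = 0x8000, flat 4GB data segments,
   paging and protection disabled. */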
1870
1871void helper_rsm(void)
1872{
1873#ifdef VBOX
1874 cpu_abort(env, "helper_rsm");
1875#else /* !VBOX */
1878 target_ulong sm_state;
1879 int i, offset;
1880 uint32_t val;
1881
1882 sm_state = env->smbase + 0x8000;
1883#ifdef TARGET_X86_64
1884 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1885
1886 for(i = 0; i < 6; i++) {
1887 offset = 0x7e00 + i * 16;
1888 cpu_x86_load_seg_cache(env, i,
1889 lduw_phys(sm_state + offset),
1890 ldq_phys(sm_state + offset + 8),
1891 ldl_phys(sm_state + offset + 4),
1892 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1893 }
1894
1895 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1896 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1897
1898 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1899 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1900 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1901 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1902
1903 env->idt.base = ldq_phys(sm_state + 0x7e88);
1904 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1905
1906 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1907 env->tr.base = ldq_phys(sm_state + 0x7e98);
1908 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1909 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1910
1911 EAX = ldq_phys(sm_state + 0x7ff8);
1912 ECX = ldq_phys(sm_state + 0x7ff0);
1913 EDX = ldq_phys(sm_state + 0x7fe8);
1914 EBX = ldq_phys(sm_state + 0x7fe0);
1915 ESP = ldq_phys(sm_state + 0x7fd8);
1916 EBP = ldq_phys(sm_state + 0x7fd0);
1917 ESI = ldq_phys(sm_state + 0x7fc8);
1918 EDI = ldq_phys(sm_state + 0x7fc0);
1919 for(i = 8; i < 16; i++)
1920 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1921 env->eip = ldq_phys(sm_state + 0x7f78);
1922 load_eflags(ldl_phys(sm_state + 0x7f70),
1923 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1924 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1925 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1926
1927 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1928 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1929 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1930
1931 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1932 if (val & 0x20000) {
1933 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1934 }
1935#else
1936 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1937 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1938 load_eflags(ldl_phys(sm_state + 0x7ff4),
1939 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1940 env->eip = ldl_phys(sm_state + 0x7ff0);
1941 EDI = ldl_phys(sm_state + 0x7fec);
1942 ESI = ldl_phys(sm_state + 0x7fe8);
1943 EBP = ldl_phys(sm_state + 0x7fe4);
1944 ESP = ldl_phys(sm_state + 0x7fe0);
1945 EBX = ldl_phys(sm_state + 0x7fdc);
1946 EDX = ldl_phys(sm_state + 0x7fd8);
1947 ECX = ldl_phys(sm_state + 0x7fd4);
1948 EAX = ldl_phys(sm_state + 0x7fd0);
1949 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1950 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1951
1952 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1953 env->tr.base = ldl_phys(sm_state + 0x7f64);
1954 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1955 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1956
1957 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1958 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1959 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1960 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1961
1962 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1963 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1964
1965 env->idt.base = ldl_phys(sm_state + 0x7f58);
1966 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1967
1968 for(i = 0; i < 6; i++) {
1969 if (i < 3)
1970 offset = 0x7f84 + i * 12;
1971 else
1972 offset = 0x7f2c + (i - 3) * 12;
1973 cpu_x86_load_seg_cache(env, i,
1974 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1975 ldl_phys(sm_state + offset + 8),
1976 ldl_phys(sm_state + offset + 4),
1977 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1978 }
1979 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1980
1981 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1982 if (val & 0x20000) {
1983 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1984 }
1985#endif
1986 CC_OP = CC_OP_EFLAGS;
1987 env->hflags &= ~HF_SMM_MASK;
1988 cpu_smm_update(env);
1989
1990 if (loglevel & CPU_LOG_INT) {
1991 fprintf(logfile, "SMM: after RSM\n");
1992 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1993 }
1994#endif /* !VBOX */
1995}
1996
1997#endif /* !CONFIG_USER_ONLY */
1998
1999
2000/* division, flags are undefined */
2001
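/* Illustration of the divide helpers below (not part of the generated code
 * paths): DIV r/m8 divides AX by the operand, leaving the quotient in AL and
 * the remainder in AH; #DE (EXCP00_DIVZ) is raised both for a zero divisor
 * and for a quotient that does not fit the destination, which is why both
 * error paths raise the same exception.
 */
#if 0
/* never compiled -- worked example using the register macros of this file */
EAX = (EAX & ~0xffff) | 500;   /* AX = 500 */
helper_divb_AL(3);             /* quotient 166 -> AL, remainder 2 -> AH */
#endif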
2002void helper_divb_AL(target_ulong t0)
2003{
2004 unsigned int num, den, q, r;
2005
2006 num = (EAX & 0xffff);
2007 den = (t0 & 0xff);
2008 if (den == 0) {
2009 raise_exception(EXCP00_DIVZ);
2010 }
2011 q = (num / den);
2012 if (q > 0xff)
2013 raise_exception(EXCP00_DIVZ);
2014 q &= 0xff;
2015 r = (num % den) & 0xff;
2016 EAX = (EAX & ~0xffff) | (r << 8) | q;
2017}
2018
2019void helper_idivb_AL(target_ulong t0)
2020{
2021 int num, den, q, r;
2022
2023 num = (int16_t)EAX;
2024 den = (int8_t)t0;
2025 if (den == 0) {
2026 raise_exception(EXCP00_DIVZ);
2027 }
2028 q = (num / den);
2029 if (q != (int8_t)q)
2030 raise_exception(EXCP00_DIVZ);
2031 q &= 0xff;
2032 r = (num % den) & 0xff;
2033 EAX = (EAX & ~0xffff) | (r << 8) | q;
2034}
2035
2036void helper_divw_AX(target_ulong t0)
2037{
2038 unsigned int num, den, q, r;
2039
2040 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2041 den = (t0 & 0xffff);
2042 if (den == 0) {
2043 raise_exception(EXCP00_DIVZ);
2044 }
2045 q = (num / den);
2046 if (q > 0xffff)
2047 raise_exception(EXCP00_DIVZ);
2048 q &= 0xffff;
2049 r = (num % den) & 0xffff;
2050 EAX = (EAX & ~0xffff) | q;
2051 EDX = (EDX & ~0xffff) | r;
2052}
2053
2054void helper_idivw_AX(target_ulong t0)
2055{
2056 int num, den, q, r;
2057
2058 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2059 den = (int16_t)t0;
2060 if (den == 0) {
2061 raise_exception(EXCP00_DIVZ);
2062 }
2063 q = (num / den);
2064 if (q != (int16_t)q)
2065 raise_exception(EXCP00_DIVZ);
2066 q &= 0xffff;
2067 r = (num % den) & 0xffff;
2068 EAX = (EAX & ~0xffff) | q;
2069 EDX = (EDX & ~0xffff) | r;
2070}
2071
2072void helper_divl_EAX(target_ulong t0)
2073{
2074 unsigned int den, r;
2075 uint64_t num, q;
2076
2077 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2078 den = t0;
2079 if (den == 0) {
2080 raise_exception(EXCP00_DIVZ);
2081 }
2082 q = (num / den);
2083 r = (num % den);
2084 if (q > 0xffffffff)
2085 raise_exception(EXCP00_DIVZ);
2086 EAX = (uint32_t)q;
2087 EDX = (uint32_t)r;
2088}
2089
2090void helper_idivl_EAX(target_ulong t0)
2091{
2092 int den, r;
2093 int64_t num, q;
2094
2095 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2096 den = t0;
2097 if (den == 0) {
2098 raise_exception(EXCP00_DIVZ);
2099 }
2100 q = (num / den);
2101 r = (num % den);
2102 if (q != (int32_t)q)
2103 raise_exception(EXCP00_DIVZ);
2104 EAX = (uint32_t)q;
2105 EDX = (uint32_t)r;
2106}
2107
2108/* bcd */
2109
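/* AAM divides AL by the immediate base (normally 10), putting the quotient
 * in AH and the remainder in AL; AAD folds AH back into AL as AH * base + AL
 * and clears AH. Example: AAM 10 with AL = 53 yields AH = 5, AL = 3
 * (EAX[15:0] = 0x0503), and AAD 10 on that value restores AL = 53. The XXX
 * below presumably refers to the missing #DE check for a zero base.
 */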
2110/* XXX: exception */
2111void helper_aam(int base)
2112{
2113 int al, ah;
2114 al = EAX & 0xff;
2115 ah = al / base;
2116 al = al % base;
2117 EAX = (EAX & ~0xffff) | al | (ah << 8);
2118 CC_DST = al;
2119}
2120
2121void helper_aad(int base)
2122{
2123 int al, ah;
2124 al = EAX & 0xff;
2125 ah = (EAX >> 8) & 0xff;
2126 al = ((ah * base) + al) & 0xff;
2127 EAX = (EAX & ~0xffff) | al;
2128 CC_DST = al;
2129}
2130
2131void helper_aaa(void)
2132{
2133 int icarry;
2134 int al, ah, af;
2135 int eflags;
2136
2137 eflags = cc_table[CC_OP].compute_all();
2138 af = eflags & CC_A;
2139 al = EAX & 0xff;
2140 ah = (EAX >> 8) & 0xff;
2141
2142 icarry = (al > 0xf9);
2143 if (((al & 0x0f) > 9 ) || af) {
2144 al = (al + 6) & 0x0f;
2145 ah = (ah + 1 + icarry) & 0xff;
2146 eflags |= CC_C | CC_A;
2147 } else {
2148 eflags &= ~(CC_C | CC_A);
2149 al &= 0x0f;
2150 }
2151 EAX = (EAX & ~0xffff) | al | (ah << 8);
2152 CC_SRC = eflags;
2153 FORCE_RET();
2154}
2155
2156void helper_aas(void)
2157{
2158 int icarry;
2159 int al, ah, af;
2160 int eflags;
2161
2162 eflags = cc_table[CC_OP].compute_all();
2163 af = eflags & CC_A;
2164 al = EAX & 0xff;
2165 ah = (EAX >> 8) & 0xff;
2166
2167 icarry = (al < 6);
2168 if (((al & 0x0f) > 9 ) || af) {
2169 al = (al - 6) & 0x0f;
2170 ah = (ah - 1 - icarry) & 0xff;
2171 eflags |= CC_C | CC_A;
2172 } else {
2173 eflags &= ~(CC_C | CC_A);
2174 al &= 0x0f;
2175 }
2176 EAX = (EAX & ~0xffff) | al | (ah << 8);
2177 CC_SRC = eflags;
2178 FORCE_RET();
2179}
2180
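/* DAA/DAS adjust AL after a packed-BCD addition/subtraction. Example for
 * DAA: adding 0x45 and 0x38 gives AL = 0x7d; the low nibble (0xd) exceeds 9,
 * so 6 is added and AL becomes 0x83, the packed-BCD encoding of 45 + 38 = 83.
 * ZF, PF and SF are then recomputed from the adjusted AL by hand below.
 */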
2181void helper_daa(void)
2182{
2183 int al, af, cf;
2184 int eflags;
2185
2186 eflags = cc_table[CC_OP].compute_all();
2187 cf = eflags & CC_C;
2188 af = eflags & CC_A;
2189 al = EAX & 0xff;
2190
2191 eflags = 0;
2192 if (((al & 0x0f) > 9 ) || af) {
2193 al = (al + 6) & 0xff;
2194 eflags |= CC_A;
2195 }
2196 if ((al > 0x9f) || cf) {
2197 al = (al + 0x60) & 0xff;
2198 eflags |= CC_C;
2199 }
2200 EAX = (EAX & ~0xff) | al;
2201 /* well, speed is not an issue here, so we compute the flags by hand */
2202 eflags |= (al == 0) << 6; /* zf */
2203 eflags |= parity_table[al]; /* pf */
2204 eflags |= (al & 0x80); /* sf */
2205 CC_SRC = eflags;
2206 FORCE_RET();
2207}
2208
2209void helper_das(void)
2210{
2211 int al, al1, af, cf;
2212 int eflags;
2213
2214 eflags = cc_table[CC_OP].compute_all();
2215 cf = eflags & CC_C;
2216 af = eflags & CC_A;
2217 al = EAX & 0xff;
2218
2219 eflags = 0;
2220 al1 = al;
2221 if (((al & 0x0f) > 9 ) || af) {
2222 eflags |= CC_A;
2223 if (al < 6 || cf)
2224 eflags |= CC_C;
2225 al = (al - 6) & 0xff;
2226 }
2227 if ((al1 > 0x99) || cf) {
2228 al = (al - 0x60) & 0xff;
2229 eflags |= CC_C;
2230 }
2231 EAX = (EAX & ~0xff) | al;
2232 /* well, speed is not an issue here, so we compute the flags by hand */
2233 eflags |= (al == 0) << 6; /* zf */
2234 eflags |= parity_table[al]; /* pf */
2235 eflags |= (al & 0x80); /* sf */
2236 CC_SRC = eflags;
2237 FORCE_RET();
2238}
2239
2240void helper_into(int next_eip_addend)
2241{
2242 int eflags;
2243 eflags = cc_table[CC_OP].compute_all();
2244 if (eflags & CC_O) {
2245 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2246 }
2247}
2248
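/* CMPXCHG8B compares EDX:EAX with the 64-bit memory operand: on a match ZF
 * is set and ECX:EBX is stored, otherwise ZF is cleared and the memory value
 * is loaded into EDX:EAX. The failure path still performs a store of the old
 * value so that the access remains a write in both cases, matching the
 * instruction's locked read-modify-write behaviour.
 */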
2249void helper_cmpxchg8b(target_ulong a0)
2250{
2251 uint64_t d;
2252 int eflags;
2253
2254 eflags = cc_table[CC_OP].compute_all();
2255 d = ldq(a0);
2256 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2257 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2258 eflags |= CC_Z;
2259 } else {
2260 /* always do the store */
2261 stq(a0, d);
2262 EDX = (uint32_t)(d >> 32);
2263 EAX = (uint32_t)d;
2264 eflags &= ~CC_Z;
2265 }
2266 CC_SRC = eflags;
2267}
2268
2269#ifdef TARGET_X86_64
2270void helper_cmpxchg16b(target_ulong a0)
2271{
2272 uint64_t d0, d1;
2273 int eflags;
2274
2275 if ((a0 & 0xf) != 0)
2276 raise_exception(EXCP0D_GPF);
2277 eflags = cc_table[CC_OP].compute_all();
2278 d0 = ldq(a0);
2279 d1 = ldq(a0 + 8);
2280 if (d0 == EAX && d1 == EDX) {
2281 stq(a0, EBX);
2282 stq(a0 + 8, ECX);
2283 eflags |= CC_Z;
2284 } else {
2285 /* always do the store */
2286 stq(a0, d0);
2287 stq(a0 + 8, d1);
2288 EDX = d1;
2289 EAX = d0;
2290 eflags &= ~CC_Z;
2291 }
2292 CC_SRC = eflags;
2293}
2294#endif
2295
2296void helper_single_step(void)
2297{
2298 env->dr[6] |= 0x4000;
2299 raise_exception(EXCP01_SSTP);
2300}
2301
2302void helper_cpuid(void)
2303{
2304#ifndef VBOX
2305 uint32_t index;
2306
2307 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2308
2309 index = (uint32_t)EAX;
2310 /* test if maximum index reached */
2311 if (index & 0x80000000) {
2312 if (index > env->cpuid_xlevel)
2313 index = env->cpuid_level;
2314 } else {
2315 if (index > env->cpuid_level)
2316 index = env->cpuid_level;
2317 }
2318
2319 switch(index) {
2320 case 0:
2321 EAX = env->cpuid_level;
2322 EBX = env->cpuid_vendor1;
2323 EDX = env->cpuid_vendor2;
2324 ECX = env->cpuid_vendor3;
2325 break;
2326 case 1:
2327 EAX = env->cpuid_version;
2328 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2329 ECX = env->cpuid_ext_features;
2330 EDX = env->cpuid_features;
2331 break;
2332 case 2:
2333 /* cache info: needed for Pentium Pro compatibility */
2334 EAX = 1;
2335 EBX = 0;
2336 ECX = 0;
2337 EDX = 0x2c307d;
2338 break;
2339 case 4:
2340 /* cache info: needed for Core compatibility */
2341 switch (ECX) {
2342 case 0: /* L1 dcache info */
2343 EAX = 0x0000121;
2344 EBX = 0x1c0003f;
2345 ECX = 0x000003f;
2346 EDX = 0x0000001;
2347 break;
2348 case 1: /* L1 icache info */
2349 EAX = 0x0000122;
2350 EBX = 0x1c0003f;
2351 ECX = 0x000003f;
2352 EDX = 0x0000001;
2353 break;
2354 case 2: /* L2 cache info */
2355 EAX = 0x0000143;
2356 EBX = 0x3c0003f;
2357 ECX = 0x0000fff;
2358 EDX = 0x0000001;
2359 break;
2360 default: /* end of info */
2361 EAX = 0;
2362 EBX = 0;
2363 ECX = 0;
2364 EDX = 0;
2365 break;
2366 }
2367
2368 break;
2369 case 5:
2370 /* mwait info: needed for Core compatibility */
2371 EAX = 0; /* Smallest monitor-line size in bytes */
2372 EBX = 0; /* Largest monitor-line size in bytes */
2373 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2374 EDX = 0;
2375 break;
2376 case 6:
2377 /* Thermal and Power Leaf */
2378 EAX = 0;
2379 EBX = 0;
2380 ECX = 0;
2381 EDX = 0;
2382 break;
2383 case 9:
2384 /* Direct Cache Access Information Leaf */
2385 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2386 EBX = 0;
2387 ECX = 0;
2388 EDX = 0;
2389 break;
2390 case 0xA:
2391 /* Architectural Performance Monitoring Leaf */
2392 EAX = 0;
2393 EBX = 0;
2394 ECX = 0;
2395 EDX = 0;
2396 break;
2397 case 0x80000000:
2398 EAX = env->cpuid_xlevel;
2399 EBX = env->cpuid_vendor1;
2400 EDX = env->cpuid_vendor2;
2401 ECX = env->cpuid_vendor3;
2402 break;
2403 case 0x80000001:
2404 EAX = env->cpuid_features;
2405 EBX = 0;
2406 ECX = env->cpuid_ext3_features;
2407 EDX = env->cpuid_ext2_features;
2408 break;
2409 case 0x80000002:
2410 case 0x80000003:
2411 case 0x80000004:
2412 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2413 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2414 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2415 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2416 break;
2417 case 0x80000005:
2418 /* cache info (L1 cache) */
2419 EAX = 0x01ff01ff;
2420 EBX = 0x01ff01ff;
2421 ECX = 0x40020140;
2422 EDX = 0x40020140;
2423 break;
2424 case 0x80000006:
2425 /* cache info (L2 cache) */
2426 EAX = 0;
2427 EBX = 0x42004200;
2428 ECX = 0x02008140;
2429 EDX = 0;
2430 break;
2431 case 0x80000008:
2432 /* virtual & phys address size in low 2 bytes. */
2433/* XXX: This value must match the one used in the MMU code. */
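/* EAX[7:0] is the physical address width and EAX[15:8] the linear address
   width, in bits; e.g. 0x00003028 decodes as 40 physical / 48 linear bits. */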
2434 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2435 /* 64 bit processor */
2436#if defined(USE_KQEMU)
2437 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2438#else
2439/* XXX: The physical address space is limited to 42 bits in exec.c. */
2440 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2441#endif
2442 } else {
2443#if defined(USE_KQEMU)
2444 EAX = 0x00000020; /* 32 bits physical */
2445#else
2446 if (env->cpuid_features & CPUID_PSE36)
2447 EAX = 0x00000024; /* 36 bits physical */
2448 else
2449 EAX = 0x00000020; /* 32 bits physical */
2450#endif
2451 }
2452 EBX = 0;
2453 ECX = 0;
2454 EDX = 0;
2455 break;
2456 case 0x8000000A:
2457 EAX = 0x00000001;
2458 EBX = 0;
2459 ECX = 0;
2460 EDX = 0;
2461 break;
2462 default:
2463 /* reserved values: zero */
2464 EAX = 0;
2465 EBX = 0;
2466 ECX = 0;
2467 EDX = 0;
2468 break;
2469 }
2470#else /* VBOX */
2471 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2472#endif /* VBOX */
2473}
2474
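/* ENTER with a non-zero nesting level: the translator is assumed to have
 * already stored the old EBP and to set the new EBP and final ESP itself;
 * this helper (and its 64-bit variant below) only copies level-1 saved frame
 * pointers from the old frame and then pushes t1, the new frame-pointer value.
 */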
2475void helper_enter_level(int level, int data32, target_ulong t1)
2476{
2477 target_ulong ssp;
2478 uint32_t esp_mask, esp, ebp;
2479
2480 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2481 ssp = env->segs[R_SS].base;
2482 ebp = EBP;
2483 esp = ESP;
2484 if (data32) {
2485 /* 32 bit */
2486 esp -= 4;
2487 while (--level) {
2488 esp -= 4;
2489 ebp -= 4;
2490 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2491 }
2492 esp -= 4;
2493 stl(ssp + (esp & esp_mask), t1);
2494 } else {
2495 /* 16 bit */
2496 esp -= 2;
2497 while (--level) {
2498 esp -= 2;
2499 ebp -= 2;
2500 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2501 }
2502 esp -= 2;
2503 stw(ssp + (esp & esp_mask), t1);
2504 }
2505}
2506
2507#ifdef TARGET_X86_64
2508void helper_enter64_level(int level, int data64, target_ulong t1)
2509{
2510 target_ulong esp, ebp;
2511 ebp = EBP;
2512 esp = ESP;
2513
2514 if (data64) {
2515 /* 64 bit */
2516 esp -= 8;
2517 while (--level) {
2518 esp -= 8;
2519 ebp -= 8;
2520 stq(esp, ldq(ebp));
2521 }
2522 esp -= 8;
2523 stq(esp, t1);
2524 } else {
2525 /* 16 bit */
2526 esp -= 2;
2527 while (--level) {
2528 esp -= 2;
2529 ebp -= 2;
2530 stw(esp, lduw(ebp));
2531 }
2532 esp -= 2;
2533 stw(esp, t1);
2534 }
2535}
2536#endif
2537
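/* LLDT and LTR fetch their descriptor from the GDT; in long mode the LDT and
 * TSS descriptors grow to 16 bytes (entry_limit 15 instead of 7), with the
 * third dword supplying bits 63:32 of the base address.
 */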
2538void helper_lldt(int selector)
2539{
2540 SegmentCache *dt;
2541 uint32_t e1, e2;
2542#ifndef VBOX
2543 int index, entry_limit;
2544#else
2545 unsigned int index, entry_limit;
2546#endif
2547 target_ulong ptr;
2548
2549#ifdef VBOX
2550 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2551 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2552#endif
2553
2554 selector &= 0xffff;
2555 if ((selector & 0xfffc) == 0) {
2556 /* XXX: NULL selector case: invalid LDT */
2557 env->ldt.base = 0;
2558 env->ldt.limit = 0;
2559 } else {
2560 if (selector & 0x4)
2561 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2562 dt = &env->gdt;
2563 index = selector & ~7;
2564#ifdef TARGET_X86_64
2565 if (env->hflags & HF_LMA_MASK)
2566 entry_limit = 15;
2567 else
2568#endif
2569 entry_limit = 7;
2570 if ((index + entry_limit) > dt->limit)
2571 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2572 ptr = dt->base + index;
2573 e1 = ldl_kernel(ptr);
2574 e2 = ldl_kernel(ptr + 4);
2575 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2576 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2577 if (!(e2 & DESC_P_MASK))
2578 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2579#ifdef TARGET_X86_64
2580 if (env->hflags & HF_LMA_MASK) {
2581 uint32_t e3;
2582 e3 = ldl_kernel(ptr + 8);
2583 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2584 env->ldt.base |= (target_ulong)e3 << 32;
2585 } else
2586#endif
2587 {
2588 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2589 }
2590 }
2591 env->ldt.selector = selector;
2592#ifdef VBOX
2593 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2594 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2595#endif
2596}
2597
2598void helper_ltr(int selector)
2599{
2600 SegmentCache *dt;
2601 uint32_t e1, e2;
2602#ifndef VBOX
2603 int index, type, entry_limit;
2604#else
2605 unsigned int index;
2606 int type, entry_limit;
2607#endif
2608 target_ulong ptr;
2609
2610#ifdef VBOX
2611 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2612 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2613 env->tr.flags, (RTSEL)(selector & 0xffff)));
2614#endif
2615 selector &= 0xffff;
2616 if ((selector & 0xfffc) == 0) {
2617 /* NULL selector case: invalid TR */
2618 env->tr.base = 0;
2619 env->tr.limit = 0;
2620 env->tr.flags = 0;
2621 } else {
2622 if (selector & 0x4)
2623 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2624 dt = &env->gdt;
2625 index = selector & ~7;
2626#ifdef TARGET_X86_64
2627 if (env->hflags & HF_LMA_MASK)
2628 entry_limit = 15;
2629 else
2630#endif
2631 entry_limit = 7;
2632 if ((index + entry_limit) > dt->limit)
2633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2634 ptr = dt->base + index;
2635 e1 = ldl_kernel(ptr);
2636 e2 = ldl_kernel(ptr + 4);
2637 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2638 if ((e2 & DESC_S_MASK) ||
2639 (type != 1 && type != 9))
2640 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2641 if (!(e2 & DESC_P_MASK))
2642 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2643#ifdef TARGET_X86_64
2644 if (env->hflags & HF_LMA_MASK) {
2645 uint32_t e3, e4;
2646 e3 = ldl_kernel(ptr + 8);
2647 e4 = ldl_kernel(ptr + 12);
2648 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2649 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2650 load_seg_cache_raw_dt(&env->tr, e1, e2);
2651 env->tr.base |= (target_ulong)e3 << 32;
2652 } else
2653#endif
2654 {
2655 load_seg_cache_raw_dt(&env->tr, e1, e2);
2656 }
2657 e2 |= DESC_TSS_BUSY_MASK;
2658 stl_kernel(ptr + 4, e2);
2659 }
2660 env->tr.selector = selector;
2661#ifdef VBOX
2662 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2663 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2664 env->tr.flags, (RTSEL)(selector & 0xffff)));
2665#endif
2666}
2667
2668/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2669void helper_load_seg(int seg_reg, int selector)
2670{
2671 uint32_t e1, e2;
2672 int cpl, dpl, rpl;
2673 SegmentCache *dt;
2674#ifndef VBOX
2675 int index;
2676#else
2677 unsigned int index;
2678#endif
2679 target_ulong ptr;
2680
2681 selector &= 0xffff;
2682 cpl = env->hflags & HF_CPL_MASK;
2683
2684#ifdef VBOX
2685 /* Trying to load a selector with CPL=1? */
2686 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2687 {
2688 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2689 selector = selector & 0xfffc;
2690 }
2691#endif
2692 if ((selector & 0xfffc) == 0) {
2693 /* null selector case */
2694 if (seg_reg == R_SS
2695#ifdef TARGET_X86_64
2696 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2697#endif
2698 )
2699 raise_exception_err(EXCP0D_GPF, 0);
2700 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2701 } else {
2702
2703 if (selector & 0x4)
2704 dt = &env->ldt;
2705 else
2706 dt = &env->gdt;
2707 index = selector & ~7;
2708 if ((index + 7) > dt->limit)
2709 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2710 ptr = dt->base + index;
2711 e1 = ldl_kernel(ptr);
2712 e2 = ldl_kernel(ptr + 4);
2713
2714 if (!(e2 & DESC_S_MASK))
2715 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2716 rpl = selector & 3;
2717 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2718 if (seg_reg == R_SS) {
2719 /* must be writable segment */
2720 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2721 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2722 if (rpl != cpl || dpl != cpl)
2723 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2724 } else {
2725 /* must be readable segment */
2726 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2727 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2728
2729 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2730 /* if not conforming code, test rights */
2731 if (dpl < cpl || dpl < rpl)
2732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2733 }
2734 }
2735
2736 if (!(e2 & DESC_P_MASK)) {
2737 if (seg_reg == R_SS)
2738 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2739 else
2740 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2741 }
2742
2743 /* set the access bit if not already set */
2744 if (!(e2 & DESC_A_MASK)) {
2745 e2 |= DESC_A_MASK;
2746 stl_kernel(ptr + 4, e2);
2747 }
2748
2749 cpu_x86_load_seg_cache(env, seg_reg, selector,
2750 get_seg_base(e1, e2),
2751 get_seg_limit(e1, e2),
2752 e2);
2753#if 0
2754 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2755 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2756#endif
2757 }
2758}
2759
2760/* protected mode jump */
2761void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2762 int next_eip_addend)
2763{
2764 int gate_cs, type;
2765 uint32_t e1, e2, cpl, dpl, rpl, limit;
2766 target_ulong next_eip;
2767
2768#ifdef VBOX
2769 e1 = e2 = 0;
2770#endif
2771 if ((new_cs & 0xfffc) == 0)
2772 raise_exception_err(EXCP0D_GPF, 0);
2773 if (load_segment(&e1, &e2, new_cs) != 0)
2774 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2775 cpl = env->hflags & HF_CPL_MASK;
2776 if (e2 & DESC_S_MASK) {
2777 if (!(e2 & DESC_CS_MASK))
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2780 if (e2 & DESC_C_MASK) {
2781 /* conforming code segment */
2782 if (dpl > cpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 } else {
2785 /* non conforming code segment */
2786 rpl = new_cs & 3;
2787 if (rpl > cpl)
2788 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2789 if (dpl != cpl)
2790 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2791 }
2792 if (!(e2 & DESC_P_MASK))
2793 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2794 limit = get_seg_limit(e1, e2);
2795 if (new_eip > limit &&
2796 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2797 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2798 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2799 get_seg_base(e1, e2), limit, e2);
2800 EIP = new_eip;
2801 } else {
2802 /* jump to call or task gate */
2803 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2804 rpl = new_cs & 3;
2805 cpl = env->hflags & HF_CPL_MASK;
2806 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2807 switch(type) {
2808 case 1: /* 286 TSS */
2809 case 9: /* 386 TSS */
2810 case 5: /* task gate */
2811 if (dpl < cpl || dpl < rpl)
2812 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2813 next_eip = env->eip + next_eip_addend;
2814 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2815 CC_OP = CC_OP_EFLAGS;
2816 break;
2817 case 4: /* 286 call gate */
2818 case 12: /* 386 call gate */
2819 if ((dpl < cpl) || (dpl < rpl))
2820 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2821 if (!(e2 & DESC_P_MASK))
2822 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2823 gate_cs = e1 >> 16;
2824 new_eip = (e1 & 0xffff);
2825 if (type == 12)
2826 new_eip |= (e2 & 0xffff0000);
2827 if (load_segment(&e1, &e2, gate_cs) != 0)
2828 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2829 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2830 /* must be code segment */
2831 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2832 (DESC_S_MASK | DESC_CS_MASK)))
2833 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2834 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2835 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2836 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2837 if (!(e2 & DESC_P_MASK))
2838#ifdef VBOX /* See page 3-514 of 253666.pdf */
2839 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2840#else
2841 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2842#endif
2843 limit = get_seg_limit(e1, e2);
2844 if (new_eip > limit)
2845 raise_exception_err(EXCP0D_GPF, 0);
2846 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2847 get_seg_base(e1, e2), limit, e2);
2848 EIP = new_eip;
2849 break;
2850 default:
2851 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2852 break;
2853 }
2854 }
2855}
2856
2857/* real mode call */
2858void helper_lcall_real(int new_cs, target_ulong new_eip1,
2859 int shift, int next_eip)
2860{
2861 int new_eip;
2862 uint32_t esp, esp_mask;
2863 target_ulong ssp;
2864
2865 new_eip = new_eip1;
2866 esp = ESP;
2867 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2868 ssp = env->segs[R_SS].base;
2869 if (shift) {
2870 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2871 PUSHL(ssp, esp, esp_mask, next_eip);
2872 } else {
2873 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2874 PUSHW(ssp, esp, esp_mask, next_eip);
2875 }
2876
2877 SET_ESP(esp, esp_mask);
2878 env->eip = new_eip;
2879 env->segs[R_CS].selector = new_cs;
2880 env->segs[R_CS].base = (new_cs << 4);
2881}
2882
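/* For the call-gate path handled below, the gate descriptor is laid out as:
 * target selector in e1[31:16], target offset split across e1[15:0] and
 * e2[31:16], and the parameter count (words/dwords copied to the new stack
 * on a privilege change) in e2[4:0].
 */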
2883/* protected mode call */
2884void helper_lcall_protected(int new_cs, target_ulong new_eip,
2885 int shift, int next_eip_addend)
2886{
2887 int new_stack, i;
2888 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2889 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2890 uint32_t val, limit, old_sp_mask;
2891 target_ulong ssp, old_ssp, next_eip;
2892
2893#ifdef VBOX
2894 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2895#endif
2896 next_eip = env->eip + next_eip_addend;
2897#ifdef DEBUG_PCALL
2898 if (loglevel & CPU_LOG_PCALL) {
2899 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2900 new_cs, (uint32_t)new_eip, shift);
2901 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2902 }
2903#endif
2904 if ((new_cs & 0xfffc) == 0)
2905 raise_exception_err(EXCP0D_GPF, 0);
2906 if (load_segment(&e1, &e2, new_cs) != 0)
2907 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2908 cpl = env->hflags & HF_CPL_MASK;
2909#ifdef DEBUG_PCALL
2910 if (loglevel & CPU_LOG_PCALL) {
2911 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2912 }
2913#endif
2914 if (e2 & DESC_S_MASK) {
2915 if (!(e2 & DESC_CS_MASK))
2916 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2917 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2918 if (e2 & DESC_C_MASK) {
2919 /* conforming code segment */
2920 if (dpl > cpl)
2921 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2922 } else {
2923 /* non conforming code segment */
2924 rpl = new_cs & 3;
2925 if (rpl > cpl)
2926 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2927 if (dpl != cpl)
2928 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2929 }
2930 if (!(e2 & DESC_P_MASK))
2931 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2932
2933#ifdef TARGET_X86_64
2934 /* XXX: check 16/32 bit cases in long mode */
2935 if (shift == 2) {
2936 target_ulong rsp;
2937 /* 64 bit case */
2938 rsp = ESP;
2939 PUSHQ(rsp, env->segs[R_CS].selector);
2940 PUSHQ(rsp, next_eip);
2941 /* from this point, not restartable */
2942 ESP = rsp;
2943 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2944 get_seg_base(e1, e2),
2945 get_seg_limit(e1, e2), e2);
2946 EIP = new_eip;
2947 } else
2948#endif
2949 {
2950 sp = ESP;
2951 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2952 ssp = env->segs[R_SS].base;
2953 if (shift) {
2954 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2955 PUSHL(ssp, sp, sp_mask, next_eip);
2956 } else {
2957 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2958 PUSHW(ssp, sp, sp_mask, next_eip);
2959 }
2960
2961 limit = get_seg_limit(e1, e2);
2962 if (new_eip > limit)
2963 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2964 /* from this point, not restartable */
2965 SET_ESP(sp, sp_mask);
2966 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2967 get_seg_base(e1, e2), limit, e2);
2968 EIP = new_eip;
2969 }
2970 } else {
2971 /* check gate type */
2972 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2973 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2974 rpl = new_cs & 3;
2975 switch(type) {
2976 case 1: /* available 286 TSS */
2977 case 9: /* available 386 TSS */
2978 case 5: /* task gate */
2979 if (dpl < cpl || dpl < rpl)
2980 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2981 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2982 CC_OP = CC_OP_EFLAGS;
2983 return;
2984 case 4: /* 286 call gate */
2985 case 12: /* 386 call gate */
2986 break;
2987 default:
2988 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2989 break;
2990 }
2991 shift = type >> 3;
2992
2993 if (dpl < cpl || dpl < rpl)
2994 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2995 /* check valid bit */
2996 if (!(e2 & DESC_P_MASK))
2997 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2998 selector = e1 >> 16;
2999 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
3000 param_count = e2 & 0x1f;
3001 if ((selector & 0xfffc) == 0)
3002 raise_exception_err(EXCP0D_GPF, 0);
3003
3004 if (load_segment(&e1, &e2, selector) != 0)
3005 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3006 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3007 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3008 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3009 if (dpl > cpl)
3010 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3011 if (!(e2 & DESC_P_MASK))
3012 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3013
3014 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3015 /* to inner privilege */
3016 get_ss_esp_from_tss(&ss, &sp, dpl);
3017#ifdef DEBUG_PCALL
3018 if (loglevel & CPU_LOG_PCALL)
3019 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3020 ss, sp, param_count, ESP);
3021#endif
3022 if ((ss & 0xfffc) == 0)
3023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3024 if ((ss & 3) != dpl)
3025 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3026 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3027 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3028 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3029 if (ss_dpl != dpl)
3030 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3031 if (!(ss_e2 & DESC_S_MASK) ||
3032 (ss_e2 & DESC_CS_MASK) ||
3033 !(ss_e2 & DESC_W_MASK))
3034 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3035 if (!(ss_e2 & DESC_P_MASK))
3036#ifdef VBOX /* See page 3-99 of 253666.pdf */
3037 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3038#else
3039 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3040#endif
3041
3042 // push_size = ((param_count * 2) + 8) << shift;
3043
3044 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3045 old_ssp = env->segs[R_SS].base;
3046
3047 sp_mask = get_sp_mask(ss_e2);
3048 ssp = get_seg_base(ss_e1, ss_e2);
3049 if (shift) {
3050 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3051 PUSHL(ssp, sp, sp_mask, ESP);
3052 for(i = param_count - 1; i >= 0; i--) {
3053 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3054 PUSHL(ssp, sp, sp_mask, val);
3055 }
3056 } else {
3057 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3058 PUSHW(ssp, sp, sp_mask, ESP);
3059 for(i = param_count - 1; i >= 0; i--) {
3060 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3061 PUSHW(ssp, sp, sp_mask, val);
3062 }
3063 }
3064 new_stack = 1;
3065 } else {
3066 /* to same privilege */
3067 sp = ESP;
3068 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3069 ssp = env->segs[R_SS].base;
3070 // push_size = (4 << shift);
3071 new_stack = 0;
3072 }
3073
3074 if (shift) {
3075 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3076 PUSHL(ssp, sp, sp_mask, next_eip);
3077 } else {
3078 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3079 PUSHW(ssp, sp, sp_mask, next_eip);
3080 }
3081
3082 /* from this point, not restartable */
3083
3084 if (new_stack) {
3085 ss = (ss & ~3) | dpl;
3086 cpu_x86_load_seg_cache(env, R_SS, ss,
3087 ssp,
3088 get_seg_limit(ss_e1, ss_e2),
3089 ss_e2);
3090 }
3091
3092 selector = (selector & ~3) | dpl;
3093 cpu_x86_load_seg_cache(env, R_CS, selector,
3094 get_seg_base(e1, e2),
3095 get_seg_limit(e1, e2),
3096 e2);
3097 cpu_x86_set_cpl(env, dpl);
3098 SET_ESP(sp, sp_mask);
3099 EIP = offset;
3100 }
3101#ifdef USE_KQEMU
3102 if (kqemu_is_ok(env)) {
3103 env->exception_index = -1;
3104 cpu_loop_exit();
3105 }
3106#endif
3107}
3108
3109/* real and vm86 mode iret */
3110void helper_iret_real(int shift)
3111{
3112 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3113 target_ulong ssp;
3114 int eflags_mask;
3115#ifdef VBOX
3116 bool fVME = false;
3117
3118 remR3TrapClear(env->pVM);
3119#endif /* VBOX */
3120
3121 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3122 sp = ESP;
3123 ssp = env->segs[R_SS].base;
3124 if (shift == 1) {
3125 /* 32 bits */
3126 POPL(ssp, sp, sp_mask, new_eip);
3127 POPL(ssp, sp, sp_mask, new_cs);
3128 new_cs &= 0xffff;
3129 POPL(ssp, sp, sp_mask, new_eflags);
3130 } else {
3131 /* 16 bits */
3132 POPW(ssp, sp, sp_mask, new_eip);
3133 POPW(ssp, sp, sp_mask, new_cs);
3134 POPW(ssp, sp, sp_mask, new_eflags);
3135 }
3136#ifdef VBOX
3137 if ( (env->eflags & VM_MASK)
3138 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3139 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3140 {
3141 fVME = true;
3142 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3143 /* if TF will be set -> #GP */
3144 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3145 || (new_eflags & TF_MASK))
3146 raise_exception(EXCP0D_GPF);
3147 }
3148#endif /* VBOX */
3149 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3150 env->segs[R_CS].selector = new_cs;
3151 env->segs[R_CS].base = (new_cs << 4);
3152 env->eip = new_eip;
3153#ifdef VBOX
3154 if (fVME)
3155 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3156 else
3157#endif
3158 if (env->eflags & VM_MASK)
3159 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3160 else
3161 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3162 if (shift == 0)
3163 eflags_mask &= 0xffff;
3164 load_eflags(new_eflags, eflags_mask);
3165 env->hflags2 &= ~HF2_NMI_MASK;
3166#ifdef VBOX
3167 if (fVME)
3168 {
3169 if (new_eflags & IF_MASK)
3170 env->eflags |= VIF_MASK;
3171 else
3172 env->eflags &= ~VIF_MASK;
3173 }
3174#endif /* VBOX */
3175}
3176
3177#ifndef VBOX
3178static inline void validate_seg(int seg_reg, int cpl)
3179#else /* VBOX */
3180DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3181#endif /* VBOX */
3182{
3183 int dpl;
3184 uint32_t e2;
3185
3186 /* XXX: on x86_64, we do not want to nullify FS and GS because
3187 they may still contain a valid base. I would be interested to
3188 know how a real x86_64 CPU behaves */
3189 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3190 (env->segs[seg_reg].selector & 0xfffc) == 0)
3191 return;
3192
3193 e2 = env->segs[seg_reg].flags;
3194 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3195 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3196 /* data or non conforming code segment */
3197 if (dpl < cpl) {
3198 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3199 }
3200 }
3201}
3202
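/* helper_ret_protected serves both RETF (is_iret = 0) and IRET: it pops EIP
 * and CS (plus EFLAGS for IRET), and when returning to a less privileged
 * level it additionally pops ESP and SS; a return to VM86 mode also pops
 * ES, DS, FS and GS from the stack.
 */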
3203/* protected mode iret */
3204#ifndef VBOX
3205static inline void helper_ret_protected(int shift, int is_iret, int addend)
3206#else /* VBOX */
3207DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3208#endif /* VBOX */
3209{
3210 uint32_t new_cs, new_eflags, new_ss;
3211 uint32_t new_es, new_ds, new_fs, new_gs;
3212 uint32_t e1, e2, ss_e1, ss_e2;
3213 int cpl, dpl, rpl, eflags_mask, iopl;
3214 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3215
3216#ifdef VBOX
3217 ss_e1 = ss_e2 = e1 = e2 = 0;
3218#endif
3219
3220#ifdef TARGET_X86_64
3221 if (shift == 2)
3222 sp_mask = -1;
3223 else
3224#endif
3225 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3226 sp = ESP;
3227 ssp = env->segs[R_SS].base;
3228 new_eflags = 0; /* avoid warning */
3229#ifdef TARGET_X86_64
3230 if (shift == 2) {
3231 POPQ(sp, new_eip);
3232 POPQ(sp, new_cs);
3233 new_cs &= 0xffff;
3234 if (is_iret) {
3235 POPQ(sp, new_eflags);
3236 }
3237 } else
3238#endif
3239 if (shift == 1) {
3240 /* 32 bits */
3241 POPL(ssp, sp, sp_mask, new_eip);
3242 POPL(ssp, sp, sp_mask, new_cs);
3243 new_cs &= 0xffff;
3244 if (is_iret) {
3245 POPL(ssp, sp, sp_mask, new_eflags);
3246#if defined(VBOX) && defined(DEBUG)
3247 printf("iret: new CS %04X\n", new_cs);
3248 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3249 printf("iret: new EFLAGS %08X\n", new_eflags);
3250 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3251#endif
3252 if (new_eflags & VM_MASK)
3253 goto return_to_vm86;
3254 }
3255#ifdef VBOX
3256 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3257 {
3258#ifdef DEBUG
3259 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3260#endif
3261 new_cs = new_cs & 0xfffc;
3262 }
3263#endif
3264 } else {
3265 /* 16 bits */
3266 POPW(ssp, sp, sp_mask, new_eip);
3267 POPW(ssp, sp, sp_mask, new_cs);
3268 if (is_iret)
3269 POPW(ssp, sp, sp_mask, new_eflags);
3270 }
3271#ifdef DEBUG_PCALL
3272 if (loglevel & CPU_LOG_PCALL) {
3273 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3274 new_cs, new_eip, shift, addend);
3275 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3276 }
3277#endif
3278 if ((new_cs & 0xfffc) == 0)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 printf("new_cs & 0xfffc) == 0\n");
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 if (load_segment(&e1, &e2, new_cs) != 0)
3286 {
3287#if defined(VBOX) && defined(DEBUG)
3288 printf("load_segment failed\n");
3289#endif
3290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3291 }
3292 if (!(e2 & DESC_S_MASK) ||
3293 !(e2 & DESC_CS_MASK))
3294 {
3295#if defined(VBOX) && defined(DEBUG)
3296 printf("e2 mask %08x\n", e2);
3297#endif
3298 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3299 }
3300 cpl = env->hflags & HF_CPL_MASK;
3301 rpl = new_cs & 3;
3302 if (rpl < cpl)
3303 {
3304#if defined(VBOX) && defined(DEBUG)
3305 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3306#endif
3307 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3308 }
3309 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3310 if (e2 & DESC_C_MASK) {
3311 if (dpl > rpl)
3312 {
3313#if defined(VBOX) && defined(DEBUG)
3314 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3315#endif
3316 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3317 }
3318 } else {
3319 if (dpl != rpl)
3320 {
3321#if defined(VBOX) && defined(DEBUG)
3322 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3323#endif
3324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3325 }
3326 }
3327 if (!(e2 & DESC_P_MASK))
3328 {
3329#if defined(VBOX) && defined(DEBUG)
3330 printf("DESC_P_MASK e2=%08x\n", e2);
3331#endif
3332 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3333 }
3334
3335 sp += addend;
3336 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3337 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3338 /* return to same privilege level */
3339 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3340 get_seg_base(e1, e2),
3341 get_seg_limit(e1, e2),
3342 e2);
3343 } else {
3344 /* return to different privilege level */
3345#ifdef TARGET_X86_64
3346 if (shift == 2) {
3347 POPQ(sp, new_esp);
3348 POPQ(sp, new_ss);
3349 new_ss &= 0xffff;
3350 } else
3351#endif
3352 if (shift == 1) {
3353 /* 32 bits */
3354 POPL(ssp, sp, sp_mask, new_esp);
3355 POPL(ssp, sp, sp_mask, new_ss);
3356 new_ss &= 0xffff;
3357 } else {
3358 /* 16 bits */
3359 POPW(ssp, sp, sp_mask, new_esp);
3360 POPW(ssp, sp, sp_mask, new_ss);
3361 }
3362#ifdef DEBUG_PCALL
3363 if (loglevel & CPU_LOG_PCALL) {
3364 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3365 new_ss, new_esp);
3366 }
3367#endif
3368 if ((new_ss & 0xfffc) == 0) {
3369#ifdef TARGET_X86_64
3370 /* NULL ss is allowed in long mode if cpl != 3*/
3371 /* XXX: test CS64 ? */
3372 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3373 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3374 0, 0xffffffff,
3375 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3376 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3377 DESC_W_MASK | DESC_A_MASK);
3378 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3379 } else
3380#endif
3381 {
3382 raise_exception_err(EXCP0D_GPF, 0);
3383 }
3384 } else {
3385 if ((new_ss & 3) != rpl)
3386 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3387 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3388 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3389 if (!(ss_e2 & DESC_S_MASK) ||
3390 (ss_e2 & DESC_CS_MASK) ||
3391 !(ss_e2 & DESC_W_MASK))
3392 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3393 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3394 if (dpl != rpl)
3395 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3396 if (!(ss_e2 & DESC_P_MASK))
3397 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3398 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3399 get_seg_base(ss_e1, ss_e2),
3400 get_seg_limit(ss_e1, ss_e2),
3401 ss_e2);
3402 }
3403
3404 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3405 get_seg_base(e1, e2),
3406 get_seg_limit(e1, e2),
3407 e2);
3408 cpu_x86_set_cpl(env, rpl);
3409 sp = new_esp;
3410#ifdef TARGET_X86_64
3411 if (env->hflags & HF_CS64_MASK)
3412 sp_mask = -1;
3413 else
3414#endif
3415 sp_mask = get_sp_mask(ss_e2);
3416
3417 /* validate data segments */
3418 validate_seg(R_ES, rpl);
3419 validate_seg(R_DS, rpl);
3420 validate_seg(R_FS, rpl);
3421 validate_seg(R_GS, rpl);
3422
3423 sp += addend;
3424 }
3425 SET_ESP(sp, sp_mask);
3426 env->eip = new_eip;
3427 if (is_iret) {
3428 /* NOTE: 'cpl' is the _old_ CPL */
3429 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3430 if (cpl == 0)
3431#ifdef VBOX
3432 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3433#else
3434 eflags_mask |= IOPL_MASK;
3435#endif
3436 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3437 if (cpl <= iopl)
3438 eflags_mask |= IF_MASK;
3439 if (shift == 0)
3440 eflags_mask &= 0xffff;
3441 load_eflags(new_eflags, eflags_mask);
3442 }
3443 return;
3444
3445 return_to_vm86:
3446 POPL(ssp, sp, sp_mask, new_esp);
3447 POPL(ssp, sp, sp_mask, new_ss);
3448 POPL(ssp, sp, sp_mask, new_es);
3449 POPL(ssp, sp, sp_mask, new_ds);
3450 POPL(ssp, sp, sp_mask, new_fs);
3451 POPL(ssp, sp, sp_mask, new_gs);
3452
3453 /* modify processor state */
3454 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3455 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3456 load_seg_vm(R_CS, new_cs & 0xffff);
3457 cpu_x86_set_cpl(env, 3);
3458 load_seg_vm(R_SS, new_ss & 0xffff);
3459 load_seg_vm(R_ES, new_es & 0xffff);
3460 load_seg_vm(R_DS, new_ds & 0xffff);
3461 load_seg_vm(R_FS, new_fs & 0xffff);
3462 load_seg_vm(R_GS, new_gs & 0xffff);
3463
3464 env->eip = new_eip & 0xffff;
3465 ESP = new_esp;
3466}
3467
3468void helper_iret_protected(int shift, int next_eip)
3469{
3470 int tss_selector, type;
3471 uint32_t e1, e2;
3472
3473#ifdef VBOX
3474 e1 = e2 = 0;
3475 remR3TrapClear(env->pVM);
3476#endif
3477
3478 /* specific case for TSS */
3479 if (env->eflags & NT_MASK) {
3480#ifdef TARGET_X86_64
3481 if (env->hflags & HF_LMA_MASK)
3482 raise_exception_err(EXCP0D_GPF, 0);
3483#endif
3484 tss_selector = lduw_kernel(env->tr.base + 0);
3485 if (tss_selector & 4)
3486 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3487 if (load_segment(&e1, &e2, tss_selector) != 0)
3488 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3489 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3490 /* NOTE: we check both segment and busy TSS */
3491 if (type != 3)
3492 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3493 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3494 } else {
3495 helper_ret_protected(shift, 1, 0);
3496 }
3497 env->hflags2 &= ~HF2_NMI_MASK;
3498#ifdef USE_KQEMU
3499 if (kqemu_is_ok(env)) {
3500 CC_OP = CC_OP_EFLAGS;
3501 env->exception_index = -1;
3502 cpu_loop_exit();
3503 }
3504#endif
3505}
3506
3507void helper_lret_protected(int shift, int addend)
3508{
3509 helper_ret_protected(shift, 0, addend);
3510#ifdef USE_KQEMU
3511 if (kqemu_is_ok(env)) {
3512 env->exception_index = -1;
3513 cpu_loop_exit();
3514 }
3515#endif
3516}
3517
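/* SYSENTER/SYSEXIT use the IA32_SYSENTER_* MSRs cached in env: SYSENTER
 * loads CS from sysenter_cs (forced to CPL 0), SS = sysenter_cs + 8 and
 * ESP/EIP from sysenter_esp/sysenter_eip; SYSEXIT returns to CPL 3 with
 * CS = sysenter_cs + 16 (+32 for a 64-bit return), SS = CS + 8, ESP = ECX
 * and EIP = EDX.
 */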
3518void helper_sysenter(void)
3519{
3520 if (env->sysenter_cs == 0) {
3521 raise_exception_err(EXCP0D_GPF, 0);
3522 }
3523 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3524 cpu_x86_set_cpl(env, 0);
3525
3526#ifdef TARGET_X86_64
3527 if (env->hflags & HF_LMA_MASK) {
3528 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3529 0, 0xffffffff,
3530 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3531 DESC_S_MASK |
3532 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3533 } else
3534#endif
3535 {
3536 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3537 0, 0xffffffff,
3538 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3539 DESC_S_MASK |
3540 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3541 }
3542 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3543 0, 0xffffffff,
3544 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3545 DESC_S_MASK |
3546 DESC_W_MASK | DESC_A_MASK);
3547 ESP = env->sysenter_esp;
3548 EIP = env->sysenter_eip;
3549}
3550
3551void helper_sysexit(int dflag)
3552{
3553 int cpl;
3554
3555 cpl = env->hflags & HF_CPL_MASK;
3556 if (env->sysenter_cs == 0 || cpl != 0) {
3557 raise_exception_err(EXCP0D_GPF, 0);
3558 }
3559 cpu_x86_set_cpl(env, 3);
3560#ifdef TARGET_X86_64
3561 if (dflag == 2) {
3562 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3563 0, 0xffffffff,
3564 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3565 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3566 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3567 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3568 0, 0xffffffff,
3569 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3570 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3571 DESC_W_MASK | DESC_A_MASK);
3572 } else
3573#endif
3574 {
3575 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3576 0, 0xffffffff,
3577 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3578 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3579 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3580 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3581 0, 0xffffffff,
3582 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3583 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3584 DESC_W_MASK | DESC_A_MASK);
3585 }
3586 ESP = ECX;
3587 EIP = EDX;
3588#ifdef USE_KQEMU
3589 if (kqemu_is_ok(env)) {
3590 env->exception_index = -1;
3591 cpu_loop_exit();
3592 }
3593#endif
3594}
3595
3596#if defined(CONFIG_USER_ONLY)
3597target_ulong helper_read_crN(int reg)
3598{
3599 return 0;
3600}
3601
3602void helper_write_crN(int reg, target_ulong t0)
3603{
3604}
3605#else
3606target_ulong helper_read_crN(int reg)
3607{
3608 target_ulong val;
3609
3610 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3611 switch(reg) {
3612 default:
3613 val = env->cr[reg];
3614 break;
3615 case 8:
3616 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3617 val = cpu_get_apic_tpr(env);
3618 } else {
3619 val = env->v_tpr;
3620 }
3621 break;
3622 }
3623 return val;
3624}
3625
3626void helper_write_crN(int reg, target_ulong t0)
3627{
3628 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3629 switch(reg) {
3630 case 0:
3631 cpu_x86_update_cr0(env, t0);
3632 break;
3633 case 3:
3634 cpu_x86_update_cr3(env, t0);
3635 break;
3636 case 4:
3637 cpu_x86_update_cr4(env, t0);
3638 break;
3639 case 8:
3640 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3641 cpu_set_apic_tpr(env, t0);
3642 }
3643 env->v_tpr = t0 & 0x0f;
3644 break;
3645 default:
3646 env->cr[reg] = t0;
3647 break;
3648 }
3649}
3650#endif
3651
3652void helper_lmsw(target_ulong t0)
3653{
3654 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3655 if already set to one. */
3656 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3657 helper_write_crN(0, t0);
3658}
3659
3660void helper_clts(void)
3661{
3662 env->cr[0] &= ~CR0_TS_MASK;
3663 env->hflags &= ~HF_TS_MASK;
3664}
3665
3666/* XXX: do more */
3667void helper_movl_drN_T0(int reg, target_ulong t0)
3668{
3669 env->dr[reg] = t0;
3670}
3671
3672void helper_invlpg(target_ulong addr)
3673{
3674 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3675 tlb_flush_page(env, addr);
3676}
3677
3678void helper_rdtsc(void)
3679{
3680 uint64_t val;
3681
3682 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3683 raise_exception(EXCP0D_GPF);
3684 }
3685 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3686
3687 val = cpu_get_tsc(env) + env->tsc_offset;
3688 EAX = (uint32_t)(val);
3689 EDX = (uint32_t)(val >> 32);
3690}
3691
3692#ifdef VBOX
3693void helper_rdtscp(void)
3694{
3695 uint64_t val;
3696 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3697 raise_exception(EXCP0D_GPF);
3698 }
3699
3700 val = cpu_get_tsc(env);
3701 EAX = (uint32_t)(val);
3702 EDX = (uint32_t)(val >> 32);
3703 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3704}
3705#endif
3706
3707void helper_rdpmc(void)
3708{
3709#ifdef VBOX
3710 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3711 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3712 raise_exception(EXCP0D_GPF);
3713 }
3714 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3715 EAX = 0;
3716 EDX = 0;
3717#else
3718 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3719 raise_exception(EXCP0D_GPF);
3720 }
3721 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3722
3723 /* currently unimplemented */
3724 raise_exception_err(EXCP06_ILLOP, 0);
3725#endif
3726}
3727
3728#if defined(CONFIG_USER_ONLY)
3729void helper_wrmsr(void)
3730{
3731}
3732
3733void helper_rdmsr(void)
3734{
3735}
3736#else
3737void helper_wrmsr(void)
3738{
3739 uint64_t val;
3740
3741 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3742
3743 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3744
3745 switch((uint32_t)ECX) {
3746 case MSR_IA32_SYSENTER_CS:
3747 env->sysenter_cs = val & 0xffff;
3748 break;
3749 case MSR_IA32_SYSENTER_ESP:
3750 env->sysenter_esp = val;
3751 break;
3752 case MSR_IA32_SYSENTER_EIP:
3753 env->sysenter_eip = val;
3754 break;
3755 case MSR_IA32_APICBASE:
3756 cpu_set_apic_base(env, val);
3757 break;
3758 case MSR_EFER:
3759 {
3760 uint64_t update_mask;
3761 update_mask = 0;
3762 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3763 update_mask |= MSR_EFER_SCE;
3764 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3765 update_mask |= MSR_EFER_LME;
3766 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3767 update_mask |= MSR_EFER_FFXSR;
3768 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3769 update_mask |= MSR_EFER_NXE;
3770 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3771 update_mask |= MSR_EFER_SVME;
3772 cpu_load_efer(env, (env->efer & ~update_mask) |
3773 (val & update_mask));
3774 }
3775 break;
3776 case MSR_STAR:
3777 env->star = val;
3778 break;
3779 case MSR_PAT:
3780 env->pat = val;
3781 break;
3782 case MSR_VM_HSAVE_PA:
3783 env->vm_hsave = val;
3784 break;
3785#ifdef TARGET_X86_64
3786 case MSR_LSTAR:
3787 env->lstar = val;
3788 break;
3789 case MSR_CSTAR:
3790 env->cstar = val;
3791 break;
3792 case MSR_FMASK:
3793 env->fmask = val;
3794 break;
3795 case MSR_FSBASE:
3796 env->segs[R_FS].base = val;
3797 break;
3798 case MSR_GSBASE:
3799 env->segs[R_GS].base = val;
3800 break;
3801 case MSR_KERNELGSBASE:
3802 env->kernelgsbase = val;
3803 break;
3804#endif
3805 default:
3806#ifndef VBOX
3807 /* XXX: exception ? */
3808 break;
3809#else /* VBOX */
3810 {
3811 uint32_t ecx = (uint32_t)ECX;
3812 /* In X2APIC specification this range is reserved for APIC control. */
3813 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3814 cpu_apic_wrmsr(env, ecx, val);
3815 /** @todo else exception? */
3816 break;
3817 }
3818 case MSR_K8_TSC_AUX:
3819 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3820 break;
3821#endif /* VBOX */
3822 }
3823}
3824
3825void helper_rdmsr(void)
3826{
3827 uint64_t val;
3828 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3829
3830 switch((uint32_t)ECX) {
3831 case MSR_IA32_SYSENTER_CS:
3832 val = env->sysenter_cs;
3833 break;
3834 case MSR_IA32_SYSENTER_ESP:
3835 val = env->sysenter_esp;
3836 break;
3837 case MSR_IA32_SYSENTER_EIP:
3838 val = env->sysenter_eip;
3839 break;
3840 case MSR_IA32_APICBASE:
3841 val = cpu_get_apic_base(env);
3842 break;
3843 case MSR_EFER:
3844 val = env->efer;
3845 break;
3846 case MSR_STAR:
3847 val = env->star;
3848 break;
3849 case MSR_PAT:
3850 val = env->pat;
3851 break;
3852 case MSR_VM_HSAVE_PA:
3853 val = env->vm_hsave;
3854 break;
3855#ifdef VBOX
3856 case MSR_IA32_PERF_STATUS:
3857 case MSR_IA32_PLATFORM_INFO:
3858 case MSR_IA32_FSB_CLOCK_STS:
3859 case MSR_IA32_THERM_STATUS:
3860 val = CPUMGetGuestMsr(env->pVCpu, (uint32_t)ECX);
3861 break;
3862#else
3863 case MSR_IA32_PERF_STATUS:
3864 /* tsc_increment_by_tick */
3865 val = 1000ULL;
3866 /* CPU multiplier */
3867 val |= ((uint64_t)4ULL << 40);
3868 break;
3869#endif
3870#ifdef TARGET_X86_64
3871 case MSR_LSTAR:
3872 val = env->lstar;
3873 break;
3874 case MSR_CSTAR:
3875 val = env->cstar;
3876 break;
3877 case MSR_FMASK:
3878 val = env->fmask;
3879 break;
3880 case MSR_FSBASE:
3881 val = env->segs[R_FS].base;
3882 break;
3883 case MSR_GSBASE:
3884 val = env->segs[R_GS].base;
3885 break;
3886 case MSR_KERNELGSBASE:
3887 val = env->kernelgsbase;
3888 break;
3889#endif
3890#ifdef USE_KQEMU
3891 case MSR_QPI_COMMBASE:
3892 if (env->kqemu_enabled) {
3893 val = kqemu_comm_base;
3894 } else {
3895 val = 0;
3896 }
3897 break;
3898#endif
3899 default:
3900#ifndef VBOX
3901 /* XXX: exception ? */
3902 val = 0;
3903 break;
3904#else /* VBOX */
3905 {
3906 uint32_t ecx = (uint32_t)ECX;
3907 /* In X2APIC specification this range is reserved for APIC control. */
3908 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3909 val = cpu_apic_rdmsr(env, ecx);
3910 else
3911 val = 0; /** @todo else exception? */
3912 break;
3913 }
3914 case MSR_IA32_TSC:
3915 case MSR_K8_TSC_AUX:
3916 val = cpu_rdmsr(env, (uint32_t)ECX);
3917 break;
3918#endif /* VBOX */
3919 }
3920 EAX = (uint32_t)(val);
3921 EDX = (uint32_t)(val >> 32);
3922}
3923#endif
3924
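/* LSL, LAR, VERR and VERW report success through ZF in CC_SRC: LSL returns
 * the byte-granular segment limit, LAR the access-rights bytes masked with
 * 0x00f0ff00, and the VERR/VERW helpers only update the flags.
 */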
3925target_ulong helper_lsl(target_ulong selector1)
3926{
3927 unsigned int limit;
3928 uint32_t e1, e2, eflags, selector;
3929 int rpl, dpl, cpl, type;
3930
3931 selector = selector1 & 0xffff;
3932 eflags = cc_table[CC_OP].compute_all();
3933 if (load_segment(&e1, &e2, selector) != 0)
3934 goto fail;
3935 rpl = selector & 3;
3936 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3937 cpl = env->hflags & HF_CPL_MASK;
3938 if (e2 & DESC_S_MASK) {
3939 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3940 /* conforming */
3941 } else {
3942 if (dpl < cpl || dpl < rpl)
3943 goto fail;
3944 }
3945 } else {
3946 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3947 switch(type) {
3948 case 1:
3949 case 2:
3950 case 3:
3951 case 9:
3952 case 11:
3953 break;
3954 default:
3955 goto fail;
3956 }
3957 if (dpl < cpl || dpl < rpl) {
3958 fail:
3959 CC_SRC = eflags & ~CC_Z;
3960 return 0;
3961 }
3962 }
3963 limit = get_seg_limit(e1, e2);
3964 CC_SRC = eflags | CC_Z;
3965 return limit;
3966}
3967
3968target_ulong helper_lar(target_ulong selector1)
3969{
3970 uint32_t e1, e2, eflags, selector;
3971 int rpl, dpl, cpl, type;
3972
3973 selector = selector1 & 0xffff;
3974 eflags = cc_table[CC_OP].compute_all();
3975 if ((selector & 0xfffc) == 0)
3976 goto fail;
3977 if (load_segment(&e1, &e2, selector) != 0)
3978 goto fail;
3979 rpl = selector & 3;
3980 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3981 cpl = env->hflags & HF_CPL_MASK;
3982 if (e2 & DESC_S_MASK) {
3983 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3984 /* conforming */
3985 } else {
3986 if (dpl < cpl || dpl < rpl)
3987 goto fail;
3988 }
3989 } else {
3990 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
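     /* System descriptor types accepted by LAR: TSSes (1/3/9/11), LDT (2),
        call gates (4/12) and the task gate (5). */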
3991 switch(type) {
3992 case 1:
3993 case 2:
3994 case 3:
3995 case 4:
3996 case 5:
3997 case 9:
3998 case 11:
3999 case 12:
4000 break;
4001 default:
4002 goto fail;
4003 }
4004 if (dpl < cpl || dpl < rpl) {
4005 fail:
4006 CC_SRC = eflags & ~CC_Z;
4007 return 0;
4008 }
4009 }
4010 CC_SRC = eflags | CC_Z;
4011 return e2 & 0x00f0ff00;
4012}
4013
4014void helper_verr(target_ulong selector1)
4015{
4016 uint32_t e1, e2, eflags, selector;
4017 int rpl, dpl, cpl;
4018
4019 selector = selector1 & 0xffff;
4020 eflags = cc_table[CC_OP].compute_all();
4021 if ((selector & 0xfffc) == 0)
4022 goto fail;
4023 if (load_segment(&e1, &e2, selector) != 0)
4024 goto fail;
4025 if (!(e2 & DESC_S_MASK))
4026 goto fail;
4027 rpl = selector & 3;
4028 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4029 cpl = env->hflags & HF_CPL_MASK;
4030 if (e2 & DESC_CS_MASK) {
4031 if (!(e2 & DESC_R_MASK))
4032 goto fail;
4033 if (!(e2 & DESC_C_MASK)) {
4034 if (dpl < cpl || dpl < rpl)
4035 goto fail;
4036 }
4037 } else {
4038 if (dpl < cpl || dpl < rpl) {
4039 fail:
4040 CC_SRC = eflags & ~CC_Z;
4041 return;
4042 }
4043 }
4044 CC_SRC = eflags | CC_Z;
4045}
4046
4047void helper_verw(target_ulong selector1)
4048{
4049 uint32_t e1, e2, eflags, selector;
4050 int rpl, dpl, cpl;
4051
4052 selector = selector1 & 0xffff;
4053 eflags = cc_table[CC_OP].compute_all();
4054 if ((selector & 0xfffc) == 0)
4055 goto fail;
4056 if (load_segment(&e1, &e2, selector) != 0)
4057 goto fail;
4058 if (!(e2 & DESC_S_MASK))
4059 goto fail;
4060 rpl = selector & 3;
4061 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4062 cpl = env->hflags & HF_CPL_MASK;
4063 if (e2 & DESC_CS_MASK) {
4064 goto fail;
4065 } else {
4066 if (dpl < cpl || dpl < rpl)
4067 goto fail;
4068 if (!(e2 & DESC_W_MASK)) {
4069 fail:
4070 CC_SRC = eflags & ~CC_Z;
4071 return;
4072 }
4073 }
4074 CC_SRC = eflags | CC_Z;
4075}
4076
4077/* x87 FPU helpers */
4078
4079static void fpu_set_exception(int mask)
4080{
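     /* Set the requested exception bits; if any pending exception is unmasked in the
        control word, also raise the error-summary (ES) and busy (B) status bits. */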
4081 env->fpus |= mask;
4082 if (env->fpus & (~env->fpuc & FPUC_EM))
4083 env->fpus |= FPUS_SE | FPUS_B;
4084}
4085
4086#ifndef VBOX
4087static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4088#else /* VBOX */
4089DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4090#endif /* VBOX */
4091{
4092 if (b == 0.0)
4093 fpu_set_exception(FPUS_ZE);
4094 return a / b;
4095}
4096
4097void fpu_raise_exception(void)
4098{
4099 if (env->cr[0] & CR0_NE_MASK) {
4100 raise_exception(EXCP10_COPR);
4101 }
4102#if !defined(CONFIG_USER_ONLY)
4103 else {
4104 cpu_set_ferr(env);
4105 }
4106#endif
4107}
4108
4109void helper_flds_FT0(uint32_t val)
4110{
4111 union {
4112 float32 f;
4113 uint32_t i;
4114 } u;
4115 u.i = val;
4116 FT0 = float32_to_floatx(u.f, &env->fp_status);
4117}
4118
4119void helper_fldl_FT0(uint64_t val)
4120{
4121 union {
4122 float64 f;
4123 uint64_t i;
4124 } u;
4125 u.i = val;
4126 FT0 = float64_to_floatx(u.f, &env->fp_status);
4127}
4128
4129void helper_fildl_FT0(int32_t val)
4130{
4131 FT0 = int32_to_floatx(val, &env->fp_status);
4132}
4133
4134void helper_flds_ST0(uint32_t val)
4135{
4136 int new_fpstt;
4137 union {
4138 float32 f;
4139 uint32_t i;
4140 } u;
4141 new_fpstt = (env->fpstt - 1) & 7;
4142 u.i = val;
4143 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4144 env->fpstt = new_fpstt;
4145 env->fptags[new_fpstt] = 0; /* validate stack entry */
4146}
4147
4148void helper_fldl_ST0(uint64_t val)
4149{
4150 int new_fpstt;
4151 union {
4152 float64 f;
4153 uint64_t i;
4154 } u;
4155 new_fpstt = (env->fpstt - 1) & 7;
4156 u.i = val;
4157 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4158 env->fpstt = new_fpstt;
4159 env->fptags[new_fpstt] = 0; /* validate stack entry */
4160}
4161
4162void helper_fildl_ST0(int32_t val)
4163{
4164 int new_fpstt;
4165 new_fpstt = (env->fpstt - 1) & 7;
4166 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4167 env->fpstt = new_fpstt;
4168 env->fptags[new_fpstt] = 0; /* validate stack entry */
4169}
4170
4171void helper_fildll_ST0(int64_t val)
4172{
4173 int new_fpstt;
4174 new_fpstt = (env->fpstt - 1) & 7;
4175 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4176 env->fpstt = new_fpstt;
4177 env->fptags[new_fpstt] = 0; /* validate stack entry */
4178}
4179
4180#ifndef VBOX
4181uint32_t helper_fsts_ST0(void)
4182#else
4183RTCCUINTREG helper_fsts_ST0(void)
4184#endif
4185{
4186 union {
4187 float32 f;
4188 uint32_t i;
4189 } u;
4190 u.f = floatx_to_float32(ST0, &env->fp_status);
4191 return u.i;
4192}
4193
4194uint64_t helper_fstl_ST0(void)
4195{
4196 union {
4197 float64 f;
4198 uint64_t i;
4199 } u;
4200 u.f = floatx_to_float64(ST0, &env->fp_status);
4201 return u.i;
4202}
4203#ifndef VBOX
4204int32_t helper_fist_ST0(void)
4205#else
4206RTCCINTREG helper_fist_ST0(void)
4207#endif
4208{
4209 int32_t val;
4210 val = floatx_to_int32(ST0, &env->fp_status);
4211 if (val != (int16_t)val)
4212 val = -32768;
4213 return val;
4214}
4215
4216#ifndef VBOX
4217int32_t helper_fistl_ST0(void)
4218#else
4219RTCCINTREG helper_fistl_ST0(void)
4220#endif
4221{
4222 int32_t val;
4223 val = floatx_to_int32(ST0, &env->fp_status);
4224 return val;
4225}
4226
4227int64_t helper_fistll_ST0(void)
4228{
4229 int64_t val;
4230 val = floatx_to_int64(ST0, &env->fp_status);
4231 return val;
4232}
4233
4234#ifndef VBOX
4235int32_t helper_fistt_ST0(void)
4236#else
4237RTCCINTREG helper_fistt_ST0(void)
4238#endif
4239{
4240 int32_t val;
4241 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4242 if (val != (int16_t)val)
4243 val = -32768;
4244 return val;
4245}
4246
4247#ifndef VBOX
4248int32_t helper_fisttl_ST0(void)
4249#else
4250RTCCINTREG helper_fisttl_ST0(void)
4251#endif
4252{
4253 int32_t val;
4254 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4255 return val;
4256}
4257
4258int64_t helper_fisttll_ST0(void)
4259{
4260 int64_t val;
4261 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4262 return val;
4263}
4264
4265void helper_fldt_ST0(target_ulong ptr)
4266{
4267 int new_fpstt;
4268 new_fpstt = (env->fpstt - 1) & 7;
4269 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4270 env->fpstt = new_fpstt;
4271 env->fptags[new_fpstt] = 0; /* validate stack entry */
4272}
4273
4274void helper_fstt_ST0(target_ulong ptr)
4275{
4276 helper_fstt(ST0, ptr);
4277}
4278
4279void helper_fpush(void)
4280{
4281 fpush();
4282}
4283
4284void helper_fpop(void)
4285{
4286 fpop();
4287}
4288
4289void helper_fdecstp(void)
4290{
4291 env->fpstt = (env->fpstt - 1) & 7;
4292 env->fpus &= (~0x4700);
4293}
4294
4295void helper_fincstp(void)
4296{
4297 env->fpstt = (env->fpstt + 1) & 7;
4298 env->fpus &= (~0x4700);
4299}
4300
4301/* FPU move */
4302
4303void helper_ffree_STN(int st_index)
4304{
4305 env->fptags[(env->fpstt + st_index) & 7] = 1;
4306}
4307
4308void helper_fmov_ST0_FT0(void)
4309{
4310 ST0 = FT0;
4311}
4312
4313void helper_fmov_FT0_STN(int st_index)
4314{
4315 FT0 = ST(st_index);
4316}
4317
4318void helper_fmov_ST0_STN(int st_index)
4319{
4320 ST0 = ST(st_index);
4321}
4322
4323void helper_fmov_STN_ST0(int st_index)
4324{
4325 ST(st_index) = ST0;
4326}
4327
4328void helper_fxchg_ST0_STN(int st_index)
4329{
4330 CPU86_LDouble tmp;
4331 tmp = ST(st_index);
4332 ST(st_index) = ST0;
4333 ST0 = tmp;
4334}
4335
4336/* FPU operations */
4337
4338static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4339
4340void helper_fcom_ST0_FT0(void)
4341{
4342 int ret;
4343
4344 ret = floatx_compare(ST0, FT0, &env->fp_status);
4345 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4346 FORCE_RET();
4347}
4348
4349void helper_fucom_ST0_FT0(void)
4350{
4351 int ret;
4352
4353 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4354 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4355 FORCE_RET();
4356}
4357
4358static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4359
4360void helper_fcomi_ST0_FT0(void)
4361{
4362 int eflags;
4363 int ret;
4364
4365 ret = floatx_compare(ST0, FT0, &env->fp_status);
4366 eflags = cc_table[CC_OP].compute_all();
4367 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4368 CC_SRC = eflags;
4369 FORCE_RET();
4370}
4371
4372void helper_fucomi_ST0_FT0(void)
4373{
4374 int eflags;
4375 int ret;
4376
4377 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4378 eflags = cc_table[CC_OP].compute_all();
4379 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4380 CC_SRC = eflags;
4381 FORCE_RET();
4382}
4383
4384void helper_fadd_ST0_FT0(void)
4385{
4386 ST0 += FT0;
4387}
4388
4389void helper_fmul_ST0_FT0(void)
4390{
4391 ST0 *= FT0;
4392}
4393
4394void helper_fsub_ST0_FT0(void)
4395{
4396 ST0 -= FT0;
4397}
4398
4399void helper_fsubr_ST0_FT0(void)
4400{
4401 ST0 = FT0 - ST0;
4402}
4403
4404void helper_fdiv_ST0_FT0(void)
4405{
4406 ST0 = helper_fdiv(ST0, FT0);
4407}
4408
4409void helper_fdivr_ST0_FT0(void)
4410{
4411 ST0 = helper_fdiv(FT0, ST0);
4412}
4413
4414/* fp operations between STN and ST0 */
4415
4416void helper_fadd_STN_ST0(int st_index)
4417{
4418 ST(st_index) += ST0;
4419}
4420
4421void helper_fmul_STN_ST0(int st_index)
4422{
4423 ST(st_index) *= ST0;
4424}
4425
4426void helper_fsub_STN_ST0(int st_index)
4427{
4428 ST(st_index) -= ST0;
4429}
4430
4431void helper_fsubr_STN_ST0(int st_index)
4432{
4433 CPU86_LDouble *p;
4434 p = &ST(st_index);
4435 *p = ST0 - *p;
4436}
4437
4438void helper_fdiv_STN_ST0(int st_index)
4439{
4440 CPU86_LDouble *p;
4441 p = &ST(st_index);
4442 *p = helper_fdiv(*p, ST0);
4443}
4444
4445void helper_fdivr_STN_ST0(int st_index)
4446{
4447 CPU86_LDouble *p;
4448 p = &ST(st_index);
4449 *p = helper_fdiv(ST0, *p);
4450}
4451
4452/* misc FPU operations */
4453void helper_fchs_ST0(void)
4454{
4455 ST0 = floatx_chs(ST0);
4456}
4457
4458void helper_fabs_ST0(void)
4459{
4460 ST0 = floatx_abs(ST0);
4461}
4462
4463void helper_fld1_ST0(void)
4464{
4465 ST0 = f15rk[1];
4466}
4467
4468void helper_fldl2t_ST0(void)
4469{
4470 ST0 = f15rk[6];
4471}
4472
4473void helper_fldl2e_ST0(void)
4474{
4475 ST0 = f15rk[5];
4476}
4477
4478void helper_fldpi_ST0(void)
4479{
4480 ST0 = f15rk[2];
4481}
4482
4483void helper_fldlg2_ST0(void)
4484{
4485 ST0 = f15rk[3];
4486}
4487
4488void helper_fldln2_ST0(void)
4489{
4490 ST0 = f15rk[4];
4491}
4492
4493void helper_fldz_ST0(void)
4494{
4495 ST0 = f15rk[0];
4496}
4497
4498void helper_fldz_FT0(void)
4499{
4500 FT0 = f15rk[0];
4501}
4502
4503#ifndef VBOX
4504uint32_t helper_fnstsw(void)
4505#else
4506RTCCUINTREG helper_fnstsw(void)
4507#endif
4508{
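     /* FNSTSW: merge the current top-of-stack index into bits 11-13 of the status word. */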
4509 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4510}
4511
4512#ifndef VBOX
4513uint32_t helper_fnstcw(void)
4514#else
4515RTCCUINTREG helper_fnstcw(void)
4516#endif
4517{
4518 return env->fpuc;
4519}
4520
4521static void update_fp_status(void)
4522{
4523 int rnd_type;
4524
4525 /* set rounding mode */
4526 switch(env->fpuc & RC_MASK) {
4527 default:
4528 case RC_NEAR:
4529 rnd_type = float_round_nearest_even;
4530 break;
4531 case RC_DOWN:
4532 rnd_type = float_round_down;
4533 break;
4534 case RC_UP:
4535 rnd_type = float_round_up;
4536 break;
4537 case RC_CHOP:
4538 rnd_type = float_round_to_zero;
4539 break;
4540 }
4541 set_float_rounding_mode(rnd_type, &env->fp_status);
4542#ifdef FLOATX80
4543 switch((env->fpuc >> 8) & 3) {
4544 case 0:
4545 rnd_type = 32;
4546 break;
4547 case 2:
4548 rnd_type = 64;
4549 break;
4550 case 3:
4551 default:
4552 rnd_type = 80;
4553 break;
4554 }
4555 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4556#endif
4557}
4558
4559void helper_fldcw(uint32_t val)
4560{
4561 env->fpuc = val;
4562 update_fp_status();
4563}
4564
4565void helper_fclex(void)
4566{
4567 env->fpus &= 0x7f00;
4568}
4569
4570void helper_fwait(void)
4571{
4572 if (env->fpus & FPUS_SE)
4573 fpu_raise_exception();
4574 FORCE_RET();
4575}
4576
4577void helper_fninit(void)
4578{
4579 env->fpus = 0;
4580 env->fpstt = 0;
4581 env->fpuc = 0x37f;
4582 env->fptags[0] = 1;
4583 env->fptags[1] = 1;
4584 env->fptags[2] = 1;
4585 env->fptags[3] = 1;
4586 env->fptags[4] = 1;
4587 env->fptags[5] = 1;
4588 env->fptags[6] = 1;
4589 env->fptags[7] = 1;
4590}
4591
4592/* BCD ops */
4593
4594void helper_fbld_ST0(target_ulong ptr)
4595{
4596 CPU86_LDouble tmp;
4597 uint64_t val;
4598 unsigned int v;
4599 int i;
4600
4601 val = 0;
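     /* Accumulate the 18 packed BCD digits (9 bytes, two digits each), most significant
        byte first; the sign lives in the tenth byte. */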
4602 for(i = 8; i >= 0; i--) {
4603 v = ldub(ptr + i);
4604 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4605 }
4606 tmp = val;
4607 if (ldub(ptr + 9) & 0x80)
4608 tmp = -tmp;
4609 fpush();
4610 ST0 = tmp;
4611}
4612
4613void helper_fbst_ST0(target_ulong ptr)
4614{
4615 int v;
4616 target_ulong mem_ref, mem_end;
4617 int64_t val;
4618
4619 val = floatx_to_int64(ST0, &env->fp_status);
4620 mem_ref = ptr;
4621 mem_end = mem_ref + 9;
4622 if (val < 0) {
4623 stb(mem_end, 0x80);
4624 val = -val;
4625 } else {
4626 stb(mem_end, 0x00);
4627 }
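     /* Store the magnitude as packed BCD: two decimal digits per byte, least
        significant byte first. */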
4628 while (mem_ref < mem_end) {
4629 if (val == 0)
4630 break;
4631 v = val % 100;
4632 val = val / 100;
4633 v = ((v / 10) << 4) | (v % 10);
4634 stb(mem_ref++, v);
4635 }
4636 while (mem_ref < mem_end) {
4637 stb(mem_ref++, 0);
4638 }
4639}
4640
4641void helper_f2xm1(void)
4642{
4643 ST0 = pow(2.0,ST0) - 1.0;
4644}
4645
4646void helper_fyl2x(void)
4647{
4648 CPU86_LDouble fptemp;
4649
4650 fptemp = ST0;
4651 if (fptemp>0.0){
4652 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4653 ST1 *= fptemp;
4654 fpop();
4655 } else {
4656 env->fpus &= (~0x4700);
4657 env->fpus |= 0x400;
4658 }
4659}
4660
4661void helper_fptan(void)
4662{
4663 CPU86_LDouble fptemp;
4664
4665 fptemp = ST0;
4666 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4667 env->fpus |= 0x400;
4668 } else {
4669 ST0 = tan(fptemp);
4670 fpush();
4671 ST0 = 1.0;
4672 env->fpus &= (~0x400); /* C2 <-- 0 */
4673 /* the above code is for |arg| < 2**52 only */
4674 }
4675}
4676
4677void helper_fpatan(void)
4678{
4679 CPU86_LDouble fptemp, fpsrcop;
4680
4681 fpsrcop = ST1;
4682 fptemp = ST0;
4683 ST1 = atan2(fpsrcop,fptemp);
4684 fpop();
4685}
4686
4687void helper_fxtract(void)
4688{
4689 CPU86_LDoubleU temp;
4690 unsigned int expdif;
4691
4692 temp.d = ST0;
4693 expdif = EXPD(temp) - EXPBIAS;
4694 /*DP exponent bias*/
4695 ST0 = expdif;
4696 fpush();
4697 BIASEXPONENT(temp);
4698 ST0 = temp.d;
4699}
4700
4701#ifdef VBOX
4702#ifdef _MSC_VER
4703/* MSC cannot divide by zero */
4704extern double _Nan;
4705#define NaN _Nan
4706#else
4707#define NaN (0.0 / 0.0)
4708#endif
4709#endif /* VBOX */
4710
4711void helper_fprem1(void)
4712{
4713 CPU86_LDouble dblq, fpsrcop, fptemp;
4714 CPU86_LDoubleU fpsrcop1, fptemp1;
4715 int expdif;
4716 signed long long int q;
4717
4718#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4719 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4720#else
4721 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4722#endif
4723 ST0 = 0.0 / 0.0; /* NaN */
4724 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4725 return;
4726 }
4727
4728 fpsrcop = ST0;
4729 fptemp = ST1;
4730 fpsrcop1.d = fpsrcop;
4731 fptemp1.d = fptemp;
4732 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4733
4734 if (expdif < 0) {
4735 /* optimisation? taken from the AMD docs */
4736 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4737 /* ST0 is unchanged */
4738 return;
4739 }
4740
4741 if (expdif < 53) {
4742 dblq = fpsrcop / fptemp;
4743 /* round dblq towards nearest integer */
4744 dblq = rint(dblq);
4745 ST0 = fpsrcop - fptemp * dblq;
4746
4747 /* convert dblq to q by truncating towards zero */
4748 if (dblq < 0.0)
4749 q = (signed long long int)(-dblq);
4750 else
4751 q = (signed long long int)dblq;
4752
4753 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4754 /* (C0,C3,C1) <-- (q2,q1,q0) */
4755 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4756 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4757 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4758 } else {
4759 env->fpus |= 0x400; /* C2 <-- 1 */
4760 fptemp = pow(2.0, expdif - 50);
4761 fpsrcop = (ST0 / ST1) / fptemp;
4762 /* fpsrcop = integer obtained by chopping */
4763 fpsrcop = (fpsrcop < 0.0) ?
4764 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4765 ST0 -= (ST1 * fpsrcop * fptemp);
4766 }
4767}
4768
4769void helper_fprem(void)
4770{
4771 CPU86_LDouble dblq, fpsrcop, fptemp;
4772 CPU86_LDoubleU fpsrcop1, fptemp1;
4773 int expdif;
4774 signed long long int q;
4775
4776#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4777 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4778#else
4779 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4780#endif
4781 ST0 = 0.0 / 0.0; /* NaN */
4782 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4783 return;
4784 }
4785
4786 fpsrcop = (CPU86_LDouble)ST0;
4787 fptemp = (CPU86_LDouble)ST1;
4788 fpsrcop1.d = fpsrcop;
4789 fptemp1.d = fptemp;
4790 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4791
4792 if (expdif < 0) {
4793 /* optimisation? taken from the AMD docs */
4794 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4795 /* ST0 is unchanged */
4796 return;
4797 }
4798
4799 if ( expdif < 53 ) {
4800 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4801 /* round dblq towards zero */
4802 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4803 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4804
4805 /* convert dblq to q by truncating towards zero */
4806 if (dblq < 0.0)
4807 q = (signed long long int)(-dblq);
4808 else
4809 q = (signed long long int)dblq;
4810
4811 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4812 /* (C0,C3,C1) <-- (q2,q1,q0) */
4813 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4814 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4815 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4816 } else {
4817 int N = 32 + (expdif % 32); /* as per AMD docs */
4818 env->fpus |= 0x400; /* C2 <-- 1 */
4819 fptemp = pow(2.0, (double)(expdif - N));
4820 fpsrcop = (ST0 / ST1) / fptemp;
4821 /* fpsrcop = integer obtained by chopping */
4822 fpsrcop = (fpsrcop < 0.0) ?
4823 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4824 ST0 -= (ST1 * fpsrcop * fptemp);
4825 }
4826}
4827
4828void helper_fyl2xp1(void)
4829{
4830 CPU86_LDouble fptemp;
4831
4832 fptemp = ST0;
4833 if ((fptemp+1.0)>0.0) {
4834 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4835 ST1 *= fptemp;
4836 fpop();
4837 } else {
4838 env->fpus &= (~0x4700);
4839 env->fpus |= 0x400;
4840 }
4841}
4842
4843void helper_fsqrt(void)
4844{
4845 CPU86_LDouble fptemp;
4846
4847 fptemp = ST0;
4848 if (fptemp<0.0) {
4849 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4850 env->fpus |= 0x400;
4851 }
4852 ST0 = sqrt(fptemp);
4853}
4854
4855void helper_fsincos(void)
4856{
4857 CPU86_LDouble fptemp;
4858
4859 fptemp = ST0;
4860 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4861 env->fpus |= 0x400;
4862 } else {
4863 ST0 = sin(fptemp);
4864 fpush();
4865 ST0 = cos(fptemp);
4866 env->fpus &= (~0x400); /* C2 <-- 0 */
4867 /* the above code is for |arg| < 2**63 only */
4868 }
4869}
4870
4871void helper_frndint(void)
4872{
4873 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4874}
4875
4876void helper_fscale(void)
4877{
4878 ST0 = ldexp (ST0, (int)(ST1));
4879}
4880
4881void helper_fsin(void)
4882{
4883 CPU86_LDouble fptemp;
4884
4885 fptemp = ST0;
4886 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4887 env->fpus |= 0x400;
4888 } else {
4889 ST0 = sin(fptemp);
4890 env->fpus &= (~0x400); /* C2 <-- 0 */
4891 /* the above code is for |arg| < 2**53 only */
4892 }
4893}
4894
4895void helper_fcos(void)
4896{
4897 CPU86_LDouble fptemp;
4898
4899 fptemp = ST0;
4900 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4901 env->fpus |= 0x400;
4902 } else {
4903 ST0 = cos(fptemp);
4904 env->fpus &= (~0x400); /* C2 <-- 0 */
4905 /* the above code is for |arg| < 2**63 only */
4906 }
4907}
4908
4909void helper_fxam_ST0(void)
4910{
4911 CPU86_LDoubleU temp;
4912 int expdif;
4913
4914 temp.d = ST0;
4915
4916 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4917 if (SIGND(temp))
4918 env->fpus |= 0x200; /* C1 <-- 1 */
4919
4920 /* XXX: test fptags too */
4921 expdif = EXPD(temp);
4922 if (expdif == MAXEXPD) {
4923#ifdef USE_X86LDOUBLE
4924 if (MANTD(temp) == 0x8000000000000000ULL)
4925#else
4926 if (MANTD(temp) == 0)
4927#endif
4928 env->fpus |= 0x500 /*Infinity*/;
4929 else
4930 env->fpus |= 0x100 /*NaN*/;
4931 } else if (expdif == 0) {
4932 if (MANTD(temp) == 0)
4933 env->fpus |= 0x4000 /*Zero*/;
4934 else
4935 env->fpus |= 0x4400 /*Denormal*/;
4936 } else {
4937 env->fpus |= 0x400;
4938 }
4939}
4940
4941void helper_fstenv(target_ulong ptr, int data32)
4942{
4943 int fpus, fptag, exp, i;
4944 uint64_t mant;
4945 CPU86_LDoubleU tmp;
4946
4947 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4948 fptag = 0;
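     /* Rebuild the full x87 tag word: 00 = valid, 01 = zero, 10 = special
        (NaN/infinity/denormal), 11 = empty. */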
4949 for (i=7; i>=0; i--) {
4950 fptag <<= 2;
4951 if (env->fptags[i]) {
4952 fptag |= 3;
4953 } else {
4954 tmp.d = env->fpregs[i].d;
4955 exp = EXPD(tmp);
4956 mant = MANTD(tmp);
4957 if (exp == 0 && mant == 0) {
4958 /* zero */
4959 fptag |= 1;
4960 } else if (exp == 0 || exp == MAXEXPD
4961#ifdef USE_X86LDOUBLE
4962 || (mant & (1LL << 63)) == 0
4963#endif
4964 ) {
4965 /* NaNs, infinity, denormal */
4966 fptag |= 2;
4967 }
4968 }
4969 }
4970 if (data32) {
4971 /* 32 bit */
4972 stl(ptr, env->fpuc);
4973 stl(ptr + 4, fpus);
4974 stl(ptr + 8, fptag);
4975 stl(ptr + 12, 0); /* fpip */
4976 stl(ptr + 16, 0); /* fpcs */
4977 stl(ptr + 20, 0); /* fpoo */
4978 stl(ptr + 24, 0); /* fpos */
4979 } else {
4980 /* 16 bit */
4981 stw(ptr, env->fpuc);
4982 stw(ptr + 2, fpus);
4983 stw(ptr + 4, fptag);
4984 stw(ptr + 6, 0);
4985 stw(ptr + 8, 0);
4986 stw(ptr + 10, 0);
4987 stw(ptr + 12, 0);
4988 }
4989}
4990
4991void helper_fldenv(target_ulong ptr, int data32)
4992{
4993 int i, fpus, fptag;
4994
4995 if (data32) {
4996 env->fpuc = lduw(ptr);
4997 fpus = lduw(ptr + 4);
4998 fptag = lduw(ptr + 8);
4999 }
5000 else {
5001 env->fpuc = lduw(ptr);
5002 fpus = lduw(ptr + 2);
5003 fptag = lduw(ptr + 4);
5004 }
5005 env->fpstt = (fpus >> 11) & 7;
5006 env->fpus = fpus & ~0x3800;
5007 for(i = 0;i < 8; i++) {
5008 env->fptags[i] = ((fptag & 3) == 3);
5009 fptag >>= 2;
5010 }
5011}
5012
5013void helper_fsave(target_ulong ptr, int data32)
5014{
5015 CPU86_LDouble tmp;
5016 int i;
5017
5018 helper_fstenv(ptr, data32);
5019
5020 ptr += (14 << data32);
5021 for(i = 0;i < 8; i++) {
5022 tmp = ST(i);
5023 helper_fstt(tmp, ptr);
5024 ptr += 10;
5025 }
5026
5027 /* fninit */
5028 env->fpus = 0;
5029 env->fpstt = 0;
5030 env->fpuc = 0x37f;
5031 env->fptags[0] = 1;
5032 env->fptags[1] = 1;
5033 env->fptags[2] = 1;
5034 env->fptags[3] = 1;
5035 env->fptags[4] = 1;
5036 env->fptags[5] = 1;
5037 env->fptags[6] = 1;
5038 env->fptags[7] = 1;
5039}
5040
5041void helper_frstor(target_ulong ptr, int data32)
5042{
5043 CPU86_LDouble tmp;
5044 int i;
5045
5046 helper_fldenv(ptr, data32);
5047 ptr += (14 << data32);
5048
5049 for(i = 0;i < 8; i++) {
5050 tmp = helper_fldt(ptr);
5051 ST(i) = tmp;
5052 ptr += 10;
5053 }
5054}
5055
5056void helper_fxsave(target_ulong ptr, int data64)
5057{
5058 int fpus, fptag, i, nb_xmm_regs;
5059 CPU86_LDouble tmp;
5060 target_ulong addr;
5061
5062 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5063 fptag = 0;
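     /* Build the abridged FXSAVE tag byte: one bit per register; the XOR below
        turns it into 1 = non-empty. */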
5064 for(i = 0; i < 8; i++) {
5065 fptag |= (env->fptags[i] << i);
5066 }
5067 stw(ptr, env->fpuc);
5068 stw(ptr + 2, fpus);
5069 stw(ptr + 4, fptag ^ 0xff);
5070#ifdef TARGET_X86_64
5071 if (data64) {
5072 stq(ptr + 0x08, 0); /* rip */
5073 stq(ptr + 0x10, 0); /* rdp */
5074 } else
5075#endif
5076 {
5077 stl(ptr + 0x08, 0); /* eip */
5078 stl(ptr + 0x0c, 0); /* sel */
5079 stl(ptr + 0x10, 0); /* dp */
5080 stl(ptr + 0x14, 0); /* sel */
5081 }
5082
5083 addr = ptr + 0x20;
5084 for(i = 0;i < 8; i++) {
5085 tmp = ST(i);
5086 helper_fstt(tmp, addr);
5087 addr += 16;
5088 }
5089
5090 if (env->cr[4] & CR4_OSFXSR_MASK) {
5091 /* XXX: finish it */
5092 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5093 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5094 if (env->hflags & HF_CS64_MASK)
5095 nb_xmm_regs = 16;
5096 else
5097 nb_xmm_regs = 8;
5098 addr = ptr + 0xa0;
5099 for(i = 0; i < nb_xmm_regs; i++) {
5100 stq(addr, env->xmm_regs[i].XMM_Q(0));
5101 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5102 addr += 16;
5103 }
5104 }
5105}
5106
5107void helper_fxrstor(target_ulong ptr, int data64)
5108{
5109 int i, fpus, fptag, nb_xmm_regs;
5110 CPU86_LDouble tmp;
5111 target_ulong addr;
5112
5113 env->fpuc = lduw(ptr);
5114 fpus = lduw(ptr + 2);
5115 fptag = lduw(ptr + 4);
5116 env->fpstt = (fpus >> 11) & 7;
5117 env->fpus = fpus & ~0x3800;
5118 fptag ^= 0xff;
5119 for(i = 0;i < 8; i++) {
5120 env->fptags[i] = ((fptag >> i) & 1);
5121 }
5122
5123 addr = ptr + 0x20;
5124 for(i = 0;i < 8; i++) {
5125 tmp = helper_fldt(addr);
5126 ST(i) = tmp;
5127 addr += 16;
5128 }
5129
5130 if (env->cr[4] & CR4_OSFXSR_MASK) {
5131 /* XXX: finish it */
5132 env->mxcsr = ldl(ptr + 0x18);
5133 //ldl(ptr + 0x1c);
5134 if (env->hflags & HF_CS64_MASK)
5135 nb_xmm_regs = 16;
5136 else
5137 nb_xmm_regs = 8;
5138 addr = ptr + 0xa0;
5139 for(i = 0; i < nb_xmm_regs; i++) {
5140#if !defined(VBOX) || __GNUC__ < 4
5141 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5142 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5143#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5144# if 1
5145 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5146 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5147 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5148 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5149# else
5150 /* this works fine on Mac OS X, gcc 4.0.1 */
5151 uint64_t u64 = ldq(addr);
5152 env->xmm_regs[i].XMM_Q(0) = u64;
5153 u64 = ldq(addr + 8);
5154 env->xmm_regs[i].XMM_Q(1) = u64;
5155# endif
5156#endif
5157 addr += 16;
5158 }
5159 }
5160}
5161
5162#ifndef USE_X86LDOUBLE
5163
5164void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5165{
5166 CPU86_LDoubleU temp;
5167 int e;
5168
5169 temp.d = f;
5170 /* mantissa */
5171 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5172 /* exponent + sign */
5173 e = EXPD(temp) - EXPBIAS + 16383;
5174 e |= SIGND(temp) >> 16;
5175 *pexp = e;
5176}
5177
5178CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5179{
5180 CPU86_LDoubleU temp;
5181 int e;
5182 uint64_t ll;
5183
5184 /* XXX: handle overflow ? */
5185 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5186 e |= (upper >> 4) & 0x800; /* sign */
5187 ll = (mant >> 11) & ((1LL << 52) - 1);
5188#ifdef __arm__
5189 temp.l.upper = (e << 20) | (ll >> 32);
5190 temp.l.lower = ll;
5191#else
5192 temp.ll = ll | ((uint64_t)e << 52);
5193#endif
5194 return temp.d;
5195}
5196
5197#else
5198
5199void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5200{
5201 CPU86_LDoubleU temp;
5202
5203 temp.d = f;
5204 *pmant = temp.l.lower;
5205 *pexp = temp.l.upper;
5206}
5207
5208CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5209{
5210 CPU86_LDoubleU temp;
5211
5212 temp.l.upper = upper;
5213 temp.l.lower = mant;
5214 return temp.d;
5215}
5216#endif
5217
5218#ifdef TARGET_X86_64
5219
5220//#define DEBUG_MULDIV
5221
5222static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5223{
5224 *plow += a;
5225 /* carry test */
5226 if (*plow < a)
5227 (*phigh)++;
5228 *phigh += b;
5229}
5230
5231static void neg128(uint64_t *plow, uint64_t *phigh)
5232{
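     /* Two's complement negation of the 128-bit value: invert both halves and add one. */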
5233 *plow = ~ *plow;
5234 *phigh = ~ *phigh;
5235 add128(plow, phigh, 1, 0);
5236}
5237
5238/* return TRUE if overflow */
5239static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5240{
5241 uint64_t q, r, a1, a0;
5242 int i, qb, ab;
5243
5244 a0 = *plow;
5245 a1 = *phigh;
5246 if (a1 == 0) {
5247 q = a0 / b;
5248 r = a0 % b;
5249 *plow = q;
5250 *phigh = r;
5251 } else {
5252 if (a1 >= b)
5253 return 1;
5254 /* XXX: use a better algorithm */
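     /* Restoring shift-subtract division: shift the 128-bit dividend left one bit at a
        time, subtracting b whenever possible; quotient bits enter a0 from the right. */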
5255 for(i = 0; i < 64; i++) {
5256 ab = a1 >> 63;
5257 a1 = (a1 << 1) | (a0 >> 63);
5258 if (ab || a1 >= b) {
5259 a1 -= b;
5260 qb = 1;
5261 } else {
5262 qb = 0;
5263 }
5264 a0 = (a0 << 1) | qb;
5265 }
5266#if defined(DEBUG_MULDIV)
5267 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5268 *phigh, *plow, b, a0, a1);
5269#endif
5270 *plow = a0;
5271 *phigh = a1;
5272 }
5273 return 0;
5274}
5275
5276/* return TRUE if overflow */
5277static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5278{
5279 int sa, sb;
5280 sa = ((int64_t)*phigh < 0);
5281 if (sa)
5282 neg128(plow, phigh);
5283 sb = (b < 0);
5284 if (sb)
5285 b = -b;
5286 if (div64(plow, phigh, b) != 0)
5287 return 1;
5288 if (sa ^ sb) {
5289 if (*plow > (1ULL << 63))
5290 return 1;
5291 *plow = - *plow;
5292 } else {
5293 if (*plow >= (1ULL << 63))
5294 return 1;
5295 }
5296 if (sa)
5297 *phigh = - *phigh;
5298 return 0;
5299}
5300
5301void helper_mulq_EAX_T0(target_ulong t0)
5302{
5303 uint64_t r0, r1;
5304
5305 mulu64(&r0, &r1, EAX, t0);
5306 EAX = r0;
5307 EDX = r1;
5308 CC_DST = r0;
5309 CC_SRC = r1;
5310}
5311
5312void helper_imulq_EAX_T0(target_ulong t0)
5313{
5314 uint64_t r0, r1;
5315
5316 muls64(&r0, &r1, EAX, t0);
5317 EAX = r0;
5318 EDX = r1;
5319 CC_DST = r0;
5320 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5321}
5322
5323target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5324{
5325 uint64_t r0, r1;
5326
5327 muls64(&r0, &r1, t0, t1);
5328 CC_DST = r0;
5329 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5330 return r0;
5331}
5332
5333void helper_divq_EAX(target_ulong t0)
5334{
5335 uint64_t r0, r1;
5336 if (t0 == 0) {
5337 raise_exception(EXCP00_DIVZ);
5338 }
5339 r0 = EAX;
5340 r1 = EDX;
5341 if (div64(&r0, &r1, t0))
5342 raise_exception(EXCP00_DIVZ);
5343 EAX = r0;
5344 EDX = r1;
5345}
5346
5347void helper_idivq_EAX(target_ulong t0)
5348{
5349 uint64_t r0, r1;
5350 if (t0 == 0) {
5351 raise_exception(EXCP00_DIVZ);
5352 }
5353 r0 = EAX;
5354 r1 = EDX;
5355 if (idiv64(&r0, &r1, t0))
5356 raise_exception(EXCP00_DIVZ);
5357 EAX = r0;
5358 EDX = r1;
5359}
5360#endif
5361
5362static void do_hlt(void)
5363{
5364 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5365 env->halted = 1;
5366 env->exception_index = EXCP_HLT;
5367 cpu_loop_exit();
5368}
5369
5370void helper_hlt(int next_eip_addend)
5371{
5372 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5373 EIP += next_eip_addend;
5374
5375 do_hlt();
5376}
5377
5378void helper_monitor(target_ulong ptr)
5379{
5380#ifdef VBOX
5381 if ((uint32_t)ECX > 1)
5382 raise_exception(EXCP0D_GPF);
5383#else
5384 if ((uint32_t)ECX != 0)
5385 raise_exception(EXCP0D_GPF);
5386#endif
5387 /* XXX: store address ? */
5388 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5389}
5390
5391void helper_mwait(int next_eip_addend)
5392{
5393 if ((uint32_t)ECX != 0)
5394 raise_exception(EXCP0D_GPF);
5395#ifdef VBOX
5396 helper_hlt(next_eip_addend);
5397#else
5398 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5399 EIP += next_eip_addend;
5400
5401 /* XXX: not complete but not completely erroneous */
5402 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5403 /* more than one CPU: do not sleep because another CPU may
5404 wake this one */
5405 } else {
5406 do_hlt();
5407 }
5408#endif
5409}
5410
5411void helper_debug(void)
5412{
5413 env->exception_index = EXCP_DEBUG;
5414 cpu_loop_exit();
5415}
5416
5417void helper_raise_interrupt(int intno, int next_eip_addend)
5418{
5419 raise_interrupt(intno, 1, 0, next_eip_addend);
5420}
5421
5422void helper_raise_exception(int exception_index)
5423{
5424 raise_exception(exception_index);
5425}
5426
5427void helper_cli(void)
5428{
5429 env->eflags &= ~IF_MASK;
5430}
5431
5432void helper_sti(void)
5433{
5434 env->eflags |= IF_MASK;
5435}
5436
5437#ifdef VBOX
5438void helper_cli_vme(void)
5439{
5440 env->eflags &= ~VIF_MASK;
5441}
5442
5443void helper_sti_vme(void)
5444{
5445 /* First check, then change eflags according to the AMD manual */
5446 if (env->eflags & VIP_MASK) {
5447 raise_exception(EXCP0D_GPF);
5448 }
5449 env->eflags |= VIF_MASK;
5450}
5451#endif
5452
5453#if 0
5454/* vm86plus instructions */
5455void helper_cli_vm(void)
5456{
5457 env->eflags &= ~VIF_MASK;
5458}
5459
5460void helper_sti_vm(void)
5461{
5462 env->eflags |= VIF_MASK;
5463 if (env->eflags & VIP_MASK) {
5464 raise_exception(EXCP0D_GPF);
5465 }
5466}
5467#endif
5468
5469void helper_set_inhibit_irq(void)
5470{
5471 env->hflags |= HF_INHIBIT_IRQ_MASK;
5472}
5473
5474void helper_reset_inhibit_irq(void)
5475{
5476 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5477}
5478
5479void helper_boundw(target_ulong a0, int v)
5480{
5481 int low, high;
5482 low = ldsw(a0);
5483 high = ldsw(a0 + 2);
5484 v = (int16_t)v;
5485 if (v < low || v > high) {
5486 raise_exception(EXCP05_BOUND);
5487 }
5488 FORCE_RET();
5489}
5490
5491void helper_boundl(target_ulong a0, int v)
5492{
5493 int low, high;
5494 low = ldl(a0);
5495 high = ldl(a0 + 4);
5496 if (v < low || v > high) {
5497 raise_exception(EXCP05_BOUND);
5498 }
5499 FORCE_RET();
5500}
5501
5502static float approx_rsqrt(float a)
5503{
5504 return 1.0 / sqrt(a);
5505}
5506
5507static float approx_rcp(float a)
5508{
5509 return 1.0 / a;
5510}
5511
5512#if !defined(CONFIG_USER_ONLY)
5513
5514#define MMUSUFFIX _mmu
5515
5516#define SHIFT 0
5517#include "softmmu_template.h"
5518
5519#define SHIFT 1
5520#include "softmmu_template.h"
5521
5522#define SHIFT 2
5523#include "softmmu_template.h"
5524
5525#define SHIFT 3
5526#include "softmmu_template.h"
5527
5528#endif
5529
5530#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5531/* This code assumes the real physical address always fits into a host CPU register,
5532 which is wrong in general, but true for our current use cases. */
5533RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5534{
5535 return remR3PhysReadS8(addr);
5536}
5537RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5538{
5539 return remR3PhysReadU8(addr);
5540}
5541void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5542{
5543 remR3PhysWriteU8(addr, val);
5544}
5545RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5546{
5547 return remR3PhysReadS16(addr);
5548}
5549RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5550{
5551 return remR3PhysReadU16(addr);
5552}
5553void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5554{
5555 remR3PhysWriteU16(addr, val);
5556}
5557RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5558{
5559 return remR3PhysReadS32(addr);
5560}
5561RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5562{
5563 return remR3PhysReadU32(addr);
5564}
5565void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5566{
5567 remR3PhysWriteU32(addr, val);
5568}
5569uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5570{
5571 return remR3PhysReadU64(addr);
5572}
5573void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5574{
5575 remR3PhysWriteU64(addr, val);
5576}
5577#endif
5578
5579/* try to fill the TLB and return an exception if error. If retaddr is
5580 NULL, it means that the function was called in C code (i.e. not
5581 from generated code or from helper.c) */
5582/* XXX: fix it to restore all registers */
5583void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5584{
5585 TranslationBlock *tb;
5586 int ret;
5587 unsigned long pc;
5588 CPUX86State *saved_env;
5589
5590 /* XXX: hack to restore env in all cases, even if not called from
5591 generated code */
5592 saved_env = env;
5593 env = cpu_single_env;
5594
5595 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5596 if (ret) {
5597 if (retaddr) {
5598 /* now we have a real cpu fault */
5599 pc = (unsigned long)retaddr;
5600 tb = tb_find_pc(pc);
5601 if (tb) {
5602 /* the PC is inside the translated code. It means that we have
5603 a virtual CPU fault */
5604 cpu_restore_state(tb, env, pc, NULL);
5605 }
5606 }
5607 raise_exception_err(env->exception_index, env->error_code);
5608 }
5609 env = saved_env;
5610}
5611
5612#ifdef VBOX
5613
5614/**
5615 * Correctly computes the eflags.
5616 * @returns eflags.
5617 * @param env1 CPU environment.
5618 */
5619uint32_t raw_compute_eflags(CPUX86State *env1)
5620{
5621 CPUX86State *savedenv = env;
5622 uint32_t efl;
5623 env = env1;
5624 efl = compute_eflags();
5625 env = savedenv;
5626 return efl;
5627}
5628
5629/**
5630 * Reads a byte from a virtual address in the guest memory area.
5631 * XXX: is it working for any addresses? swapped out pages?
5632 * @returns The byte read.
5633 * @param env1 CPU environment.
5634 * @param addr GC virtual address.
5635 */
5636uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5637{
5638 CPUX86State *savedenv = env;
5639 uint8_t u8;
5640 env = env1;
5641 u8 = ldub_kernel(addr);
5642 env = savedenv;
5643 return u8;
5644}
5645
5646/**
5647 * Reads a 16-bit word from a virtual address in the guest memory area.
5648 * XXX: is it working for any addresses? swapped out pages?
5649 * @returns The word read.
5650 * @param env1 CPU environment.
5651 * @param addr GC virtual address.
5652 */
5653uint16_t read_word(CPUX86State *env1, target_ulong addr)
5654{
5655 CPUX86State *savedenv = env;
5656 uint16_t u16;
5657 env = env1;
5658 u16 = lduw_kernel(addr);
5659 env = savedenv;
5660 return u16;
5661}
5662
5663/**
5664 * Reads a 32-bit dword from a virtual address in the guest memory area.
5665 * XXX: is it working for any addresses? swapped out pages?
5666 * @returns The dword read.
5667 * @param env1 CPU environment.
5668 * @param addr GC virtual address.
5669 */
5670uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5671{
5672 CPUX86State *savedenv = env;
5673 uint32_t u32;
5674 env = env1;
5675 u32 = ldl_kernel(addr);
5676 env = savedenv;
5677 return u32;
5678}
5679
5680/**
5681 * Writes a byte to a virtual address in the guest memory area.
5682 * XXX: is it working for any addresses? swapped out pages?
5683 *
5684 * @param env1 CPU environment.
5685 * @param addr GC virtual address.
5686 * @param val Byte value to write.
5687 */
5688void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5689{
5690 CPUX86State *savedenv = env;
5691 env = env1;
5692 stb(addr, val);
5693 env = savedenv;
5694}
5695
5696void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5697{
5698 CPUX86State *savedenv = env;
5699 env = env1;
5700 stw(addr, val);
5701 env = savedenv;
5702}
5703
5704void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5705{
5706 CPUX86State *savedenv = env;
5707 env = env1;
5708 stl(addr, val);
5709 env = savedenv;
5710}
5711
5712/**
5713 * Correctly loads a selector into a segment register, updating the internal
5714 * qemu data/caches.
5715 * @param env1 CPU environment.
5716 * @param seg_reg Segment register.
5717 * @param selector Selector to load.
5718 */
5719void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5720{
5721 CPUX86State *savedenv = env;
5722#ifdef FORCE_SEGMENT_SYNC
5723 jmp_buf old_buf;
5724#endif
5725
5726 env = env1;
5727
5728 if ( env->eflags & X86_EFL_VM
5729 || !(env->cr[0] & X86_CR0_PE))
5730 {
5731 load_seg_vm(seg_reg, selector);
5732
5733 env = savedenv;
5734
5735 /* Successful sync. */
5736 env1->segs[seg_reg].newselector = 0;
5737 }
5738 else
5739 {
5740 /* For some reason this works even without saving/restoring the jump buffer; as this code is
5741 time critical, we only save/restore it when FORCE_SEGMENT_SYNC is defined. */
5742#ifdef FORCE_SEGMENT_SYNC
5743 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5744#endif
5745 if (setjmp(env1->jmp_env) == 0)
5746 {
5747 if (seg_reg == R_CS)
5748 {
5749 uint32_t e1, e2;
5750 e1 = e2 = 0;
5751 load_segment(&e1, &e2, selector);
5752 cpu_x86_load_seg_cache(env, R_CS, selector,
5753 get_seg_base(e1, e2),
5754 get_seg_limit(e1, e2),
5755 e2);
5756 }
5757 else
5758 helper_load_seg(seg_reg, selector);
5759 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5760 loading 0 selectors, which in turn led to subtle problems like #3588 */
5761
5762 env = savedenv;
5763
5764 /* Successful sync. */
5765 env1->segs[seg_reg].newselector = 0;
5766 }
5767 else
5768 {
5769 env = savedenv;
5770
5771 /* Postpone sync until the guest uses the selector. */
5772 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5773 env1->segs[seg_reg].newselector = selector;
5774 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5775 env1->exception_index = -1;
5776 env1->error_code = 0;
5777 env1->old_exception = -1;
5778 }
5779#ifdef FORCE_SEGMENT_SYNC
5780 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5781#endif
5782 }
5783
5784}
5785
5786DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5787{
5788 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5789}
5790
5791
5792int emulate_single_instr(CPUX86State *env1)
5793{
5794 TranslationBlock *tb;
5795 TranslationBlock *current;
5796 int flags;
5797 uint8_t *tc_ptr;
5798 target_ulong old_eip;
5799
5800 /* ensures env is loaded! */
5801 CPUX86State *savedenv = env;
5802 env = env1;
5803
5804 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5805
5806 current = env->current_tb;
5807 env->current_tb = NULL;
5808 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5809
5810 /*
5811 * Translate only one instruction.
5812 */
5813 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5814 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5815 env->segs[R_CS].base, flags, 0);
5816
5817 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5818
5819
5820 /* tb_link_phys: */
5821 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5822 tb->jmp_next[0] = NULL;
5823 tb->jmp_next[1] = NULL;
5824 Assert(tb->jmp_next[0] == NULL);
5825 Assert(tb->jmp_next[1] == NULL);
5826 if (tb->tb_next_offset[0] != 0xffff)
5827 tb_reset_jump(tb, 0);
5828 if (tb->tb_next_offset[1] != 0xffff)
5829 tb_reset_jump(tb, 1);
5830
5831 /*
5832 * Execute it using emulation
5833 */
5834 old_eip = env->eip;
5835 env->current_tb = tb;
5836
5837 /*
5838 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside
5839 * the generated code. Perhaps not a very safe hack.
5840 */
5841 while(old_eip == env->eip)
5842 {
5843 tc_ptr = tb->tc_ptr;
5844
5845#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5846 int fake_ret;
5847 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5848#else
5849 tcg_qemu_tb_exec(tc_ptr);
5850#endif
5851 /*
5852 * Exit once we detect an external interrupt and interrupts are enabled
5853 */
5854 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5855 ( (env->eflags & IF_MASK) &&
5856 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5857 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5858 {
5859 break;
5860 }
5861 }
5862 env->current_tb = current;
5863
5864 tb_phys_invalidate(tb, -1);
5865 tb_free(tb);
5866/*
5867 Assert(tb->tb_next_offset[0] == 0xffff);
5868 Assert(tb->tb_next_offset[1] == 0xffff);
5869 Assert(tb->tb_next[0] == 0xffff);
5870 Assert(tb->tb_next[1] == 0xffff);
5871 Assert(tb->jmp_next[0] == NULL);
5872 Assert(tb->jmp_next[1] == NULL);
5873 Assert(tb->jmp_first == NULL); */
5874
5875 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5876
5877 /*
5878 * Execute the next instruction when we encounter instruction fusing.
5879 */
5880 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5881 {
5882 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5883 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5884 emulate_single_instr(env);
5885 }
5886
5887 env = savedenv;
5888 return 0;
5889}
5890
5891/**
5892 * Correctly loads a new ldtr selector.
5893 *
5894 * @param env1 CPU environment.
5895 * @param selector Selector to load.
5896 */
5897void sync_ldtr(CPUX86State *env1, int selector)
5898{
5899 CPUX86State *saved_env = env;
5900 if (setjmp(env1->jmp_env) == 0)
5901 {
5902 env = env1;
5903 helper_lldt(selector);
5904 env = saved_env;
5905 }
5906 else
5907 {
5908 env = saved_env;
5909#ifdef VBOX_STRICT
5910 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5911#endif
5912 }
5913}
5914
5915int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5916 uint32_t *esp_ptr, int dpl)
5917{
5918 int type, index, shift;
5919
5920 CPUX86State *savedenv = env;
5921 env = env1;
5922
5923 if (!(env->tr.flags & DESC_P_MASK))
5924 cpu_abort(env, "invalid tss");
5925 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5926 if ((type & 7) != 1)
5927 cpu_abort(env, "invalid tss type %d", type);
5928 shift = type >> 3;
5929 index = (dpl * 4 + 2) << shift;
5930 if (index + (4 << shift) - 1 > env->tr.limit)
5931 {
5932 env = savedenv;
5933 return 0;
5934 }
5935 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5936
5937 if (shift == 0) {
5938 *esp_ptr = lduw_kernel(env->tr.base + index);
5939 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5940 } else {
5941 *esp_ptr = ldl_kernel(env->tr.base + index);
5942 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5943 }
5944
5945 env = savedenv;
5946 return 1;
5947}
5948
5949//*****************************************************************************
5950// Needs to be at the bottom of the file (overriding macros)
5951
5952#ifndef VBOX
5953static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5954#else /* VBOX */
5955DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5956#endif /* VBOX */
5957{
5958 return *(CPU86_LDouble *)ptr;
5959}
5960
5961#ifndef VBOX
5962static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5963#else /* VBOX */
5964DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5965#endif /* VBOX */
5966{
5967 *(CPU86_LDouble *)ptr = f;
5968}
5969
5970#undef stw
5971#undef stl
5972#undef stq
5973#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5974#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5975#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5976
5977//*****************************************************************************
5978void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5979{
5980 int fpus, fptag, i, nb_xmm_regs;
5981 CPU86_LDouble tmp;
5982 uint8_t *addr;
5983 int data64 = !!(env->hflags & HF_LMA_MASK);
5984
5985 if (env->cpuid_features & CPUID_FXSR)
5986 {
5987 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5988 fptag = 0;
5989 for(i = 0; i < 8; i++) {
5990 fptag |= (env->fptags[i] << i);
5991 }
5992 stw(ptr, env->fpuc);
5993 stw(ptr + 2, fpus);
5994 stw(ptr + 4, fptag ^ 0xff);
5995
5996 addr = ptr + 0x20;
5997 for(i = 0;i < 8; i++) {
5998 tmp = ST(i);
5999 helper_fstt_raw(tmp, addr);
6000 addr += 16;
6001 }
6002
6003 if (env->cr[4] & CR4_OSFXSR_MASK) {
6004 /* XXX: finish it */
6005 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6006 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6007 nb_xmm_regs = 8 << data64;
6008 addr = ptr + 0xa0;
6009 for(i = 0; i < nb_xmm_regs; i++) {
6010#if __GNUC__ < 4
6011 stq(addr, env->xmm_regs[i].XMM_Q(0));
6012 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6013#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6014 stl(addr, env->xmm_regs[i].XMM_L(0));
6015 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6016 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6017 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6018#endif
6019 addr += 16;
6020 }
6021 }
6022 }
6023 else
6024 {
6025 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6026 int fptag;
6027
6028 fp->FCW = env->fpuc;
6029 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6030 fptag = 0;
6031 for (i=7; i>=0; i--) {
6032 fptag <<= 2;
6033 if (env->fptags[i]) {
6034 fptag |= 3;
6035 } else {
6036 /* the FPU automatically computes it */
6037 }
6038 }
6039 fp->FTW = fptag;
6040
6041 for(i = 0;i < 8; i++) {
6042 tmp = ST(i);
6043 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6044 }
6045 }
6046}
6047
6048//*****************************************************************************
6049#undef lduw
6050#undef ldl
6051#undef ldq
6052#define lduw(a) *(uint16_t *)(a)
6053#define ldl(a) *(uint32_t *)(a)
6054#define ldq(a) *(uint64_t *)(a)
6055//*****************************************************************************
6056void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6057{
6058 int i, fpus, fptag, nb_xmm_regs;
6059 CPU86_LDouble tmp;
6060 uint8_t *addr;
6061 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6062
6063 if (env->cpuid_features & CPUID_FXSR)
6064 {
6065 env->fpuc = lduw(ptr);
6066 fpus = lduw(ptr + 2);
6067 fptag = lduw(ptr + 4);
6068 env->fpstt = (fpus >> 11) & 7;
6069 env->fpus = fpus & ~0x3800;
6070 fptag ^= 0xff;
6071 for(i = 0;i < 8; i++) {
6072 env->fptags[i] = ((fptag >> i) & 1);
6073 }
6074
6075 addr = ptr + 0x20;
6076 for(i = 0;i < 8; i++) {
6077 tmp = helper_fldt_raw(addr);
6078 ST(i) = tmp;
6079 addr += 16;
6080 }
6081
6082 if (env->cr[4] & CR4_OSFXSR_MASK) {
6083 /* XXX: finish it, endianness */
6084 env->mxcsr = ldl(ptr + 0x18);
6085 //ldl(ptr + 0x1c);
6086 nb_xmm_regs = 8 << data64;
6087 addr = ptr + 0xa0;
6088 for(i = 0; i < nb_xmm_regs; i++) {
6089#if HC_ARCH_BITS == 32
6090 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6091 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6092 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6093 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6094 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6095#else
6096 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6097 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6098#endif
6099 addr += 16;
6100 }
6101 }
6102 }
6103 else
6104 {
6105 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6106 int fptag, j;
6107
6108 env->fpuc = fp->FCW;
6109 env->fpstt = (fp->FSW >> 11) & 7;
6110 env->fpus = fp->FSW & ~0x3800;
6111 fptag = fp->FTW;
6112 for(i = 0;i < 8; i++) {
6113 env->fptags[i] = ((fptag & 3) == 3);
6114 fptag >>= 2;
6115 }
6116 j = env->fpstt;
6117 for(i = 0;i < 8; i++) {
6118 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6119 ST(i) = tmp;
6120 }
6121 }
6122}
6123//*****************************************************************************
6124//*****************************************************************************
6125
6126#endif /* VBOX */
6127
6128/* Secure Virtual Machine helpers */
6129
6130#if defined(CONFIG_USER_ONLY)
6131
6132void helper_vmrun(int aflag, int next_eip_addend)
6133{
6134}
6135void helper_vmmcall(void)
6136{
6137}
6138void helper_vmload(int aflag)
6139{
6140}
6141void helper_vmsave(int aflag)
6142{
6143}
6144void helper_stgi(void)
6145{
6146}
6147void helper_clgi(void)
6148{
6149}
6150void helper_skinit(void)
6151{
6152}
6153void helper_invlpga(int aflag)
6154{
6155}
6156void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6157{
6158}
6159void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6160{
6161}
6162
6163void helper_svm_check_io(uint32_t port, uint32_t param,
6164 uint32_t next_eip_addend)
6165{
6166}
6167#else
6168
6169#ifndef VBOX
6170static inline void svm_save_seg(target_phys_addr_t addr,
6171#else /* VBOX */
6172DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6173#endif /* VBOX */
6174 const SegmentCache *sc)
6175{
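     /* Write one segment register to the VMCB: selector, base, limit and the packed
        attribute word (access byte in bits 0-7, AVL/L/DB/G nibble in bits 8-11). */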
6176 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6177 sc->selector);
6178 stq_phys(addr + offsetof(struct vmcb_seg, base),
6179 sc->base);
6180 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6181 sc->limit);
6182 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6183 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6184}
6185
6186#ifndef VBOX
6187static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6188#else /* VBOX */
6189DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6190#endif /* VBOX */
6191{
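     /* Read one segment register from the VMCB and unpack the attribute word back
        into the cached descriptor flag layout. */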
6192 unsigned int flags;
6193
6194 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6195 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6196 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6197 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6198 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6199}
6200
6201#ifndef VBOX
6202static inline void svm_load_seg_cache(target_phys_addr_t addr,
6203#else /* VBOX */
6204DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6205#endif /* VBOX */
6206 CPUState *env, int seg_reg)
6207{
6208 SegmentCache sc1, *sc = &sc1;
6209 svm_load_seg(addr, sc);
6210 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6211 sc->base, sc->limit, sc->flags);
6212}
6213
6214void helper_vmrun(int aflag, int next_eip_addend)
6215{
6216 target_ulong addr;
6217 uint32_t event_inj;
6218 uint32_t int_ctl;
6219
6220 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6221
6222 if (aflag == 2)
6223 addr = EAX;
6224 else
6225 addr = (uint32_t)EAX;
6226
6227 if (loglevel & CPU_LOG_TB_IN_ASM)
6228 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6229
6230 env->vm_vmcb = addr;
6231
6232 /* save the current CPU state in the hsave page */
6233 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6234 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6235
6236 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6237 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6238
6239 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6240 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6241 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6242 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6243 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6244 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6245
6246 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6247 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6248
6249 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6250 &env->segs[R_ES]);
6251 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6252 &env->segs[R_CS]);
6253 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6254 &env->segs[R_SS]);
6255 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6256 &env->segs[R_DS]);
6257
6258 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6259 EIP + next_eip_addend);
6260 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6261 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6262
6263 /* load the interception bitmaps so we do not need to access the
6264 vmcb in svm mode */
6265 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6266 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6267 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6268 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6269 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6270 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6271
6272 /* enable intercepts */
6273 env->hflags |= HF_SVMI_MASK;
6274
6275 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6276
6277 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6278 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6279
6280 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6281 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6282
6283 /* clear exit_info_2 so we behave like the real hardware */
6284 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6285
6286 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6287 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6288 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6289 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6290 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6291 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6292 if (int_ctl & V_INTR_MASKING_MASK) {
6293 env->v_tpr = int_ctl & V_TPR_MASK;
6294 env->hflags2 |= HF2_VINTR_MASK;
6295 if (env->eflags & IF_MASK)
6296 env->hflags2 |= HF2_HIF_MASK;
6297 }
6298
6299 cpu_load_efer(env,
6300 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6301 env->eflags = 0;
6302 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6303 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6304 CC_OP = CC_OP_EFLAGS;
6305
6306 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6307 env, R_ES);
6308 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6309 env, R_CS);
6310 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6311 env, R_SS);
6312 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6313 env, R_DS);
6314
6315 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6316 env->eip = EIP;
6317 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6318 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6319 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6320 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6321 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6322
6323 /* FIXME: guest state consistency checks */
6324
6325 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6326 case TLB_CONTROL_DO_NOTHING:
6327 break;
6328 case TLB_CONTROL_FLUSH_ALL_ASID:
6329 /* FIXME: this is not 100% correct but should work for now */
6330 tlb_flush(env, 1);
6331 break;
6332 }
6333
6334 env->hflags2 |= HF2_GIF_MASK;
6335
6336 if (int_ctl & V_IRQ_MASK) {
6337 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6338 }
6339
6340 /* maybe we need to inject an event */
6341 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6342 if (event_inj & SVM_EVTINJ_VALID) {
6343 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6344 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6345 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6346 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6347
6348 if (loglevel & CPU_LOG_TB_IN_ASM)
6349 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6350 /* FIXME: need to implement valid_err */
6351 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6352 case SVM_EVTINJ_TYPE_INTR:
6353 env->exception_index = vector;
6354 env->error_code = event_inj_err;
6355 env->exception_is_int = 0;
6356 env->exception_next_eip = -1;
6357 if (loglevel & CPU_LOG_TB_IN_ASM)
6358 fprintf(logfile, "INTR");
6359            /* XXX: is it always correct? */
6360 do_interrupt(vector, 0, 0, 0, 1);
6361 break;
6362 case SVM_EVTINJ_TYPE_NMI:
6363 env->exception_index = EXCP02_NMI;
6364 env->error_code = event_inj_err;
6365 env->exception_is_int = 0;
6366 env->exception_next_eip = EIP;
6367 if (loglevel & CPU_LOG_TB_IN_ASM)
6368 fprintf(logfile, "NMI");
6369 cpu_loop_exit();
6370 break;
6371 case SVM_EVTINJ_TYPE_EXEPT:
6372 env->exception_index = vector;
6373 env->error_code = event_inj_err;
6374 env->exception_is_int = 0;
6375 env->exception_next_eip = -1;
6376 if (loglevel & CPU_LOG_TB_IN_ASM)
6377 fprintf(logfile, "EXEPT");
6378 cpu_loop_exit();
6379 break;
6380 case SVM_EVTINJ_TYPE_SOFT:
6381 env->exception_index = vector;
6382 env->error_code = event_inj_err;
6383 env->exception_is_int = 1;
6384 env->exception_next_eip = EIP;
6385 if (loglevel & CPU_LOG_TB_IN_ASM)
6386 fprintf(logfile, "SOFT");
6387 cpu_loop_exit();
6388 break;
6389 }
6390 if (loglevel & CPU_LOG_TB_IN_ASM)
6391 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6392 }
6393}
6394
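/* Editor's note: the event-injection switch at the end of helper_vmrun
   above consumes the low 32 bits of the VMCB EVENTINJ field: vector in
   bits 7:0, type in bits 10:8, "error code valid" in bit 11 and "valid"
   in bit 31, with the error code itself taken from the adjacent
   event_inj_err field.  The guarded sketch below is an editor's
   illustration of that decoding (names are the editor's own, not from
   the original sources). */
#if 0
struct evtinj_sketch {
    uint8_t vector;          /* interrupt/exception vector             */
    uint8_t type;            /* 0=INTR, 2=NMI, 3=exception, 4=soft int */
    int     has_error_code;  /* SVM_EVTINJ_VALID_ERR                   */
    int     valid;           /* SVM_EVTINJ_VALID                       */
};

static struct evtinj_sketch decode_evtinj_sketch(uint32_t event_inj)
{
    struct evtinj_sketch e;
    e.vector         = event_inj & 0xff;
    e.type           = (event_inj >> 8) & 7;
    e.has_error_code = (event_inj >> 11) & 1;
    e.valid          = (event_inj >> 31) & 1;
    return e;
}
#endif
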
6395void helper_vmmcall(void)
6396{
6397 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6398 raise_exception(EXCP06_ILLOP);
6399}
6400
6401void helper_vmload(int aflag)
6402{
6403 target_ulong addr;
6404 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6405
6406 if (aflag == 2)
6407 addr = EAX;
6408 else
6409 addr = (uint32_t)EAX;
6410
6411 if (loglevel & CPU_LOG_TB_IN_ASM)
6412 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6413 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6414 env->segs[R_FS].base);
6415
6416 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6417 env, R_FS);
6418 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6419 env, R_GS);
6420 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6421 &env->tr);
6422 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6423 &env->ldt);
6424
6425#ifdef TARGET_X86_64
6426 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6427 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6428 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6429 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6430#endif
6431 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6432 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6433 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6434 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6435}
6436
6437void helper_vmsave(int aflag)
6438{
6439 target_ulong addr;
6440 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6441
6442 if (aflag == 2)
6443 addr = EAX;
6444 else
6445 addr = (uint32_t)EAX;
6446
6447 if (loglevel & CPU_LOG_TB_IN_ASM)
6448 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6449 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6450 env->segs[R_FS].base);
6451
6452 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6453 &env->segs[R_FS]);
6454 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6455 &env->segs[R_GS]);
6456 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6457 &env->tr);
6458 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6459 &env->ldt);
6460
6461#ifdef TARGET_X86_64
6462 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6463 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6464 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6465 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6466#endif
6467 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6468 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6469 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6470 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6471}
6472
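/* Editor's note: helper_vmload/helper_vmsave above transfer the state
   that VMRUN and #VMEXIT deliberately leave alone: FS, GS, TR and LDTR
   including their hidden descriptor caches, KernelGSBase,
   STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs.  Keeping this in
   separate instructions lets a hypervisor skip the transfer when that
   state has not changed. */
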
6473void helper_stgi(void)
6474{
6475 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6476 env->hflags2 |= HF2_GIF_MASK;
6477}
6478
6479void helper_clgi(void)
6480{
6481 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6482 env->hflags2 &= ~HF2_GIF_MASK;
6483}
6484
6485void helper_skinit(void)
6486{
6487 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6488 /* XXX: not implemented */
6489 raise_exception(EXCP06_ILLOP);
6490}
6491
6492void helper_invlpga(int aflag)
6493{
6494 target_ulong addr;
6495 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6496
6497 if (aflag == 2)
6498 addr = EAX;
6499 else
6500 addr = (uint32_t)EAX;
6501
6502    /* XXX: could use the ASID to decide whether the flush is
6503       actually needed */
6504 tlb_flush_page(env, addr);
6505}
6506
6507void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6508{
6509 if (likely(!(env->hflags & HF_SVMI_MASK)))
6510 return;
6511#ifndef VBOX
6512 switch(type) {
6513#ifndef VBOX
6514 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6515#else
6516 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6517 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6518 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6519#endif
6520 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6521 helper_vmexit(type, param);
6522 }
6523 break;
6524#ifndef VBOX
6525 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6526#else
6527 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6528 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6529 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6530#endif
6531 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6532 helper_vmexit(type, param);
6533 }
6534 break;
6535 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6536 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6537 helper_vmexit(type, param);
6538 }
6539 break;
6540 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6541 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6542 helper_vmexit(type, param);
6543 }
6544 break;
6545 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6546 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6547 helper_vmexit(type, param);
6548 }
6549 break;
6550 case SVM_EXIT_MSR:
6551 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6552 /* FIXME: this should be read in at vmrun (faster this way?) */
6553 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6554 uint32_t t0, t1;
6555 switch((uint32_t)ECX) {
6556 case 0 ... 0x1fff:
6557 t0 = (ECX * 2) % 8;
6558            t1 = (ECX * 2) / 8;
6559 break;
6560 case 0xc0000000 ... 0xc0001fff:
6561 t0 = (8192 + ECX - 0xc0000000) * 2;
6562 t1 = (t0 / 8);
6563 t0 %= 8;
6564 break;
6565 case 0xc0010000 ... 0xc0011fff:
6566 t0 = (16384 + ECX - 0xc0010000) * 2;
6567 t1 = (t0 / 8);
6568 t0 %= 8;
6569 break;
6570 default:
6571 helper_vmexit(type, param);
6572 t0 = 0;
6573 t1 = 0;
6574 break;
6575 }
6576 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6577 helper_vmexit(type, param);
6578 }
6579 break;
6580 default:
6581 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6582 helper_vmexit(type, param);
6583 }
6584 break;
6585 }
6586#else
6587 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6588#endif
6589}
6590
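/* Editor's note: the SVM_EXIT_MSR case above indexes the MSR permission
   map, which holds two bits per MSR (the even bit gates reads, the odd
   bit gates writes) with the three architectural MSR ranges packed into
   consecutive 2 KB regions.  The guarded sketch below is an editor's
   illustration of that byte/bit arithmetic on its own, not part of the
   original file. */
#if 0
static int msrpm_bit_offset_sketch(uint32_t msr, int is_write,
                                   uint32_t *byte_off, uint32_t *bit_off)
{
    uint32_t bitpos;

    if (msr <= 0x1fff)                               /* 0000_0000h..0000_1FFFh */
        bitpos = msr * 2;
    else if (msr >= 0xc0000000 && msr <= 0xc0001fff) /* C000_0000h..C000_1FFFh */
        bitpos = (0x2000 + (msr - 0xc0000000)) * 2;
    else if (msr >= 0xc0010000 && msr <= 0xc0011fff) /* C001_0000h..C001_1FFFh */
        bitpos = (0x4000 + (msr - 0xc0010000)) * 2;
    else
        return 0;                                    /* not covered by the map */

    bitpos += is_write;                              /* odd bit = write intercept */
    *byte_off = bitpos / 8;
    *bit_off  = bitpos % 8;
    return 1;
}
#endif
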
6591void helper_svm_check_io(uint32_t port, uint32_t param,
6592 uint32_t next_eip_addend)
6593{
6594 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6595 /* FIXME: this should be read in at vmrun (faster this way?) */
6596 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6597 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6598 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6599 /* next EIP */
6600 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6601 env->eip + next_eip_addend);
6602 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6603 }
6604 }
6605}
6606
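/* Editor's note: in helper_svm_check_io above, bits 6:4 of 'param' (the
   IOIO exit information word) encode the access size in bytes, so
   'mask' has one bit per byte touched and the 16-bit load covers
   accesses that straddle a byte boundary of the I/O permission map
   (the map is sized with slack for exactly this reason).  The guarded
   sketch below shows the same lookup on an ordinary host buffer; it is
   an editor's illustration, not part of the original file. */
#if 0
static int iopm_intercepted_sketch(const uint8_t *iopm, uint16_t port,
                                   unsigned size_bytes)
{
    uint16_t mask = (1 << size_bytes) - 1;   /* one bit per byte of the access */
    uint16_t bits = iopm[port / 8] | ((uint16_t)iopm[port / 8 + 1] << 8);
    return (bits & (mask << (port & 7))) != 0;
}
#endif
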
6607/* Note: currently only 32 bits of exit_code are used */
6608void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6609{
6610 uint32_t int_ctl;
6611
6612 if (loglevel & CPU_LOG_TB_IN_ASM)
6613 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6614 exit_code, exit_info_1,
6615 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6616 EIP);
6617
6618 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6619 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6620 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6621 } else {
6622 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6623 }
6624
6625 /* Save the VM state in the vmcb */
6626 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6627 &env->segs[R_ES]);
6628 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6629 &env->segs[R_CS]);
6630 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6631 &env->segs[R_SS]);
6632 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6633 &env->segs[R_DS]);
6634
6635 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6636 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6637
6638 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6639 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6640
6641 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6642 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6643 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6644 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6645 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6646
6647 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6648 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6649 int_ctl |= env->v_tpr & V_TPR_MASK;
6650 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6651 int_ctl |= V_IRQ_MASK;
6652 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6653
6654 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6655 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6656 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6657 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6658 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6659 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6660 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6661
6662 /* Reload the host state from vm_hsave */
6663 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6664 env->hflags &= ~HF_SVMI_MASK;
6665 env->intercept = 0;
6666 env->intercept_exceptions = 0;
6667 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6668 env->tsc_offset = 0;
6669
6670 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6671 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6672
6673 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6674 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6675
6676 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6677 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6678 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6679 /* we need to set the efer after the crs so the hidden flags get
6680 set properly */
6681 cpu_load_efer(env,
6682 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6683 env->eflags = 0;
6684 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6685 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6686 CC_OP = CC_OP_EFLAGS;
6687
6688 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6689 env, R_ES);
6690 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6691 env, R_CS);
6692 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6693 env, R_SS);
6694 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6695 env, R_DS);
6696
6697 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6698 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6699 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6700
6701 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6702 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6703
6704 /* other setups */
6705 cpu_x86_set_cpl(env, 0);
6706 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6707 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6708
6709 env->hflags2 &= ~HF2_GIF_MASK;
6710 /* FIXME: Resets the current ASID register to zero (host ASID). */
6711
6712 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6713
6714 /* Clears the TSC_OFFSET inside the processor. */
6715
6716 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6717       from the page table indicated by the host's CR3. If the PDPEs contain
6718 illegal state, the processor causes a shutdown. */
6719
6720 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6721 env->cr[0] |= CR0_PE_MASK;
6722 env->eflags &= ~VM_MASK;
6723
6724 /* Disables all breakpoints in the host DR7 register. */
6725
6726 /* Checks the reloaded host state for consistency. */
6727
6728 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6729 host's code segment or non-canonical (in the case of long mode), a
6730       #GP fault is delivered inside the host. */
6731
6732 /* remove any pending exception */
6733 env->exception_index = -1;
6734 env->error_code = 0;
6735 env->old_exception = -1;
6736
6737 cpu_loop_exit();
6738}
6739
6740#endif
6741
6742/* MMX/SSE */
6743/* XXX: optimize by storing fptt and fptags in the static cpu state */
6744void helper_enter_mmx(void)
6745{
6746 env->fpstt = 0;
6747 *(uint32_t *)(env->fptags) = 0;
6748 *(uint32_t *)(env->fptags + 4) = 0;
6749}
6750
6751void helper_emms(void)
6752{
6753 /* set to empty state */
6754 *(uint32_t *)(env->fptags) = 0x01010101;
6755 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6756}
6757
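/* Editor's note: the two 32-bit stores in helper_enter_mmx and
   helper_emms above simply write all eight one-byte entries of
   env->fptags at once; 0 marks an x87/MMX register as valid and 1 as
   empty, so they are equivalent to memset(env->fptags, 0, 8) and
   memset(env->fptags, 1, 8) respectively.  MMX instructions alias the
   x87 registers, which is why entering MMX mode also resets the
   top-of-stack pointer (fpstt). */
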
6758/* XXX: remove this helper */
6759void helper_movq(uint64_t *d, uint64_t *s)
6760{
6761 *d = *s;
6762}
6763
6764#define SHIFT 0
6765#include "ops_sse.h"
6766
6767#define SHIFT 1
6768#include "ops_sse.h"
6769
6770#define SHIFT 0
6771#include "helper_template.h"
6772#undef SHIFT
6773
6774#define SHIFT 1
6775#include "helper_template.h"
6776#undef SHIFT
6777
6778#define SHIFT 2
6779#include "helper_template.h"
6780#undef SHIFT
6781
6782#ifdef TARGET_X86_64
6783
6784#define SHIFT 3
6785#include "helper_template.h"
6786#undef SHIFT
6787
6788#endif
6789
6790/* bit operations */
6791target_ulong helper_bsf(target_ulong t0)
6792{
6793 int count;
6794 target_ulong res;
6795
6796 res = t0;
6797 count = 0;
6798 while ((res & 1) == 0) {
6799 count++;
6800 res >>= 1;
6801 }
6802 return count;
6803}
6804
6805target_ulong helper_bsr(target_ulong t0)
6806{
6807 int count;
6808 target_ulong res, mask;
6809
6810 res = t0;
6811 count = TARGET_LONG_BITS - 1;
6812 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6813 while ((res & mask) == 0) {
6814 count--;
6815 res <<= 1;
6816 }
6817 return count;
6818}
6819
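/* Editor's note: helper_bsf/helper_bsr above scan for the lowest and
   highest set bit respectively.  Their loops never terminate for a zero
   operand, so the generated code is expected to call them only when the
   source is known to be nonzero (BSF/BSR leave the destination
   undefined and set ZF in that case).  The guarded sketch below shows a
   loop-free equivalent, assuming a GCC-compatible compiler and a
   nonzero argument; it is an editor's illustration, not part of the
   original file. */
#if 0
static unsigned long bsf_sketch(unsigned long t0)
{
    return __builtin_ctzl(t0);                      /* index of lowest set bit  */
}

static unsigned long bsr_sketch(unsigned long t0)
{
    return (sizeof(unsigned long) * 8 - 1) - __builtin_clzl(t0);
}
#endif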
6820
6821static int compute_all_eflags(void)
6822{
6823 return CC_SRC;
6824}
6825
6826static int compute_c_eflags(void)
6827{
6828 return CC_SRC & CC_C;
6829}
6830
6831#ifndef VBOX
6832CCTable cc_table[CC_OP_NB] = {
6833 [CC_OP_DYNAMIC] = { /* should never happen */ },
6834
6835 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6836
6837 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6838 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6839 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6840
6841 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6842 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6843 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6844
6845 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6846 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6847 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6848
6849 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6850 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6851 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6852
6853 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6854 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6855 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6856
6857 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6858 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6859 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6860
6861 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6862 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6863 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6864
6865 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6866 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6867 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6868
6869 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6870 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6871 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6872
6873 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6874 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6875 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6876
6877#ifdef TARGET_X86_64
6878 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6879
6880 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6881
6882 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6883
6884 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6885
6886 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6887
6888 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6889
6890 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6891
6892 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6893
6894 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6895
6896 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6897#endif
6898};
6899#else /* VBOX */
6900/* Sync carefully with cpu.h */
6901CCTable cc_table[CC_OP_NB] = {
6902 /* CC_OP_DYNAMIC */ { 0, 0 },
6903
6904 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6905
6906 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6907 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6908 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6909#ifdef TARGET_X86_64
6910 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6911#else
6912 /* CC_OP_MULQ */ { 0, 0 },
6913#endif
6914
6915 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6916 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6917 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6918#ifdef TARGET_X86_64
6919 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6920#else
6921 /* CC_OP_ADDQ */ { 0, 0 },
6922#endif
6923
6924 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6925 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6926 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6927#ifdef TARGET_X86_64
6928 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6929#else
6930 /* CC_OP_ADCQ */ { 0, 0 },
6931#endif
6932
6933 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6934 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6935 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6936#ifdef TARGET_X86_64
6937 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6938#else
6939 /* CC_OP_SUBQ */ { 0, 0 },
6940#endif
6941
6942 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6943 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6944 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6945#ifdef TARGET_X86_64
6946 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6947#else
6948 /* CC_OP_SBBQ */ { 0, 0 },
6949#endif
6950
6951 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6952 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6953 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6954#ifdef TARGET_X86_64
6955 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6956#else
6957 /* CC_OP_LOGICQ */ { 0, 0 },
6958#endif
6959
6960 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6961 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6962 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6963#ifdef TARGET_X86_64
6964 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6965#else
6966 /* CC_OP_INCQ */ { 0, 0 },
6967#endif
6968
6969 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6970 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6971 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6972#ifdef TARGET_X86_64
6973 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6974#else
6975 /* CC_OP_DECQ */ { 0, 0 },
6976#endif
6977
6978 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6979 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6980 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6981#ifdef TARGET_X86_64
6982 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6983#else
6984 /* CC_OP_SHLQ */ { 0, 0 },
6985#endif
6986
6987 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6988 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6989 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6990#ifdef TARGET_X86_64
6991 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6992#else
6993 /* CC_OP_SARQ */ { 0, 0 },
6994#endif
6995};
6996#endif /* VBOX */