VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c @ 14542

Last change on this file was r14542, checked in by vboxsync, 16 years ago:
Export new recompiler to OSE

File size: 192.6 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
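/* Parity lookup table for the PF flag: parity_table[val & 0xff] is CC_P when
   the low byte of the result has an even number of set bits, 0 otherwise. */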
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table (reduces the rotate count for 16-bit RCL/RCR) */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table (reduces the rotate count for 8-bit RCL/RCR) */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
102
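/* Constants loaded by the x87 constant-load instructions (FLDZ, FLD1, FLDPI,
   FLDLG2, FLDLN2, FLDL2E, FLDL2T), see the per-entry comments below. */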
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
148 /* if TF will be set -> #GP */
149 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
150 || (new_eflags & TF_MASK)) {
151 raise_exception(EXCP0D_GPF);
152 } else {
153 load_eflags(new_eflags, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
154
155 if (new_eflags & IF_MASK) {
156 env->eflags |= VIF_MASK;
157 } else {
158 env->eflags &= ~VIF_MASK;
159 }
160 }
161}
162
163target_ulong helper_read_eflags_vme(void)
164{
165 uint32_t eflags;
166 eflags = cc_table[CC_OP].compute_all();
167 eflags |= (DF & DF_MASK);
168 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
169 if (env->eflags & VIF_MASK)
170 eflags |= IF_MASK;
171 return eflags;
172}
173
174void helper_dump_state()
175{
176 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
177 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
178 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
179 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
180 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
181 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
182 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
183}
184#endif
185
186/* return non-zero on error */
187#ifndef VBOX
188static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
189#else /* VBOX */
190DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
191#endif /* VBOX */
192 int selector)
193{
194 SegmentCache *dt;
195 int index;
196 target_ulong ptr;
197
198#ifdef VBOX
199 /* Trying to load a selector with RPL=1? */
200 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
201 {
202 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
203 selector = selector & 0xfffc;
204 }
205#endif
206
207 if (selector & 0x4)
208 dt = &env->ldt;
209 else
210 dt = &env->gdt;
211 index = selector & ~7;
212 if ((index + 7) > dt->limit)
213 return -1;
214 ptr = dt->base + index;
215 *e1_ptr = ldl_kernel(ptr);
216 *e2_ptr = ldl_kernel(ptr + 4);
217 return 0;
218}
219
220#ifndef VBOX
221static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
222#else /* VBOX */
223DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
224#endif /* VBOX */
225{
226 unsigned int limit;
227 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
228 if (e2 & DESC_G_MASK)
229 limit = (limit << 12) | 0xfff;
230 return limit;
231}
232
233#ifndef VBOX
234static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
235#else /* VBOX */
236DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
237#endif /* VBOX */
238{
239 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
240}
241
242#ifndef VBOX
243static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
244#else /* VBOX */
245DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
246#endif /* VBOX */
247{
248 sc->base = get_seg_base(e1, e2);
249 sc->limit = get_seg_limit(e1, e2);
250 sc->flags = e2;
251}
252
253/* init the segment cache in vm86 mode. */
254#ifndef VBOX
255static inline void load_seg_vm(int seg, int selector)
256#else /* VBOX */
257DECLINLINE(void) load_seg_vm(int seg, int selector)
258#endif /* VBOX */
259{
260 selector &= 0xffff;
261#ifdef VBOX
262 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
263
264 if (seg == R_CS)
265 flags |= DESC_CS_MASK;
266
267 cpu_x86_load_seg_cache(env, seg, selector,
268 (selector << 4), 0xffff, flags);
269#else
270 cpu_x86_load_seg_cache(env, seg, selector,
271 (selector << 4), 0xffff, 0);
272#endif
273}
274
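/* Fetch the inner stack pointer and stack segment selector for privilege
   level dpl from the current TSS; the TSS type selects the 16-bit or 32-bit
   layout. */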
275#ifndef VBOX
276static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
277#else /* VBOX */
278DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
279#endif /* VBOX */
280 uint32_t *esp_ptr, int dpl)
281{
282#ifndef VBOX
283 int type, index, shift;
284#else
285 unsigned int type, index, shift;
286#endif
287
288#if 0
289 {
290 int i;
291 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
292 for(i=0;i<env->tr.limit;i++) {
293 printf("%02x ", env->tr.base[i]);
294 if ((i & 7) == 7) printf("\n");
295 }
296 printf("\n");
297 }
298#endif
299
300 if (!(env->tr.flags & DESC_P_MASK))
301 cpu_abort(env, "invalid tss");
302 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
303 if ((type & 7) != 1)
304 cpu_abort(env, "invalid tss type");
305 shift = type >> 3;
306 index = (dpl * 4 + 2) << shift;
307 if (index + (4 << shift) - 1 > env->tr.limit)
308 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
309 if (shift == 0) {
310 *esp_ptr = lduw_kernel(env->tr.base + index);
311 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
312 } else {
313 *esp_ptr = ldl_kernel(env->tr.base + index);
314 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
315 }
316}
317
318/* XXX: merge with load_seg() */
319static void tss_load_seg(int seg_reg, int selector)
320{
321 uint32_t e1, e2;
322 int rpl, dpl, cpl;
323
324#ifdef VBOX
325 e1 = e2 = 0;
326 cpl = env->hflags & HF_CPL_MASK;
327 /* Trying to load a selector with RPL=1? */
328 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
329 {
330 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
331 selector = selector & 0xfffc;
332 }
333#endif
334
335 if ((selector & 0xfffc) != 0) {
336 if (load_segment(&e1, &e2, selector) != 0)
337 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
338 if (!(e2 & DESC_S_MASK))
339 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
340 rpl = selector & 3;
341 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
342 cpl = env->hflags & HF_CPL_MASK;
343 if (seg_reg == R_CS) {
344 if (!(e2 & DESC_CS_MASK))
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 /* XXX: is it correct ? */
347 if (dpl != rpl)
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 if ((e2 & DESC_C_MASK) && dpl > rpl)
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 } else if (seg_reg == R_SS) {
352 /* SS must be writable data */
353 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if (dpl != cpl || dpl != rpl)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 } else {
358 /* not readable code */
359 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 /* if data or non-conforming code, check the rights */
362 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
363 if (dpl < cpl || dpl < rpl)
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 }
366 }
367 if (!(e2 & DESC_P_MASK))
368 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
369 cpu_x86_load_seg_cache(env, seg_reg, selector,
370 get_seg_base(e1, e2),
371 get_seg_limit(e1, e2),
372 e2);
373 } else {
374 if (seg_reg == R_SS || seg_reg == R_CS)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 }
377}
378
379#define SWITCH_TSS_JMP 0
380#define SWITCH_TSS_IRET 1
381#define SWITCH_TSS_CALL 2
382
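/* Hardware task switch: save the current register state into the old TSS,
   load the new TSS image (16- or 32-bit), then reload CR3, EFLAGS, the
   general registers, the LDT and finally the segment registers.  The busy
   bit and the NT flag are handled according to the source of the switch
   (JMP, CALL or IRET). */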
383/* XXX: restore CPU state in registers (PowerPC case) */
384static void switch_tss(int tss_selector,
385 uint32_t e1, uint32_t e2, int source,
386 uint32_t next_eip)
387{
388 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
389 target_ulong tss_base;
390 uint32_t new_regs[8], new_segs[6];
391 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
392 uint32_t old_eflags, eflags_mask;
393 SegmentCache *dt;
394#ifndef VBOX
395 int index;
396#else
397 unsigned int index;
398#endif
399 target_ulong ptr;
400
401 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
402#ifdef DEBUG_PCALL
403 if (loglevel & CPU_LOG_PCALL)
404 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
405#endif
406
407#if defined(VBOX) && defined(DEBUG)
408 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
409#endif
410
411 /* if task gate, we read the TSS segment and we load it */
412 if (type == 5) {
413 if (!(e2 & DESC_P_MASK))
414 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
415 tss_selector = e1 >> 16;
416 if (tss_selector & 4)
417 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
418 if (load_segment(&e1, &e2, tss_selector) != 0)
419 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
420 if (e2 & DESC_S_MASK)
421 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
422 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
423 if ((type & 7) != 1)
424 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
425 }
426
427 if (!(e2 & DESC_P_MASK))
428 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
429
430 if (type & 8)
431 tss_limit_max = 103;
432 else
433 tss_limit_max = 43;
434 tss_limit = get_seg_limit(e1, e2);
435 tss_base = get_seg_base(e1, e2);
436 if ((tss_selector & 4) != 0 ||
437 tss_limit < tss_limit_max)
438 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
439 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
440 if (old_type & 8)
441 old_tss_limit_max = 103;
442 else
443 old_tss_limit_max = 43;
444
445 /* read all the registers from the new TSS */
446 if (type & 8) {
447 /* 32 bit */
448 new_cr3 = ldl_kernel(tss_base + 0x1c);
449 new_eip = ldl_kernel(tss_base + 0x20);
450 new_eflags = ldl_kernel(tss_base + 0x24);
451 for(i = 0; i < 8; i++)
452 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
453 for(i = 0; i < 6; i++)
454 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
455 new_ldt = lduw_kernel(tss_base + 0x60);
456 new_trap = ldl_kernel(tss_base + 0x64);
457 } else {
458 /* 16 bit */
459 new_cr3 = 0;
460 new_eip = lduw_kernel(tss_base + 0x0e);
461 new_eflags = lduw_kernel(tss_base + 0x10);
462 for(i = 0; i < 8; i++)
463 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
464 for(i = 0; i < 4; i++)
465 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
466 new_ldt = lduw_kernel(tss_base + 0x2a);
467 new_segs[R_FS] = 0;
468 new_segs[R_GS] = 0;
469 new_trap = 0;
470 }
471
472 /* NOTE: we must avoid memory exceptions during the task switch,
473 so we make dummy accesses before */
474 /* XXX: it can still fail in some cases, so a bigger hack is
475 necessary to validate the TLB after having done the accesses */
476
477 v1 = ldub_kernel(env->tr.base);
478 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
479 stb_kernel(env->tr.base, v1);
480 stb_kernel(env->tr.base + old_tss_limit_max, v2);
481
482 /* clear busy bit (it is restartable) */
483 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
484 target_ulong ptr;
485 uint32_t e2;
486 ptr = env->gdt.base + (env->tr.selector & ~7);
487 e2 = ldl_kernel(ptr + 4);
488 e2 &= ~DESC_TSS_BUSY_MASK;
489 stl_kernel(ptr + 4, e2);
490 }
491 old_eflags = compute_eflags();
492 if (source == SWITCH_TSS_IRET)
493 old_eflags &= ~NT_MASK;
494
495 /* save the current state in the old TSS */
496 if (type & 8) {
497 /* 32 bit */
498 stl_kernel(env->tr.base + 0x20, next_eip);
499 stl_kernel(env->tr.base + 0x24, old_eflags);
500 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
501 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
502 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
503 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
504 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
505 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
506 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
507 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
508 for(i = 0; i < 6; i++)
509 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
510#if defined(VBOX) && defined(DEBUG)
511 printf("TSS 32 bits switch\n");
512 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
513#endif
514 } else {
515 /* 16 bit */
516 stw_kernel(env->tr.base + 0x0e, next_eip);
517 stw_kernel(env->tr.base + 0x10, old_eflags);
518 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
519 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
520 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
521 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
522 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
523 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
524 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
525 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
526 for(i = 0; i < 4; i++)
527 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
528 }
529
530 /* now if an exception occurs, it will occur in the next task
531 context */
532
533 if (source == SWITCH_TSS_CALL) {
534 stw_kernel(tss_base, env->tr.selector);
535 new_eflags |= NT_MASK;
536 }
537
538 /* set busy bit */
539 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
540 target_ulong ptr;
541 uint32_t e2;
542 ptr = env->gdt.base + (tss_selector & ~7);
543 e2 = ldl_kernel(ptr + 4);
544 e2 |= DESC_TSS_BUSY_MASK;
545 stl_kernel(ptr + 4, e2);
546 }
547
548 /* set the new CPU state */
549 /* from this point, any exception which occurs can give problems */
550 env->cr[0] |= CR0_TS_MASK;
551 env->hflags |= HF_TS_MASK;
552 env->tr.selector = tss_selector;
553 env->tr.base = tss_base;
554 env->tr.limit = tss_limit;
555 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
556
557 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
558 cpu_x86_update_cr3(env, new_cr3);
559 }
560
561 /* first load everything that cannot fault, then reload the LDT and
562 segment registers, which may raise exceptions */
563 env->eip = new_eip;
564 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
565 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
566 if (!(type & 8))
567 eflags_mask &= 0xffff;
568 load_eflags(new_eflags, eflags_mask);
569 /* XXX: what to do in 16 bit case ? */
570 EAX = new_regs[0];
571 ECX = new_regs[1];
572 EDX = new_regs[2];
573 EBX = new_regs[3];
574 ESP = new_regs[4];
575 EBP = new_regs[5];
576 ESI = new_regs[6];
577 EDI = new_regs[7];
578 if (new_eflags & VM_MASK) {
579 for(i = 0; i < 6; i++)
580 load_seg_vm(i, new_segs[i]);
581 /* in vm86, CPL is always 3 */
582 cpu_x86_set_cpl(env, 3);
583 } else {
584 /* CPL is set to the RPL of CS */
585 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
586 /* first just selectors as the rest may trigger exceptions */
587 for(i = 0; i < 6; i++)
588 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
589 }
590
591 env->ldt.selector = new_ldt & ~4;
592 env->ldt.base = 0;
593 env->ldt.limit = 0;
594 env->ldt.flags = 0;
595
596 /* load the LDT */
597 if (new_ldt & 4)
598 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
599
600 if ((new_ldt & 0xfffc) != 0) {
601 dt = &env->gdt;
602 index = new_ldt & ~7;
603 if ((index + 7) > dt->limit)
604 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
605 ptr = dt->base + index;
606 e1 = ldl_kernel(ptr);
607 e2 = ldl_kernel(ptr + 4);
608 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
609 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
610 if (!(e2 & DESC_P_MASK))
611 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
612 load_seg_cache_raw_dt(&env->ldt, e1, e2);
613 }
614
615 /* load the segments */
616 if (!(new_eflags & VM_MASK)) {
617 tss_load_seg(R_CS, new_segs[R_CS]);
618 tss_load_seg(R_SS, new_segs[R_SS]);
619 tss_load_seg(R_ES, new_segs[R_ES]);
620 tss_load_seg(R_DS, new_segs[R_DS]);
621 tss_load_seg(R_FS, new_segs[R_FS]);
622 tss_load_seg(R_GS, new_segs[R_GS]);
623 }
624
625 /* check that EIP is in the CS segment limits */
626 if (new_eip > env->segs[R_CS].limit) {
627 /* XXX: different exception if CALL ? */
628 raise_exception_err(EXCP0D_GPF, 0);
629 }
630}
631
632/* check if Port I/O is allowed in TSS */
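/* The I/O permission bitmap starts at the 16-bit offset stored at TSS offset
   0x66; access is granted only if every bit covering the port range is clear. */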
633#ifndef VBOX
634static inline void check_io(int addr, int size)
635{
636 int io_offset, val, mask;
637
638#else /* VBOX */
639DECLINLINE(void) check_io(int addr, int size)
640{
641 int val, mask;
642 unsigned int io_offset;
643#endif /* VBOX */
644 /* TSS must be a valid 32 bit one */
645 if (!(env->tr.flags & DESC_P_MASK) ||
646 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
647 env->tr.limit < 103)
648 goto fail;
649 io_offset = lduw_kernel(env->tr.base + 0x66);
650 io_offset += (addr >> 3);
651 /* Note: the check needs two bytes */
652 if ((io_offset + 1) > env->tr.limit)
653 goto fail;
654 val = lduw_kernel(env->tr.base + io_offset);
655 val >>= (addr & 7);
656 mask = (1 << size) - 1;
657 /* all bits must be zero to allow the I/O */
658 if ((val & mask) != 0) {
659 fail:
660 raise_exception_err(EXCP0D_GPF, 0);
661 }
662}
663
664#ifdef VBOX
665/* Keep in sync with gen_check_external_event() */
666void helper_check_external_event()
667{
668 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
669 | CPU_INTERRUPT_EXTERNAL_TIMER
670 | CPU_INTERRUPT_EXTERNAL_DMA))
671 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
672 && (env->eflags & IF_MASK)
673 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
674 {
675 helper_external_event();
676 }
677
678}
679
680void helper_sync_seg(uint32_t reg)
681{
682 assert(env->segs[reg].newselector != 0);
683 sync_seg(env, reg, env->segs[reg].newselector);
684}
685#endif
686
687void helper_check_iob(uint32_t t0)
688{
689 check_io(t0, 1);
690}
691
692void helper_check_iow(uint32_t t0)
693{
694 check_io(t0, 2);
695}
696
697void helper_check_iol(uint32_t t0)
698{
699 check_io(t0, 4);
700}
701
702void helper_outb(uint32_t port, uint32_t data)
703{
704 cpu_outb(env, port, data & 0xff);
705}
706
707target_ulong helper_inb(uint32_t port)
708{
709 return cpu_inb(env, port);
710}
711
712void helper_outw(uint32_t port, uint32_t data)
713{
714 cpu_outw(env, port, data & 0xffff);
715}
716
717target_ulong helper_inw(uint32_t port)
718{
719 return cpu_inw(env, port);
720}
721
722void helper_outl(uint32_t port, uint32_t data)
723{
724 cpu_outl(env, port, data);
725}
726
727target_ulong helper_inl(uint32_t port)
728{
729 return cpu_inl(env, port);
730}
731
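/* Stack-pointer mask derived from the SS descriptor flags: a 32-bit stack
   (B bit set) uses the full ESP, a 16-bit stack only SP. */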
732#ifndef VBOX
733static inline unsigned int get_sp_mask(unsigned int e2)
734#else /* VBOX */
735DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
736#endif /* VBOX */
737{
738 if (e2 & DESC_B_MASK)
739 return 0xffffffff;
740 else
741 return 0xffff;
742}
743
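/* Update the stack pointer according to the stack-size attribute: with a
   16-bit stack only the low 16 bits of ESP change, otherwise the new value
   is used (zero-extended for a 32-bit stack on 64-bit targets). */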
744#ifdef TARGET_X86_64
745#define SET_ESP(val, sp_mask)\
746do {\
747 if ((sp_mask) == 0xffff)\
748 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
749 else if ((sp_mask) == 0xffffffffLL)\
750 ESP = (uint32_t)(val);\
751 else\
752 ESP = (val);\
753} while (0)
754#else
755#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
756#endif
757
758/* in 64-bit machines, this can overflow. So this segment addition macro
759 * can be used to trim the value to 32-bit whenever needed */
760#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
761
762/* XXX: add a is_user flag to have proper security support */
763#define PUSHW(ssp, sp, sp_mask, val)\
764{\
765 sp -= 2;\
766 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
767}
768
769#define PUSHL(ssp, sp, sp_mask, val)\
770{\
771 sp -= 4;\
772 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
773}
774
775#define POPW(ssp, sp, sp_mask, val)\
776{\
777 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
778 sp += 2;\
779}
780
781#define POPL(ssp, sp, sp_mask, val)\
782{\
783 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
784 sp += 4;\
785}
786
787/* protected mode interrupt */
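/* The gate is looked up in the IDT; task gates trigger a task switch, while
   interrupt/trap gates optionally switch to an inner stack taken from the
   TSS, push (SS:ESP,) EFLAGS, CS:EIP and an optional error code, and then
   jump to the handler. */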
788static void do_interrupt_protected(int intno, int is_int, int error_code,
789 unsigned int next_eip, int is_hw)
790{
791 SegmentCache *dt;
792 target_ulong ptr, ssp;
793 int type, dpl, selector, ss_dpl, cpl;
794 int has_error_code, new_stack, shift;
795 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
796 uint32_t old_eip, sp_mask;
797
798#ifdef VBOX
799 ss = ss_e1 = ss_e2 = 0;
800# ifdef VBOX_WITH_VMI
801 if ( intno == 6
802 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
803 {
804 env->exception_index = EXCP_PARAV_CALL;
805 cpu_loop_exit();
806 }
807# endif
808 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
809 cpu_loop_exit();
810#endif
811
812 has_error_code = 0;
813 if (!is_int && !is_hw) {
814 switch(intno) {
815 case 8:
816 case 10:
817 case 11:
818 case 12:
819 case 13:
820 case 14:
821 case 17:
822 has_error_code = 1;
823 break;
824 }
825 }
826 if (is_int)
827 old_eip = next_eip;
828 else
829 old_eip = env->eip;
830
831 dt = &env->idt;
832#ifndef VBOX
833 if (intno * 8 + 7 > dt->limit)
834#else
835 if ((unsigned)intno * 8 + 7 > dt->limit)
836#endif
837 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
838 ptr = dt->base + intno * 8;
839 e1 = ldl_kernel(ptr);
840 e2 = ldl_kernel(ptr + 4);
841 /* check gate type */
842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
843 switch(type) {
844 case 5: /* task gate */
845 /* must do that check here to return the correct error code */
846 if (!(e2 & DESC_P_MASK))
847 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
848 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
849 if (has_error_code) {
850 int type;
851 uint32_t mask;
852 /* push the error code */
853 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
854 shift = type >> 3;
855 if (env->segs[R_SS].flags & DESC_B_MASK)
856 mask = 0xffffffff;
857 else
858 mask = 0xffff;
859 esp = (ESP - (2 << shift)) & mask;
860 ssp = env->segs[R_SS].base + esp;
861 if (shift)
862 stl_kernel(ssp, error_code);
863 else
864 stw_kernel(ssp, error_code);
865 SET_ESP(esp, mask);
866 }
867 return;
868 case 6: /* 286 interrupt gate */
869 case 7: /* 286 trap gate */
870 case 14: /* 386 interrupt gate */
871 case 15: /* 386 trap gate */
872 break;
873 default:
874 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
875 break;
876 }
877 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
878 cpl = env->hflags & HF_CPL_MASK;
879 /* check privilege if software int */
880 if (is_int && dpl < cpl)
881 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
882 /* check valid bit */
883 if (!(e2 & DESC_P_MASK))
884 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
885 selector = e1 >> 16;
886 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
887 if ((selector & 0xfffc) == 0)
888 raise_exception_err(EXCP0D_GPF, 0);
889
890 if (load_segment(&e1, &e2, selector) != 0)
891 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
892 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
893 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 if (dpl > cpl)
896 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
899 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
900 /* to inner privilege */
901 get_ss_esp_from_tss(&ss, &esp, dpl);
902 if ((ss & 0xfffc) == 0)
903 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
904 if ((ss & 3) != dpl)
905 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
906 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
907 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
908 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
909 if (ss_dpl != dpl)
910 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
911 if (!(ss_e2 & DESC_S_MASK) ||
912 (ss_e2 & DESC_CS_MASK) ||
913 !(ss_e2 & DESC_W_MASK))
914 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
915 if (!(ss_e2 & DESC_P_MASK))
916#ifdef VBOX /* See page 3-477 of 253666.pdf */
917 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
918#else
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920#endif
921 new_stack = 1;
922 sp_mask = get_sp_mask(ss_e2);
923 ssp = get_seg_base(ss_e1, ss_e2);
924#if defined(VBOX) && defined(DEBUG)
925 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
926#endif
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
929 if (env->eflags & VM_MASK)
930 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
931 new_stack = 0;
932 sp_mask = get_sp_mask(env->segs[R_SS].flags);
933 ssp = env->segs[R_SS].base;
934 esp = ESP;
935 dpl = cpl;
936 } else {
937 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938 new_stack = 0; /* avoid warning */
939 sp_mask = 0; /* avoid warning */
940 ssp = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
942 }
943
944 shift = type >> 3;
945
946#if 0
947 /* XXX: check that enough room is available */
948 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
949 if (env->eflags & VM_MASK)
950 push_size += 8;
951 push_size <<= shift;
952#endif
953 if (shift == 1) {
954 if (new_stack) {
955 if (env->eflags & VM_MASK) {
956 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
957 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
958 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
959 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
960 }
961 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
962 PUSHL(ssp, esp, sp_mask, ESP);
963 }
964 PUSHL(ssp, esp, sp_mask, compute_eflags());
965 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
966 PUSHL(ssp, esp, sp_mask, old_eip);
967 if (has_error_code) {
968 PUSHL(ssp, esp, sp_mask, error_code);
969 }
970 } else {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHW(ssp, esp, sp_mask, ESP);
980 }
981 PUSHW(ssp, esp, sp_mask, compute_eflags());
982 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHW(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHW(ssp, esp, sp_mask, error_code);
986 }
987 }
988
989 if (new_stack) {
990 if (env->eflags & VM_MASK) {
991 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
992 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
993 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
994 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
995 }
996 ss = (ss & ~3) | dpl;
997 cpu_x86_load_seg_cache(env, R_SS, ss,
998 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
999 }
1000 SET_ESP(esp, sp_mask);
1001
1002 selector = (selector & ~3) | dpl;
1003 cpu_x86_load_seg_cache(env, R_CS, selector,
1004 get_seg_base(e1, e2),
1005 get_seg_limit(e1, e2),
1006 e2);
1007 cpu_x86_set_cpl(env, dpl);
1008 env->eip = offset;
1009
1010 /* interrupt gates clear the IF mask */
1011 if ((type & 1) == 0) {
1012 env->eflags &= ~IF_MASK;
1013 }
1014 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1015}
1016#ifdef VBOX
1017
1018/* check if VME interrupt redirection is enabled in TSS */
1019DECLINLINE(bool) is_vme_irq_redirected(int intno)
1020{
1021 unsigned int io_offset, intredir_offset;
1022 unsigned char val, mask;
1023
1024 /* TSS must be a valid 32 bit one */
1025 if (!(env->tr.flags & DESC_P_MASK) ||
1026 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1027 env->tr.limit < 103)
1028 goto fail;
1029 io_offset = lduw_kernel(env->tr.base + 0x66);
1030 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1031 intredir_offset = io_offset - 0x20;
1032
1033 intredir_offset += (intno >> 3);
1034 if ((intredir_offset) > env->tr.limit)
1035 goto fail;
1036
1037 val = ldub_kernel(env->tr.base + intredir_offset);
1038 mask = 1 << (unsigned char)(intno & 7);
1039
1040 /* bit set means no redirection. */
1041 if ((val & mask) != 0) {
1042 return false;
1043 }
1044 return true;
1045
1046fail:
1047 raise_exception_err(EXCP0D_GPF, 0);
1048 return true;
1049}
1050
1051/* V86 mode software interrupt with CR4.VME=1 */
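/* If the TSS redirection bitmap allows it, the interrupt is delivered through
   the virtual-8086 IVT at linear address 0 with IF/IOPL virtualized via VIF
   when IOPL < 3; otherwise it is delivered as a protected-mode interrupt
   (IOPL 3) or raises #GP. */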
1052static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1053{
1054 target_ulong ptr, ssp;
1055 int selector;
1056 uint32_t offset, esp;
1057 uint32_t old_cs, old_eflags;
1058 uint32_t iopl;
1059
1060 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1061
1062 if (!is_vme_irq_redirected(intno))
1063 {
1064 if (iopl == 3)
1065 {
1066 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1067 return;
1068 }
1069 else
1070 raise_exception_err(EXCP0D_GPF, 0);
1071 }
1072
1073 /* virtual mode idt is at linear address 0 */
1074 ptr = 0 + intno * 4;
1075 offset = lduw_kernel(ptr);
1076 selector = lduw_kernel(ptr + 2);
1077 esp = ESP;
1078 ssp = env->segs[R_SS].base;
1079 old_cs = env->segs[R_CS].selector;
1080
1081 old_eflags = compute_eflags();
1082 if (iopl < 3)
1083 {
1084 /* copy VIF into IF and set IOPL to 3 */
1085 if (env->eflags & VIF_MASK)
1086 old_eflags |= IF_MASK;
1087 else
1088 old_eflags &= ~IF_MASK;
1089
1090 old_eflags |= (3 << IOPL_SHIFT);
1091 }
1092
1093 /* XXX: use SS segment size ? */
1094 PUSHW(ssp, esp, 0xffff, old_eflags);
1095 PUSHW(ssp, esp, 0xffff, old_cs);
1096 PUSHW(ssp, esp, 0xffff, next_eip);
1097
1098 /* update processor state */
1099 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1100 env->eip = offset;
1101 env->segs[R_CS].selector = selector;
1102 env->segs[R_CS].base = (selector << 4);
1103 env->eflags &= ~(TF_MASK | RF_MASK);
1104
1105 if (iopl < 3)
1106 env->eflags &= ~VIF_MASK;
1107 else
1108 env->eflags &= ~IF_MASK;
1109}
1110#endif /* VBOX */
1111
1112#ifdef TARGET_X86_64
1113
1114#define PUSHQ(sp, val)\
1115{\
1116 sp -= 8;\
1117 stq_kernel(sp, (val));\
1118}
1119
1120#define POPQ(sp, val)\
1121{\
1122 val = ldq_kernel(sp);\
1123 sp += 8;\
1124}
1125
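/* Read a stack pointer from the 64-bit TSS: levels 0-2 select RSP0-RSP2,
   levels 4-10 select IST1-IST7 (callers pass ist + 3 for IST entries). */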
1126#ifndef VBOX
1127static inline target_ulong get_rsp_from_tss(int level)
1128#else /* VBOX */
1129DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1130#endif /* VBOX */
1131{
1132 int index;
1133
1134#if 0
1135 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1136 env->tr.base, env->tr.limit);
1137#endif
1138
1139 if (!(env->tr.flags & DESC_P_MASK))
1140 cpu_abort(env, "invalid tss");
1141 index = 8 * level + 4;
1142 if ((index + 7) > env->tr.limit)
1143 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1144 return ldq_kernel(env->tr.base + index);
1145}
1146
1147/* 64 bit interrupt */
1148static void do_interrupt64(int intno, int is_int, int error_code,
1149 target_ulong next_eip, int is_hw)
1150{
1151 SegmentCache *dt;
1152 target_ulong ptr;
1153 int type, dpl, selector, cpl, ist;
1154 int has_error_code, new_stack;
1155 uint32_t e1, e2, e3, ss;
1156 target_ulong old_eip, esp, offset;
1157
1158#ifdef VBOX
1159 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1160 cpu_loop_exit();
1161#endif
1162
1163 has_error_code = 0;
1164 if (!is_int && !is_hw) {
1165 switch(intno) {
1166 case 8:
1167 case 10:
1168 case 11:
1169 case 12:
1170 case 13:
1171 case 14:
1172 case 17:
1173 has_error_code = 1;
1174 break;
1175 }
1176 }
1177 if (is_int)
1178 old_eip = next_eip;
1179 else
1180 old_eip = env->eip;
1181
1182 dt = &env->idt;
1183 if (intno * 16 + 15 > dt->limit)
1184 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1185 ptr = dt->base + intno * 16;
1186 e1 = ldl_kernel(ptr);
1187 e2 = ldl_kernel(ptr + 4);
1188 e3 = ldl_kernel(ptr + 8);
1189 /* check gate type */
1190 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1191 switch(type) {
1192 case 14: /* 386 interrupt gate */
1193 case 15: /* 386 trap gate */
1194 break;
1195 default:
1196 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1197 break;
1198 }
1199 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1200 cpl = env->hflags & HF_CPL_MASK;
1201 /* check privilege if software int */
1202 if (is_int && dpl < cpl)
1203 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1204 /* check valid bit */
1205 if (!(e2 & DESC_P_MASK))
1206 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1207 selector = e1 >> 16;
1208 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1209 ist = e2 & 7;
1210 if ((selector & 0xfffc) == 0)
1211 raise_exception_err(EXCP0D_GPF, 0);
1212
1213 if (load_segment(&e1, &e2, selector) != 0)
1214 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1215 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1216 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1217 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1218 if (dpl > cpl)
1219 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1220 if (!(e2 & DESC_P_MASK))
1221 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1222 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1223 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1224 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1225 /* to inner privilege */
1226 if (ist != 0)
1227 esp = get_rsp_from_tss(ist + 3);
1228 else
1229 esp = get_rsp_from_tss(dpl);
1230 esp &= ~0xfLL; /* align stack */
1231 ss = 0;
1232 new_stack = 1;
1233 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1234 /* to same privilege */
1235 if (env->eflags & VM_MASK)
1236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1237 new_stack = 0;
1238 if (ist != 0)
1239 esp = get_rsp_from_tss(ist + 3);
1240 else
1241 esp = ESP;
1242 esp &= ~0xfLL; /* align stack */
1243 dpl = cpl;
1244 } else {
1245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1246 new_stack = 0; /* avoid warning */
1247 esp = 0; /* avoid warning */
1248 }
1249
1250 PUSHQ(esp, env->segs[R_SS].selector);
1251 PUSHQ(esp, ESP);
1252 PUSHQ(esp, compute_eflags());
1253 PUSHQ(esp, env->segs[R_CS].selector);
1254 PUSHQ(esp, old_eip);
1255 if (has_error_code) {
1256 PUSHQ(esp, error_code);
1257 }
1258
1259 if (new_stack) {
1260 ss = 0 | dpl;
1261 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1262 }
1263 ESP = esp;
1264
1265 selector = (selector & ~3) | dpl;
1266 cpu_x86_load_seg_cache(env, R_CS, selector,
1267 get_seg_base(e1, e2),
1268 get_seg_limit(e1, e2),
1269 e2);
1270 cpu_x86_set_cpl(env, dpl);
1271 env->eip = offset;
1272
1273 /* interrupt gates clear the IF mask */
1274 if ((type & 1) == 0) {
1275 env->eflags &= ~IF_MASK;
1276 }
1277 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1278}
1279#endif
1280
1281#if defined(CONFIG_USER_ONLY)
1282void helper_syscall(int next_eip_addend)
1283{
1284 env->exception_index = EXCP_SYSCALL;
1285 env->exception_next_eip = env->eip + next_eip_addend;
1286 cpu_loop_exit();
1287}
1288#else
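/* SYSCALL: the CS/SS selectors come from MSR_STAR[47:32] and the return
   address is kept in (R/E)CX.  In long mode RFLAGS is saved in R11 and RIP is
   loaded from LSTAR or CSTAR (64-bit vs. compatibility caller); in legacy
   mode EIP is taken from the low 32 bits of STAR. */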
1289void helper_syscall(int next_eip_addend)
1290{
1291 int selector;
1292
1293 if (!(env->efer & MSR_EFER_SCE)) {
1294 raise_exception_err(EXCP06_ILLOP, 0);
1295 }
1296 selector = (env->star >> 32) & 0xffff;
1297#ifdef TARGET_X86_64
1298 if (env->hflags & HF_LMA_MASK) {
1299 int code64;
1300
1301 ECX = env->eip + next_eip_addend;
1302 env->regs[11] = compute_eflags();
1303
1304 code64 = env->hflags & HF_CS64_MASK;
1305
1306 cpu_x86_set_cpl(env, 0);
1307 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1308 0, 0xffffffff,
1309 DESC_G_MASK | DESC_P_MASK |
1310 DESC_S_MASK |
1311 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1312 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1313 0, 0xffffffff,
1314 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1315 DESC_S_MASK |
1316 DESC_W_MASK | DESC_A_MASK);
1317 env->eflags &= ~env->fmask;
1318 load_eflags(env->eflags, 0);
1319 if (code64)
1320 env->eip = env->lstar;
1321 else
1322 env->eip = env->cstar;
1323 } else
1324#endif
1325 {
1326 ECX = (uint32_t)(env->eip + next_eip_addend);
1327
1328 cpu_x86_set_cpl(env, 0);
1329 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1330 0, 0xffffffff,
1331 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1332 DESC_S_MASK |
1333 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1334 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1335 0, 0xffffffff,
1336 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1337 DESC_S_MASK |
1338 DESC_W_MASK | DESC_A_MASK);
1339 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1340 env->eip = (uint32_t)env->star;
1341 }
1342}
1343#endif
1344
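/* SYSRET: return to CPL 3 using selectors derived from MSR_STAR[63:48];
   (R/E)IP is restored from (R/E)CX and, in long mode, RFLAGS from R11. */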
1345void helper_sysret(int dflag)
1346{
1347 int cpl, selector;
1348
1349 if (!(env->efer & MSR_EFER_SCE)) {
1350 raise_exception_err(EXCP06_ILLOP, 0);
1351 }
1352 cpl = env->hflags & HF_CPL_MASK;
1353 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1354 raise_exception_err(EXCP0D_GPF, 0);
1355 }
1356 selector = (env->star >> 48) & 0xffff;
1357#ifdef TARGET_X86_64
1358 if (env->hflags & HF_LMA_MASK) {
1359 if (dflag == 2) {
1360 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1361 0, 0xffffffff,
1362 DESC_G_MASK | DESC_P_MASK |
1363 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1364 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1365 DESC_L_MASK);
1366 env->eip = ECX;
1367 } else {
1368 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1369 0, 0xffffffff,
1370 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1371 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1372 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1373 env->eip = (uint32_t)ECX;
1374 }
1375 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1376 0, 0xffffffff,
1377 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1378 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1379 DESC_W_MASK | DESC_A_MASK);
1380 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1381 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1382 cpu_x86_set_cpl(env, 3);
1383 } else
1384#endif
1385 {
1386 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1387 0, 0xffffffff,
1388 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1389 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1390 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1391 env->eip = (uint32_t)ECX;
1392 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1393 0, 0xffffffff,
1394 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1395 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1396 DESC_W_MASK | DESC_A_MASK);
1397 env->eflags |= IF_MASK;
1398 cpu_x86_set_cpl(env, 3);
1399 }
1400#ifdef USE_KQEMU
1401 if (kqemu_is_ok(env)) {
1402 if (env->hflags & HF_LMA_MASK)
1403 CC_OP = CC_OP_EFLAGS;
1404 env->exception_index = -1;
1405 cpu_loop_exit();
1406 }
1407#endif
1408}
1409
1410#ifdef VBOX
1411/**
1412 * Checks and processes external VMM events.
1413 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1414 */
1415void helper_external_event(void)
1416{
1417#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1418 uintptr_t uESP;
1419 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
1420 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
1421#endif
1422 /* Keep in sync with flags checked by gen_check_external_event() */
1423 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1424 {
1425 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1426 ~CPU_INTERRUPT_EXTERNAL_HARD);
1427 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1428 }
1429 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1430 {
1431 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1432 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1433 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1434 }
1435 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1436 {
1437 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1438 ~CPU_INTERRUPT_EXTERNAL_DMA);
1439 remR3DmaRun(env);
1440 }
1441 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1442 {
1443 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1444 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1445 remR3TimersRun(env);
1446 }
1447}
1448/* helper for recording call instruction addresses for later scanning */
1449void helper_record_call()
1450{
1451 if ( !(env->state & CPU_RAW_RING0)
1452 && (env->cr[0] & CR0_PG_MASK)
1453 && !(env->eflags & X86_EFL_IF))
1454 remR3RecordCall(env);
1455}
1456#endif /* VBOX */
1457
1458/* real mode interrupt */
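/* The vector is fetched from the IVT (4 bytes per entry); FLAGS, CS and IP
   are pushed on the 16-bit stack and IF, TF, AC and RF are cleared. */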
1459static void do_interrupt_real(int intno, int is_int, int error_code,
1460 unsigned int next_eip)
1461{
1462 SegmentCache *dt;
1463 target_ulong ptr, ssp;
1464 int selector;
1465 uint32_t offset, esp;
1466 uint32_t old_cs, old_eip;
1467
1468 /* real mode (simpler !) */
1469 dt = &env->idt;
1470#ifndef VBOX
1471 if (intno * 4 + 3 > dt->limit)
1472#else
1473 if ((unsigned)intno * 4 + 3 > dt->limit)
1474#endif
1475 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1476 ptr = dt->base + intno * 4;
1477 offset = lduw_kernel(ptr);
1478 selector = lduw_kernel(ptr + 2);
1479 esp = ESP;
1480 ssp = env->segs[R_SS].base;
1481 if (is_int)
1482 old_eip = next_eip;
1483 else
1484 old_eip = env->eip;
1485 old_cs = env->segs[R_CS].selector;
1486 /* XXX: use SS segment size ? */
1487 PUSHW(ssp, esp, 0xffff, compute_eflags());
1488 PUSHW(ssp, esp, 0xffff, old_cs);
1489 PUSHW(ssp, esp, 0xffff, old_eip);
1490
1491 /* update processor state */
1492 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1493 env->eip = offset;
1494 env->segs[R_CS].selector = selector;
1495 env->segs[R_CS].base = (selector << 4);
1496 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1497}
1498
1499/* fake user mode interrupt */
1500void do_interrupt_user(int intno, int is_int, int error_code,
1501 target_ulong next_eip)
1502{
1503 SegmentCache *dt;
1504 target_ulong ptr;
1505 int dpl, cpl, shift;
1506 uint32_t e2;
1507
1508 dt = &env->idt;
1509 if (env->hflags & HF_LMA_MASK) {
1510 shift = 4;
1511 } else {
1512 shift = 3;
1513 }
1514 ptr = dt->base + (intno << shift);
1515 e2 = ldl_kernel(ptr + 4);
1516
1517 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1518 cpl = env->hflags & HF_CPL_MASK;
1519 /* check privilege if software int */
1520 if (is_int && dpl < cpl)
1521 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1522
1523 /* Since we emulate only user space, we cannot do more than
1524 exiting the emulation with the suitable exception and error
1525 code */
1526 if (is_int)
1527 EIP = next_eip;
1528}
1529
1530/*
1531 * Begin execution of an interrupt. is_int is TRUE if coming from
1532 * the int instruction. next_eip is the EIP value AFTER the interrupt
1533 * instruction. It is only relevant if is_int is TRUE.
1534 */
1535void do_interrupt(int intno, int is_int, int error_code,
1536 target_ulong next_eip, int is_hw)
1537{
1538 if (loglevel & CPU_LOG_INT) {
1539 if ((env->cr[0] & CR0_PE_MASK)) {
1540 static int count;
1541 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1542 count, intno, error_code, is_int,
1543 env->hflags & HF_CPL_MASK,
1544 env->segs[R_CS].selector, EIP,
1545 (int)env->segs[R_CS].base + EIP,
1546 env->segs[R_SS].selector, ESP);
1547 if (intno == 0x0e) {
1548 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1549 } else {
1550 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1551 }
1552 fprintf(logfile, "\n");
1553 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1554#if 0
1555 {
1556 int i;
1557 uint8_t *ptr;
1558 fprintf(logfile, " code=");
1559 ptr = env->segs[R_CS].base + env->eip;
1560 for(i = 0; i < 16; i++) {
1561 fprintf(logfile, " %02x", ldub(ptr + i));
1562 }
1563 fprintf(logfile, "\n");
1564 }
1565#endif
1566 count++;
1567 }
1568 }
1569 if (env->cr[0] & CR0_PE_MASK) {
1570#ifdef TARGET_X86_64
1571 if (env->hflags & HF_LMA_MASK) {
1572 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1573 } else
1574#endif
1575 {
1576#ifdef VBOX
1577 /* int xx *, v86 code and VME enabled? */
1578 if ( (env->eflags & VM_MASK)
1579 && (env->cr[4] & CR4_VME_MASK)
1580 && is_int
1581 && !is_hw
1582 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1583 )
1584 do_soft_interrupt_vme(intno, error_code, next_eip);
1585 else
1586#endif /* VBOX */
1587 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1588 }
1589 } else {
1590 do_interrupt_real(intno, is_int, error_code, next_eip);
1591 }
1592}
1593
1594/*
1595 * Check nested exceptions and change to double or triple fault if
1596 * needed. It should only be called if this is not an interrupt.
1597 * Returns the new exception number.
1598 */
1599static int check_exception(int intno, int *error_code)
1600{
1601 int first_contributory = env->old_exception == 0 ||
1602 (env->old_exception >= 10 &&
1603 env->old_exception <= 13);
1604 int second_contributory = intno == 0 ||
1605 (intno >= 10 && intno <= 13);
1606
1607 if (loglevel & CPU_LOG_INT)
1608 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1609 env->old_exception, intno);
1610
1611 if (env->old_exception == EXCP08_DBLE)
1612 cpu_abort(env, "triple fault");
1613
1614 if ((first_contributory && second_contributory)
1615 || (env->old_exception == EXCP0E_PAGE &&
1616 (second_contributory || (intno == EXCP0E_PAGE)))) {
1617 intno = EXCP08_DBLE;
1618 *error_code = 0;
1619 }
1620
1621 if (second_contributory || (intno == EXCP0E_PAGE) ||
1622 (intno == EXCP08_DBLE))
1623 env->old_exception = intno;
1624
1625 return intno;
1626}
1627
1628/*
1629 * Signal an interrupt. It is executed in the main CPU loop.
1630 * is_int is TRUE if coming from the int instruction. next_eip is the
1631 * EIP value AFTER the interrupt instruction. It is only relevant if
1632 * is_int is TRUE.
1633 */
1634void raise_interrupt(int intno, int is_int, int error_code,
1635 int next_eip_addend)
1636{
1637#if defined(VBOX) && defined(DEBUG)
1638 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1639#endif
1640 if (!is_int) {
1641 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1642 intno = check_exception(intno, &error_code);
1643 } else {
1644 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1645 }
1646
1647 env->exception_index = intno;
1648 env->error_code = error_code;
1649 env->exception_is_int = is_int;
1650 env->exception_next_eip = env->eip + next_eip_addend;
1651 cpu_loop_exit();
1652}
1653
1654/* shortcuts to generate exceptions */
1655
1656void (raise_exception_err)(int exception_index, int error_code)
1657{
1658 raise_interrupt(exception_index, 0, error_code, 0);
1659}
1660
1661void raise_exception(int exception_index)
1662{
1663 raise_interrupt(exception_index, 0, 0, 0);
1664}
1665
1666/* SMM support */
1667
1668#if defined(CONFIG_USER_ONLY)
1669
1670void do_smm_enter(void)
1671{
1672}
1673
1674void helper_rsm(void)
1675{
1676}
1677
1678#else
1679
1680#ifdef TARGET_X86_64
1681#define SMM_REVISION_ID 0x00020064
1682#else
1683#define SMM_REVISION_ID 0x00020000
1684#endif
1685
1686void do_smm_enter(void)
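/* SMM entry: the CPU state is saved to the SMRAM state area at
   smbase + 0x8000 and execution resumes in a real-mode-like environment at
   smbase:8000 with paging and protection disabled. */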
1687{
1688 target_ulong sm_state;
1689 SegmentCache *dt;
1690 int i, offset;
1691
1692 if (loglevel & CPU_LOG_INT) {
1693 fprintf(logfile, "SMM: enter\n");
1694 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1695 }
1696
1697 env->hflags |= HF_SMM_MASK;
1698 cpu_smm_update(env);
1699
1700 sm_state = env->smbase + 0x8000;
1701
1702#ifdef TARGET_X86_64
1703 for(i = 0; i < 6; i++) {
1704 dt = &env->segs[i];
1705 offset = 0x7e00 + i * 16;
1706 stw_phys(sm_state + offset, dt->selector);
1707 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1708 stl_phys(sm_state + offset + 4, dt->limit);
1709 stq_phys(sm_state + offset + 8, dt->base);
1710 }
1711
1712 stq_phys(sm_state + 0x7e68, env->gdt.base);
1713 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1714
1715 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1716 stq_phys(sm_state + 0x7e78, env->ldt.base);
1717 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1718 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1719
1720 stq_phys(sm_state + 0x7e88, env->idt.base);
1721 stl_phys(sm_state + 0x7e84, env->idt.limit);
1722
1723 stw_phys(sm_state + 0x7e90, env->tr.selector);
1724 stq_phys(sm_state + 0x7e98, env->tr.base);
1725 stl_phys(sm_state + 0x7e94, env->tr.limit);
1726 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1727
1728 stq_phys(sm_state + 0x7ed0, env->efer);
1729
1730 stq_phys(sm_state + 0x7ff8, EAX);
1731 stq_phys(sm_state + 0x7ff0, ECX);
1732 stq_phys(sm_state + 0x7fe8, EDX);
1733 stq_phys(sm_state + 0x7fe0, EBX);
1734 stq_phys(sm_state + 0x7fd8, ESP);
1735 stq_phys(sm_state + 0x7fd0, EBP);
1736 stq_phys(sm_state + 0x7fc8, ESI);
1737 stq_phys(sm_state + 0x7fc0, EDI);
1738 for(i = 8; i < 16; i++)
1739 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1740 stq_phys(sm_state + 0x7f78, env->eip);
1741 stl_phys(sm_state + 0x7f70, compute_eflags());
1742 stl_phys(sm_state + 0x7f68, env->dr[6]);
1743 stl_phys(sm_state + 0x7f60, env->dr[7]);
1744
1745 stl_phys(sm_state + 0x7f48, env->cr[4]);
1746 stl_phys(sm_state + 0x7f50, env->cr[3]);
1747 stl_phys(sm_state + 0x7f58, env->cr[0]);
1748
1749 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1750 stl_phys(sm_state + 0x7f00, env->smbase);
1751#else
1752 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1753 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1754 stl_phys(sm_state + 0x7ff4, compute_eflags());
1755 stl_phys(sm_state + 0x7ff0, env->eip);
1756 stl_phys(sm_state + 0x7fec, EDI);
1757 stl_phys(sm_state + 0x7fe8, ESI);
1758 stl_phys(sm_state + 0x7fe4, EBP);
1759 stl_phys(sm_state + 0x7fe0, ESP);
1760 stl_phys(sm_state + 0x7fdc, EBX);
1761 stl_phys(sm_state + 0x7fd8, EDX);
1762 stl_phys(sm_state + 0x7fd4, ECX);
1763 stl_phys(sm_state + 0x7fd0, EAX);
1764 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1765 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1766
1767 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1768 stl_phys(sm_state + 0x7f64, env->tr.base);
1769 stl_phys(sm_state + 0x7f60, env->tr.limit);
1770 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1771
1772 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1773 stl_phys(sm_state + 0x7f80, env->ldt.base);
1774 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1775 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1776
1777 stl_phys(sm_state + 0x7f74, env->gdt.base);
1778 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1779
1780 stl_phys(sm_state + 0x7f58, env->idt.base);
1781 stl_phys(sm_state + 0x7f54, env->idt.limit);
1782
1783 for(i = 0; i < 6; i++) {
1784 dt = &env->segs[i];
1785 if (i < 3)
1786 offset = 0x7f84 + i * 12;
1787 else
1788 offset = 0x7f2c + (i - 3) * 12;
1789 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1790 stl_phys(sm_state + offset + 8, dt->base);
1791 stl_phys(sm_state + offset + 4, dt->limit);
1792 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1793 }
1794 stl_phys(sm_state + 0x7f14, env->cr[4]);
1795
1796 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1797 stl_phys(sm_state + 0x7ef8, env->smbase);
1798#endif
1799 /* init SMM cpu state */
1800
1801#ifdef TARGET_X86_64
1802 cpu_load_efer(env, 0);
1803#endif
1804 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1805 env->eip = 0x00008000;
1806 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1807 0xffffffff, 0);
1808 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1809 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1810 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1811 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1812 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1813
1814 cpu_x86_update_cr0(env,
1815 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1816 cpu_x86_update_cr4(env, 0);
1817 env->dr[7] = 0x00000400;
1818 CC_OP = CC_OP_EFLAGS;
1819}
1820
1821void helper_rsm(void)
1822{
1823#ifdef VBOX
1824 cpu_abort(env, "helper_rsm");
1825#else /* !VBOX */
1826 target_ulong sm_state;
1829 int i, offset;
1830 uint32_t val;
1831
1832 sm_state = env->smbase + 0x8000;
1833#ifdef TARGET_X86_64
1834 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1835
1836 for(i = 0; i < 6; i++) {
1837 offset = 0x7e00 + i * 16;
1838 cpu_x86_load_seg_cache(env, i,
1839 lduw_phys(sm_state + offset),
1840 ldq_phys(sm_state + offset + 8),
1841 ldl_phys(sm_state + offset + 4),
1842 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1843 }
1844
1845 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1846 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1847
1848 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1849 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1850 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1851 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1852
1853 env->idt.base = ldq_phys(sm_state + 0x7e88);
1854 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1855
1856 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1857 env->tr.base = ldq_phys(sm_state + 0x7e98);
1858 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1859 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1860
1861 EAX = ldq_phys(sm_state + 0x7ff8);
1862 ECX = ldq_phys(sm_state + 0x7ff0);
1863 EDX = ldq_phys(sm_state + 0x7fe8);
1864 EBX = ldq_phys(sm_state + 0x7fe0);
1865 ESP = ldq_phys(sm_state + 0x7fd8);
1866 EBP = ldq_phys(sm_state + 0x7fd0);
1867 ESI = ldq_phys(sm_state + 0x7fc8);
1868 EDI = ldq_phys(sm_state + 0x7fc0);
1869 for(i = 8; i < 16; i++)
1870 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1871 env->eip = ldq_phys(sm_state + 0x7f78);
1872 load_eflags(ldl_phys(sm_state + 0x7f70),
1873 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1874 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1875 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1876
1877 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1878 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1879 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1880
1881 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1882 if (val & 0x20000) {
1883 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1884 }
1885#else
1886 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1887 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1888 load_eflags(ldl_phys(sm_state + 0x7ff4),
1889 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1890 env->eip = ldl_phys(sm_state + 0x7ff0);
1891 EDI = ldl_phys(sm_state + 0x7fec);
1892 ESI = ldl_phys(sm_state + 0x7fe8);
1893 EBP = ldl_phys(sm_state + 0x7fe4);
1894 ESP = ldl_phys(sm_state + 0x7fe0);
1895 EBX = ldl_phys(sm_state + 0x7fdc);
1896 EDX = ldl_phys(sm_state + 0x7fd8);
1897 ECX = ldl_phys(sm_state + 0x7fd4);
1898 EAX = ldl_phys(sm_state + 0x7fd0);
1899 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1900 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1901
1902 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1903 env->tr.base = ldl_phys(sm_state + 0x7f64);
1904 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1905 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1906
1907 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1908 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1909 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1910 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1911
1912 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1913 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1914
1915 env->idt.base = ldl_phys(sm_state + 0x7f58);
1916 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1917
1918 for(i = 0; i < 6; i++) {
1919 if (i < 3)
1920 offset = 0x7f84 + i * 12;
1921 else
1922 offset = 0x7f2c + (i - 3) * 12;
1923 cpu_x86_load_seg_cache(env, i,
1924 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1925 ldl_phys(sm_state + offset + 8),
1926 ldl_phys(sm_state + offset + 4),
1927 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1928 }
1929 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1930
1931 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1932 if (val & 0x20000) {
1933 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1934 }
1935#endif
1936 CC_OP = CC_OP_EFLAGS;
1937 env->hflags &= ~HF_SMM_MASK;
1938 cpu_smm_update(env);
1939
1940 if (loglevel & CPU_LOG_INT) {
1941 fprintf(logfile, "SMM: after RSM\n");
1942 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1943 }
1944#endif /* !VBOX */
1945}
1946
1947#endif /* !CONFIG_USER_ONLY */
1948
1949
1950/* division, flags are undefined */
1951
1952void helper_divb_AL(target_ulong t0)
1953{
1954 unsigned int num, den, q, r;
1955
1956 num = (EAX & 0xffff);
1957 den = (t0 & 0xff);
1958 if (den == 0) {
1959 raise_exception(EXCP00_DIVZ);
1960 }
1961 q = (num / den);
1962 if (q > 0xff)
1963 raise_exception(EXCP00_DIVZ);
1964 q &= 0xff;
1965 r = (num % den) & 0xff;
1966 EAX = (EAX & ~0xffff) | (r << 8) | q;
1967}
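/*
 * Illustrative sketch of the 8-bit DIV result packing; the helper below is
 * hypothetical and only restates what helper_divb_AL already does: AX is the
 * dividend, the quotient lands in AL, the remainder in AH, and #DE is raised
 * for a zero divisor or a quotient wider than 8 bits.
 */
#if 0
static uint32_t sketch_pack_div8(uint32_t eax, unsigned int q, unsigned int r)
{
    /* e.g. AX = 0x0403, divisor 0x10: q = 0x40, r = 0x03 -> AX becomes 0x0340 */
    return (eax & ~0xffffu) | ((r & 0xff) << 8) | (q & 0xff);
}
#endif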
1968
1969void helper_idivb_AL(target_ulong t0)
1970{
1971 int num, den, q, r;
1972
1973 num = (int16_t)EAX;
1974 den = (int8_t)t0;
1975 if (den == 0) {
1976 raise_exception(EXCP00_DIVZ);
1977 }
1978 q = (num / den);
1979 if (q != (int8_t)q)
1980 raise_exception(EXCP00_DIVZ);
1981 q &= 0xff;
1982 r = (num % den) & 0xff;
1983 EAX = (EAX & ~0xffff) | (r << 8) | q;
1984}
1985
1986void helper_divw_AX(target_ulong t0)
1987{
1988 unsigned int num, den, q, r;
1989
1990 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1991 den = (t0 & 0xffff);
1992 if (den == 0) {
1993 raise_exception(EXCP00_DIVZ);
1994 }
1995 q = (num / den);
1996 if (q > 0xffff)
1997 raise_exception(EXCP00_DIVZ);
1998 q &= 0xffff;
1999 r = (num % den) & 0xffff;
2000 EAX = (EAX & ~0xffff) | q;
2001 EDX = (EDX & ~0xffff) | r;
2002}
2003
2004void helper_idivw_AX(target_ulong t0)
2005{
2006 int num, den, q, r;
2007
2008 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2009 den = (int16_t)t0;
2010 if (den == 0) {
2011 raise_exception(EXCP00_DIVZ);
2012 }
2013 q = (num / den);
2014 if (q != (int16_t)q)
2015 raise_exception(EXCP00_DIVZ);
2016 q &= 0xffff;
2017 r = (num % den) & 0xffff;
2018 EAX = (EAX & ~0xffff) | q;
2019 EDX = (EDX & ~0xffff) | r;
2020}
2021
2022void helper_divl_EAX(target_ulong t0)
2023{
2024 unsigned int den, r;
2025 uint64_t num, q;
2026
2027 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2028 den = t0;
2029 if (den == 0) {
2030 raise_exception(EXCP00_DIVZ);
2031 }
2032 q = (num / den);
2033 r = (num % den);
2034 if (q > 0xffffffff)
2035 raise_exception(EXCP00_DIVZ);
2036 EAX = (uint32_t)q;
2037 EDX = (uint32_t)r;
2038}
2039
2040void helper_idivl_EAX(target_ulong t0)
2041{
2042 int den, r;
2043 int64_t num, q;
2044
2045 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2046 den = t0;
2047 if (den == 0) {
2048 raise_exception(EXCP00_DIVZ);
2049 }
2050 q = (num / den);
2051 r = (num % den);
2052 if (q != (int32_t)q)
2053 raise_exception(EXCP00_DIVZ);
2054 EAX = (uint32_t)q;
2055 EDX = (uint32_t)r;
2056}
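/*
 * Note on the signed 32-bit divide above: the "q != (int32_t)q" test rejects
 * quotients that do not fit in EAX, but the corner case num == INT64_MIN with
 * den == -1 already overflows the host's 64-bit signed division before that
 * test runs. A defensive pre-check (hypothetical fragment, not what the
 * helper currently does) would raise #DE up front:
 */
#if 0
    if (den == 0 || (num == INT64_MIN && den == -1))
        raise_exception(EXCP00_DIVZ);
#endif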
2057
2058/* bcd */
2059
2060/* XXX: should raise #DE (divide error) when the immediate base operand is 0 */
2061void helper_aam(int base)
2062{
2063 int al, ah;
2064 al = EAX & 0xff;
2065 ah = al / base;
2066 al = al % base;
2067 EAX = (EAX & ~0xffff) | al | (ah << 8);
2068 CC_DST = al;
2069}
2070
2071void helper_aad(int base)
2072{
2073 int al, ah;
2074 al = EAX & 0xff;
2075 ah = (EAX >> 8) & 0xff;
2076 al = ((ah * base) + al) & 0xff;
2077 EAX = (EAX & ~0xffff) | al;
2078 CC_DST = al;
2079}
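/*
 * Worked example for the two BCD adjust helpers above (base 10, the encoding
 * produced by the plain AAM/AAD opcodes): with AL = 0x25 (37 decimal), AAM
 * splits it into AH = 3, AL = 7; AAD then recombines AH:AL back into
 * AL = 3 * 10 + 7 = 37 = 0x25. Only SF/ZF/PF are defined afterwards, which is
 * why both helpers simply latch AL into CC_DST.
 */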
2080
2081void helper_aaa(void)
2082{
2083 int icarry;
2084 int al, ah, af;
2085 int eflags;
2086
2087 eflags = cc_table[CC_OP].compute_all();
2088 af = eflags & CC_A;
2089 al = EAX & 0xff;
2090 ah = (EAX >> 8) & 0xff;
2091
2092 icarry = (al > 0xf9);
2093 if (((al & 0x0f) > 9 ) || af) {
2094 al = (al + 6) & 0x0f;
2095 ah = (ah + 1 + icarry) & 0xff;
2096 eflags |= CC_C | CC_A;
2097 } else {
2098 eflags &= ~(CC_C | CC_A);
2099 al &= 0x0f;
2100 }
2101 EAX = (EAX & ~0xffff) | al | (ah << 8);
2102 CC_SRC = eflags;
2103 FORCE_RET();
2104}
2105
2106void helper_aas(void)
2107{
2108 int icarry;
2109 int al, ah, af;
2110 int eflags;
2111
2112 eflags = cc_table[CC_OP].compute_all();
2113 af = eflags & CC_A;
2114 al = EAX & 0xff;
2115 ah = (EAX >> 8) & 0xff;
2116
2117 icarry = (al < 6);
2118 if (((al & 0x0f) > 9 ) || af) {
2119 al = (al - 6) & 0x0f;
2120 ah = (ah - 1 - icarry) & 0xff;
2121 eflags |= CC_C | CC_A;
2122 } else {
2123 eflags &= ~(CC_C | CC_A);
2124 al &= 0x0f;
2125 }
2126 EAX = (EAX & ~0xffff) | al | (ah << 8);
2127 CC_SRC = eflags;
2128 FORCE_RET();
2129}
2130
2131void helper_daa(void)
2132{
2133 int al, af, cf;
2134 int eflags;
2135
2136 eflags = cc_table[CC_OP].compute_all();
2137 cf = eflags & CC_C;
2138 af = eflags & CC_A;
2139 al = EAX & 0xff;
2140
2141 eflags = 0;
2142 if (((al & 0x0f) > 9 ) || af) {
2143 al = (al + 6) & 0xff;
2144 eflags |= CC_A;
2145 }
2146 if ((al > 0x9f) || cf) {
2147 al = (al + 0x60) & 0xff;
2148 eflags |= CC_C;
2149 }
2150 EAX = (EAX & ~0xff) | al;
2151 /* well, speed is not an issue here, so we compute the flags by hand */
2152 eflags |= (al == 0) << 6; /* zf */
2153 eflags |= parity_table[al]; /* pf */
2154 eflags |= (al & 0x80); /* sf */
2155 CC_SRC = eflags;
2156 FORCE_RET();
2157}
2158
2159void helper_das(void)
2160{
2161 int al, al1, af, cf;
2162 int eflags;
2163
2164 eflags = cc_table[CC_OP].compute_all();
2165 cf = eflags & CC_C;
2166 af = eflags & CC_A;
2167 al = EAX & 0xff;
2168
2169 eflags = 0;
2170 al1 = al;
2171 if (((al & 0x0f) > 9 ) || af) {
2172 eflags |= CC_A;
2173 if (al < 6 || cf)
2174 eflags |= CC_C;
2175 al = (al - 6) & 0xff;
2176 }
2177 if ((al1 > 0x99) || cf) {
2178 al = (al - 0x60) & 0xff;
2179 eflags |= CC_C;
2180 }
2181 EAX = (EAX & ~0xff) | al;
2182 /* well, speed is not an issue here, so we compute the flags by hand */
2183 eflags |= (al == 0) << 6; /* zf */
2184 eflags |= parity_table[al]; /* pf */
2185 eflags |= (al & 0x80); /* sf */
2186 CC_SRC = eflags;
2187 FORCE_RET();
2188}
2189
2190void helper_into(int next_eip_addend)
2191{
2192 int eflags;
2193 eflags = cc_table[CC_OP].compute_all();
2194 if (eflags & CC_O) {
2195 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2196 }
2197}
2198
2199void helper_cmpxchg8b(target_ulong a0)
2200{
2201 uint64_t d;
2202 int eflags;
2203
2204 eflags = cc_table[CC_OP].compute_all();
2205 d = ldq(a0);
2206 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2207 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2208 eflags |= CC_Z;
2209 } else {
2210 /* always do the store */
2211 stq(a0, d);
2212 EDX = (uint32_t)(d >> 32);
2213 EAX = (uint32_t)d;
2214 eflags &= ~CC_Z;
2215 }
2216 CC_SRC = eflags;
2217}
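/*
 * Illustrative sketch of the CMPXCHG8B operands handled above (hypothetical
 * variable names): the comparand and replacement are assembled from register
 * pairs, and memory is written on both outcomes, mirroring the locked
 * write-back a real CPU performs even when the compare fails.
 */
#if 0
    uint64_t cmp         = ((uint64_t)EDX << 32) | (uint32_t)EAX;  /* expected value */
    uint64_t replacement = ((uint64_t)ECX << 32) | (uint32_t)EBX;  /* new value      */
    /* match    -> m64 = replacement, ZF = 1
       no match -> EDX:EAX = m64 (old value), ZF = 0 */
#endif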
2218
2219#ifdef TARGET_X86_64
2220void helper_cmpxchg16b(target_ulong a0)
2221{
2222 uint64_t d0, d1;
2223 int eflags;
2224
2225 if ((a0 & 0xf) != 0)
2226 raise_exception(EXCP0D_GPF);
2227 eflags = cc_table[CC_OP].compute_all();
2228 d0 = ldq(a0);
2229 d1 = ldq(a0 + 8);
2230 if (d0 == EAX && d1 == EDX) {
2231 stq(a0, EBX);
2232 stq(a0 + 8, ECX);
2233 eflags |= CC_Z;
2234 } else {
2235 /* always do the store */
2236 stq(a0, d0);
2237 stq(a0 + 8, d1);
2238 EDX = d1;
2239 EAX = d0;
2240 eflags &= ~CC_Z;
2241 }
2242 CC_SRC = eflags;
2243}
2244#endif
2245
2246void helper_single_step(void)
2247{
2248 env->dr[6] |= 0x4000;
2249 raise_exception(EXCP01_SSTP);
2250}
2251
2252void helper_cpuid(void)
2253{
2254#ifndef VBOX
2255 uint32_t index;
2256
2257 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2258
2259 index = (uint32_t)EAX;
2260 /* test if maximum index reached */
2261 if (index & 0x80000000) {
2262 if (index > env->cpuid_xlevel)
2263 index = env->cpuid_level;
2264 } else {
2265 if (index > env->cpuid_level)
2266 index = env->cpuid_level;
2267 }
2268
2269 switch(index) {
2270 case 0:
2271 EAX = env->cpuid_level;
2272 EBX = env->cpuid_vendor1;
2273 EDX = env->cpuid_vendor2;
2274 ECX = env->cpuid_vendor3;
2275 break;
2276 case 1:
2277 EAX = env->cpuid_version;
2278        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size in quadwords (8 * 8 = 64 bytes); Linux expects it. */
2279 ECX = env->cpuid_ext_features;
2280 EDX = env->cpuid_features;
2281 break;
2282 case 2:
2283 /* cache info: needed for Pentium Pro compatibility */
2284 EAX = 1;
2285 EBX = 0;
2286 ECX = 0;
2287 EDX = 0x2c307d;
2288 break;
2289 case 4:
2290 /* cache info: needed for Core compatibility */
2291 switch (ECX) {
2292 case 0: /* L1 dcache info */
2293 EAX = 0x0000121;
2294 EBX = 0x1c0003f;
2295 ECX = 0x000003f;
2296 EDX = 0x0000001;
2297 break;
2298 case 1: /* L1 icache info */
2299 EAX = 0x0000122;
2300 EBX = 0x1c0003f;
2301 ECX = 0x000003f;
2302 EDX = 0x0000001;
2303 break;
2304 case 2: /* L2 cache info */
2305 EAX = 0x0000143;
2306 EBX = 0x3c0003f;
2307 ECX = 0x0000fff;
2308 EDX = 0x0000001;
2309 break;
2310 default: /* end of info */
2311 EAX = 0;
2312 EBX = 0;
2313 ECX = 0;
2314 EDX = 0;
2315 break;
2316 }
2317
2318 break;
2319 case 5:
2320 /* mwait info: needed for Core compatibility */
2321 EAX = 0; /* Smallest monitor-line size in bytes */
2322 EBX = 0; /* Largest monitor-line size in bytes */
2323 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2324 EDX = 0;
2325 break;
2326 case 6:
2327 /* Thermal and Power Leaf */
2328 EAX = 0;
2329 EBX = 0;
2330 ECX = 0;
2331 EDX = 0;
2332 break;
2333 case 9:
2334 /* Direct Cache Access Information Leaf */
2335 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2336 EBX = 0;
2337 ECX = 0;
2338 EDX = 0;
2339 break;
2340 case 0xA:
2341 /* Architectural Performance Monitoring Leaf */
2342 EAX = 0;
2343 EBX = 0;
2344 ECX = 0;
2345 EDX = 0;
2346 break;
2347 case 0x80000000:
2348 EAX = env->cpuid_xlevel;
2349 EBX = env->cpuid_vendor1;
2350 EDX = env->cpuid_vendor2;
2351 ECX = env->cpuid_vendor3;
2352 break;
2353 case 0x80000001:
2354 EAX = env->cpuid_features;
2355 EBX = 0;
2356 ECX = env->cpuid_ext3_features;
2357 EDX = env->cpuid_ext2_features;
2358 break;
2359 case 0x80000002:
2360 case 0x80000003:
2361 case 0x80000004:
2362 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2363 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2364 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2365 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2366 break;
2367 case 0x80000005:
2368 /* cache info (L1 cache) */
2369 EAX = 0x01ff01ff;
2370 EBX = 0x01ff01ff;
2371 ECX = 0x40020140;
2372 EDX = 0x40020140;
2373 break;
2374 case 0x80000006:
2375 /* cache info (L2 cache) */
2376 EAX = 0;
2377 EBX = 0x42004200;
2378 ECX = 0x02008140;
2379 EDX = 0;
2380 break;
2381 case 0x80000008:
2382 /* virtual & phys address size in low 2 bytes. */
2383/* XXX: This value must match the one used in the MMU code. */
2384 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2385 /* 64 bit processor */
2386#if defined(USE_KQEMU)
2387 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2388#else
2389/* XXX: The physical address space is limited to 42 bits in exec.c. */
2390 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2391#endif
2392 } else {
2393#if defined(USE_KQEMU)
2394 EAX = 0x00000020; /* 32 bits physical */
2395#else
2396 if (env->cpuid_features & CPUID_PSE36)
2397 EAX = 0x00000024; /* 36 bits physical */
2398 else
2399 EAX = 0x00000020; /* 32 bits physical */
2400#endif
2401 }
2402 EBX = 0;
2403 ECX = 0;
2404 EDX = 0;
2405 break;
2406 case 0x8000000A:
2407 EAX = 0x00000001;
2408 EBX = 0;
2409 ECX = 0;
2410 EDX = 0;
2411 break;
2412 default:
2413 /* reserved values: zero */
2414 EAX = 0;
2415 EBX = 0;
2416 ECX = 0;
2417 EDX = 0;
2418 break;
2419 }
2420#else /* VBOX */
2421 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2422#endif /* VBOX */
2423}
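/*
 * Sketch of how a guest decodes the address-size leaf filled in above
 * (CPUID leaf 0x80000008): the physical width sits in EAX[7:0] and the linear
 * (virtual) width in EAX[15:8], so the 0x00003028 value advertises
 * 0x28 = 40 physical and 0x30 = 48 virtual bits. Hypothetical fragment:
 */
#if 0
    unsigned int phys_bits = EAX & 0xff;          /* 40 for 0x00003028 */
    unsigned int virt_bits = (EAX >> 8) & 0xff;   /* 48 for 0x00003028 */
#endif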
2424
2425void helper_enter_level(int level, int data32, target_ulong t1)
2426{
2427 target_ulong ssp;
2428 uint32_t esp_mask, esp, ebp;
2429
2430 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2431 ssp = env->segs[R_SS].base;
2432 ebp = EBP;
2433 esp = ESP;
2434 if (data32) {
2435 /* 32 bit */
2436 esp -= 4;
2437 while (--level) {
2438 esp -= 4;
2439 ebp -= 4;
2440 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2441 }
2442 esp -= 4;
2443 stl(ssp + (esp & esp_mask), t1);
2444 } else {
2445 /* 16 bit */
2446 esp -= 2;
2447 while (--level) {
2448 esp -= 2;
2449 ebp -= 2;
2450 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2451 }
2452 esp -= 2;
2453 stw(ssp + (esp & esp_mask), t1);
2454 }
2455}
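/*
 * Note on helper_enter_level: "level" is the ENTER nesting depth
 * (architecturally the imm8 operand mod 32). The while (--level) loops run
 * level - 1 times, copying the previous frame's display entries downwards,
 * and the final store pushes t1, the frame-temp value handed in by the
 * translated ENTER sequence. Every access is wrapped with esp_mask so that
 * 16-bit stack segments wrap at 64K instead of running past the segment.
 */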
2456
2457#ifdef TARGET_X86_64
2458void helper_enter64_level(int level, int data64, target_ulong t1)
2459{
2460 target_ulong esp, ebp;
2461 ebp = EBP;
2462 esp = ESP;
2463
2464 if (data64) {
2465 /* 64 bit */
2466 esp -= 8;
2467 while (--level) {
2468 esp -= 8;
2469 ebp -= 8;
2470 stq(esp, ldq(ebp));
2471 }
2472 esp -= 8;
2473 stq(esp, t1);
2474 } else {
2475 /* 16 bit */
2476 esp -= 2;
2477 while (--level) {
2478 esp -= 2;
2479 ebp -= 2;
2480 stw(esp, lduw(ebp));
2481 }
2482 esp -= 2;
2483 stw(esp, t1);
2484 }
2485}
2486#endif
2487
2488void helper_lldt(int selector)
2489{
2490 SegmentCache *dt;
2491 uint32_t e1, e2;
2492#ifndef VBOX
2493 int index, entry_limit;
2494#else
2495 unsigned int index, entry_limit;
2496#endif
2497 target_ulong ptr;
2498
2499#ifdef VBOX
2500 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2501 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2502#endif
2503
2504 selector &= 0xffff;
2505 if ((selector & 0xfffc) == 0) {
2506 /* XXX: NULL selector case: invalid LDT */
2507 env->ldt.base = 0;
2508 env->ldt.limit = 0;
2509 } else {
2510 if (selector & 0x4)
2511 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2512 dt = &env->gdt;
2513 index = selector & ~7;
2514#ifdef TARGET_X86_64
2515 if (env->hflags & HF_LMA_MASK)
2516 entry_limit = 15;
2517 else
2518#endif
2519 entry_limit = 7;
2520 if ((index + entry_limit) > dt->limit)
2521 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2522 ptr = dt->base + index;
2523 e1 = ldl_kernel(ptr);
2524 e2 = ldl_kernel(ptr + 4);
2525 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2526 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2527 if (!(e2 & DESC_P_MASK))
2528 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2529#ifdef TARGET_X86_64
2530 if (env->hflags & HF_LMA_MASK) {
2531 uint32_t e3;
2532 e3 = ldl_kernel(ptr + 8);
2533 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2534 env->ldt.base |= (target_ulong)e3 << 32;
2535 } else
2536#endif
2537 {
2538 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2539 }
2540 }
2541 env->ldt.selector = selector;
2542#ifdef VBOX
2543 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2544 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2545#endif
2546}
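/*
 * Note on the entry_limit values above: legacy LDT/TSS descriptors are 8
 * bytes (last byte at index + 7), while long-mode system descriptors grow to
 * 16 bytes (index + 15) because the third dword carries bits 63:32 of the
 * base. Hypothetical fragment showing how the 64-bit base is assembled:
 */
#if 0
    uint64_t base64 = (uint64_t)get_seg_base(e1, e2) | ((uint64_t)e3 << 32);
#endif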
2547
2548void helper_ltr(int selector)
2549{
2550 SegmentCache *dt;
2551 uint32_t e1, e2;
2552#ifndef VBOX
2553 int index, type, entry_limit;
2554#else
2555 unsigned int index;
2556 int type, entry_limit;
2557#endif
2558 target_ulong ptr;
2559
2560#ifdef VBOX
2561 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2562 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2563 env->tr.flags, (RTSEL)(selector & 0xffff)));
2564#endif
2565 selector &= 0xffff;
2566 if ((selector & 0xfffc) == 0) {
2567 /* NULL selector case: invalid TR */
2568 env->tr.base = 0;
2569 env->tr.limit = 0;
2570 env->tr.flags = 0;
2571 } else {
2572 if (selector & 0x4)
2573 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2574 dt = &env->gdt;
2575 index = selector & ~7;
2576#ifdef TARGET_X86_64
2577 if (env->hflags & HF_LMA_MASK)
2578 entry_limit = 15;
2579 else
2580#endif
2581 entry_limit = 7;
2582 if ((index + entry_limit) > dt->limit)
2583 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2584 ptr = dt->base + index;
2585 e1 = ldl_kernel(ptr);
2586 e2 = ldl_kernel(ptr + 4);
2587 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2588 if ((e2 & DESC_S_MASK) ||
2589 (type != 1 && type != 9))
2590 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2591 if (!(e2 & DESC_P_MASK))
2592 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2593#ifdef TARGET_X86_64
2594 if (env->hflags & HF_LMA_MASK) {
2595 uint32_t e3, e4;
2596 e3 = ldl_kernel(ptr + 8);
2597 e4 = ldl_kernel(ptr + 12);
2598 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2599 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2600 load_seg_cache_raw_dt(&env->tr, e1, e2);
2601 env->tr.base |= (target_ulong)e3 << 32;
2602 } else
2603#endif
2604 {
2605 load_seg_cache_raw_dt(&env->tr, e1, e2);
2606 }
2607 e2 |= DESC_TSS_BUSY_MASK;
2608 stl_kernel(ptr + 4, e2);
2609 }
2610 env->tr.selector = selector;
2611#ifdef VBOX
2612 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2613 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2614 env->tr.flags, (RTSEL)(selector & 0xffff)));
2615#endif
2616}
2617
2618/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2619void helper_load_seg(int seg_reg, int selector)
2620{
2621 uint32_t e1, e2;
2622 int cpl, dpl, rpl;
2623 SegmentCache *dt;
2624#ifndef VBOX
2625 int index;
2626#else
2627 unsigned int index;
2628#endif
2629 target_ulong ptr;
2630
2631 selector &= 0xffff;
2632 cpl = env->hflags & HF_CPL_MASK;
2633
2634#ifdef VBOX
2635    /* Trying to load a selector with RPL=1? */
2636 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2637 {
2638 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2639 selector = selector & 0xfffc;
2640 }
2641#endif
2642 if ((selector & 0xfffc) == 0) {
2643 /* null selector case */
2644 if (seg_reg == R_SS
2645#ifdef TARGET_X86_64
2646 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2647#endif
2648 )
2649 raise_exception_err(EXCP0D_GPF, 0);
2650 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2651 } else {
2652
2653 if (selector & 0x4)
2654 dt = &env->ldt;
2655 else
2656 dt = &env->gdt;
2657 index = selector & ~7;
2658 if ((index + 7) > dt->limit)
2659 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2660 ptr = dt->base + index;
2661 e1 = ldl_kernel(ptr);
2662 e2 = ldl_kernel(ptr + 4);
2663
2664 if (!(e2 & DESC_S_MASK))
2665 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2666 rpl = selector & 3;
2667 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2668 if (seg_reg == R_SS) {
2669 /* must be writable segment */
2670 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2671 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2672 if (rpl != cpl || dpl != cpl)
2673 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2674 } else {
2675 /* must be readable segment */
2676 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2677 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2678
2679 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2680 /* if not conforming code, test rights */
2681 if (dpl < cpl || dpl < rpl)
2682 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2683 }
2684 }
2685
2686 if (!(e2 & DESC_P_MASK)) {
2687 if (seg_reg == R_SS)
2688 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2689 else
2690 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2691 }
2692
2693 /* set the access bit if not already set */
2694 if (!(e2 & DESC_A_MASK)) {
2695 e2 |= DESC_A_MASK;
2696 stl_kernel(ptr + 4, e2);
2697 }
2698
2699 cpu_x86_load_seg_cache(env, seg_reg, selector,
2700 get_seg_base(e1, e2),
2701 get_seg_limit(e1, e2),
2702 e2);
2703#if 0
2704 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2705 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2706#endif
2707 }
2708}
2709
2710/* protected mode jump */
2711void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2712 int next_eip_addend)
2713{
2714 int gate_cs, type;
2715 uint32_t e1, e2, cpl, dpl, rpl, limit;
2716 target_ulong next_eip;
2717
2718#ifdef VBOX
2719 e1 = e2 = 0;
2720#endif
2721 if ((new_cs & 0xfffc) == 0)
2722 raise_exception_err(EXCP0D_GPF, 0);
2723 if (load_segment(&e1, &e2, new_cs) != 0)
2724 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2725 cpl = env->hflags & HF_CPL_MASK;
2726 if (e2 & DESC_S_MASK) {
2727 if (!(e2 & DESC_CS_MASK))
2728 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2729 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2730 if (e2 & DESC_C_MASK) {
2731 /* conforming code segment */
2732 if (dpl > cpl)
2733 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2734 } else {
2735 /* non conforming code segment */
2736 rpl = new_cs & 3;
2737 if (rpl > cpl)
2738 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2739 if (dpl != cpl)
2740 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2741 }
2742 if (!(e2 & DESC_P_MASK))
2743 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2744 limit = get_seg_limit(e1, e2);
2745 if (new_eip > limit &&
2746 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2747 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2749 get_seg_base(e1, e2), limit, e2);
2750 EIP = new_eip;
2751 } else {
2752 /* jump to call or task gate */
2753 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2754 rpl = new_cs & 3;
2755 cpl = env->hflags & HF_CPL_MASK;
2756 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2757 switch(type) {
2758 case 1: /* 286 TSS */
2759 case 9: /* 386 TSS */
2760 case 5: /* task gate */
2761 if (dpl < cpl || dpl < rpl)
2762 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2763 next_eip = env->eip + next_eip_addend;
2764 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2765 CC_OP = CC_OP_EFLAGS;
2766 break;
2767 case 4: /* 286 call gate */
2768 case 12: /* 386 call gate */
2769 if ((dpl < cpl) || (dpl < rpl))
2770 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2771 if (!(e2 & DESC_P_MASK))
2772 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2773 gate_cs = e1 >> 16;
2774 new_eip = (e1 & 0xffff);
2775 if (type == 12)
2776 new_eip |= (e2 & 0xffff0000);
2777 if (load_segment(&e1, &e2, gate_cs) != 0)
2778 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2779 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2780 /* must be code segment */
2781 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2782 (DESC_S_MASK | DESC_CS_MASK)))
2783 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2784 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2785 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2786 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2787 if (!(e2 & DESC_P_MASK))
2788#ifdef VBOX /* See page 3-514 of 253666.pdf */
2789 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2790#else
2791 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2792#endif
2793 limit = get_seg_limit(e1, e2);
2794 if (new_eip > limit)
2795 raise_exception_err(EXCP0D_GPF, 0);
2796 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2797 get_seg_base(e1, e2), limit, e2);
2798 EIP = new_eip;
2799 break;
2800 default:
2801 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2802 break;
2803 }
2804 }
2805}
2806
2807/* real mode call */
2808void helper_lcall_real(int new_cs, target_ulong new_eip1,
2809 int shift, int next_eip)
2810{
2811 int new_eip;
2812 uint32_t esp, esp_mask;
2813 target_ulong ssp;
2814
2815 new_eip = new_eip1;
2816 esp = ESP;
2817 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2818 ssp = env->segs[R_SS].base;
2819 if (shift) {
2820 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2821 PUSHL(ssp, esp, esp_mask, next_eip);
2822 } else {
2823 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2824 PUSHW(ssp, esp, esp_mask, next_eip);
2825 }
2826
2827 SET_ESP(esp, esp_mask);
2828 env->eip = new_eip;
2829 env->segs[R_CS].selector = new_cs;
2830 env->segs[R_CS].base = (new_cs << 4);
2831}
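/*
 * Real-mode far call recap for the helper above: the old CS and IP are pushed
 * (32- or 16-bit wide depending on the operand-size "shift"), and the new CS
 * base is simply selector * 16. Hypothetical fragment for the resulting
 * linear target address:
 */
#if 0
    target_ulong linear_target = ((target_ulong)new_cs << 4) + new_eip;
#endif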
2832
2833/* protected mode call */
2834void helper_lcall_protected(int new_cs, target_ulong new_eip,
2835 int shift, int next_eip_addend)
2836{
2837 int new_stack, i;
2838 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2839 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2840 uint32_t val, limit, old_sp_mask;
2841 target_ulong ssp, old_ssp, next_eip;
2842
2843#ifdef VBOX
2844 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2845#endif
2846 next_eip = env->eip + next_eip_addend;
2847#ifdef DEBUG_PCALL
2848 if (loglevel & CPU_LOG_PCALL) {
2849 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2850 new_cs, (uint32_t)new_eip, shift);
2851 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2852 }
2853#endif
2854 if ((new_cs & 0xfffc) == 0)
2855 raise_exception_err(EXCP0D_GPF, 0);
2856 if (load_segment(&e1, &e2, new_cs) != 0)
2857 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2858 cpl = env->hflags & HF_CPL_MASK;
2859#ifdef DEBUG_PCALL
2860 if (loglevel & CPU_LOG_PCALL) {
2861 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2862 }
2863#endif
2864 if (e2 & DESC_S_MASK) {
2865 if (!(e2 & DESC_CS_MASK))
2866 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2867 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2868 if (e2 & DESC_C_MASK) {
2869 /* conforming code segment */
2870 if (dpl > cpl)
2871 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2872 } else {
2873 /* non conforming code segment */
2874 rpl = new_cs & 3;
2875 if (rpl > cpl)
2876 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2877 if (dpl != cpl)
2878 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2879 }
2880 if (!(e2 & DESC_P_MASK))
2881 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2882
2883#ifdef TARGET_X86_64
2884 /* XXX: check 16/32 bit cases in long mode */
2885 if (shift == 2) {
2886 target_ulong rsp;
2887 /* 64 bit case */
2888 rsp = ESP;
2889 PUSHQ(rsp, env->segs[R_CS].selector);
2890 PUSHQ(rsp, next_eip);
2891 /* from this point, not restartable */
2892 ESP = rsp;
2893 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2894 get_seg_base(e1, e2),
2895 get_seg_limit(e1, e2), e2);
2896 EIP = new_eip;
2897 } else
2898#endif
2899 {
2900 sp = ESP;
2901 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2902 ssp = env->segs[R_SS].base;
2903 if (shift) {
2904 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2905 PUSHL(ssp, sp, sp_mask, next_eip);
2906 } else {
2907 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2908 PUSHW(ssp, sp, sp_mask, next_eip);
2909 }
2910
2911 limit = get_seg_limit(e1, e2);
2912 if (new_eip > limit)
2913 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2914 /* from this point, not restartable */
2915 SET_ESP(sp, sp_mask);
2916 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2917 get_seg_base(e1, e2), limit, e2);
2918 EIP = new_eip;
2919 }
2920 } else {
2921 /* check gate type */
2922 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2923 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2924 rpl = new_cs & 3;
2925 switch(type) {
2926 case 1: /* available 286 TSS */
2927 case 9: /* available 386 TSS */
2928 case 5: /* task gate */
2929 if (dpl < cpl || dpl < rpl)
2930 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2931 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2932 CC_OP = CC_OP_EFLAGS;
2933 return;
2934 case 4: /* 286 call gate */
2935 case 12: /* 386 call gate */
2936 break;
2937 default:
2938 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2939 break;
2940 }
2941 shift = type >> 3;
2942
2943 if (dpl < cpl || dpl < rpl)
2944 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2945 /* check valid bit */
2946 if (!(e2 & DESC_P_MASK))
2947 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2948 selector = e1 >> 16;
2949 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2950 param_count = e2 & 0x1f;
2951 if ((selector & 0xfffc) == 0)
2952 raise_exception_err(EXCP0D_GPF, 0);
2953
2954 if (load_segment(&e1, &e2, selector) != 0)
2955 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2956 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2957 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2958 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2959 if (dpl > cpl)
2960 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2961 if (!(e2 & DESC_P_MASK))
2962 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2963
2964 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2965 /* to inner privilege */
2966 get_ss_esp_from_tss(&ss, &sp, dpl);
2967#ifdef DEBUG_PCALL
2968 if (loglevel & CPU_LOG_PCALL)
2969 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2970 ss, sp, param_count, ESP);
2971#endif
2972 if ((ss & 0xfffc) == 0)
2973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2974 if ((ss & 3) != dpl)
2975 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2976 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2977 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2978 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2979 if (ss_dpl != dpl)
2980 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2981 if (!(ss_e2 & DESC_S_MASK) ||
2982 (ss_e2 & DESC_CS_MASK) ||
2983 !(ss_e2 & DESC_W_MASK))
2984 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2985 if (!(ss_e2 & DESC_P_MASK))
2986#ifdef VBOX /* See page 3-99 of 253666.pdf */
2987 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2988#else
2989 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2990#endif
2991
2992 // push_size = ((param_count * 2) + 8) << shift;
2993
2994 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2995 old_ssp = env->segs[R_SS].base;
2996
2997 sp_mask = get_sp_mask(ss_e2);
2998 ssp = get_seg_base(ss_e1, ss_e2);
2999 if (shift) {
3000 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3001 PUSHL(ssp, sp, sp_mask, ESP);
3002 for(i = param_count - 1; i >= 0; i--) {
3003 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3004 PUSHL(ssp, sp, sp_mask, val);
3005 }
3006 } else {
3007 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3008 PUSHW(ssp, sp, sp_mask, ESP);
3009 for(i = param_count - 1; i >= 0; i--) {
3010 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3011 PUSHW(ssp, sp, sp_mask, val);
3012 }
3013 }
3014 new_stack = 1;
3015 } else {
3016 /* to same privilege */
3017 sp = ESP;
3018 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3019 ssp = env->segs[R_SS].base;
3020 // push_size = (4 << shift);
3021 new_stack = 0;
3022 }
3023
3024 if (shift) {
3025 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3026 PUSHL(ssp, sp, sp_mask, next_eip);
3027 } else {
3028 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3029 PUSHW(ssp, sp, sp_mask, next_eip);
3030 }
3031
3032 /* from this point, not restartable */
3033
3034 if (new_stack) {
3035 ss = (ss & ~3) | dpl;
3036 cpu_x86_load_seg_cache(env, R_SS, ss,
3037 ssp,
3038 get_seg_limit(ss_e1, ss_e2),
3039 ss_e2);
3040 }
3041
3042 selector = (selector & ~3) | dpl;
3043 cpu_x86_load_seg_cache(env, R_CS, selector,
3044 get_seg_base(e1, e2),
3045 get_seg_limit(e1, e2),
3046 e2);
3047 cpu_x86_set_cpl(env, dpl);
3048 SET_ESP(sp, sp_mask);
3049 EIP = offset;
3050 }
3051#ifdef USE_KQEMU
3052 if (kqemu_is_ok(env)) {
3053 env->exception_index = -1;
3054 cpu_loop_exit();
3055 }
3056#endif
3057}
3058
3059/* real and vm86 mode iret */
3060void helper_iret_real(int shift)
3061{
3062 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3063 target_ulong ssp;
3064 int eflags_mask;
3065#ifdef VBOX
3066 bool fVME = false;
3067
3068 remR3TrapClear(env->pVM);
3069#endif /* VBOX */
3070
3071    sp_mask = 0xffff; /* XXX: use SS segment size? */
3072 sp = ESP;
3073 ssp = env->segs[R_SS].base;
3074 if (shift == 1) {
3075 /* 32 bits */
3076 POPL(ssp, sp, sp_mask, new_eip);
3077 POPL(ssp, sp, sp_mask, new_cs);
3078 new_cs &= 0xffff;
3079 POPL(ssp, sp, sp_mask, new_eflags);
3080 } else {
3081 /* 16 bits */
3082 POPW(ssp, sp, sp_mask, new_eip);
3083 POPW(ssp, sp, sp_mask, new_cs);
3084 POPW(ssp, sp, sp_mask, new_eflags);
3085 }
3086#ifdef VBOX
3087 if ( (env->eflags & VM_MASK)
3088 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3089 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3090 {
3091 fVME = true;
3092 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3093 /* if TF will be set -> #GP */
3094 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3095 || (new_eflags & TF_MASK))
3096 raise_exception(EXCP0D_GPF);
3097 }
3098#endif /* VBOX */
3099 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3100 env->segs[R_CS].selector = new_cs;
3101 env->segs[R_CS].base = (new_cs << 4);
3102 env->eip = new_eip;
3103#ifdef VBOX
3104 if (fVME)
3105 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3106 else
3107#endif
3108 if (env->eflags & VM_MASK)
3109 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3110 else
3111 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3112 if (shift == 0)
3113 eflags_mask &= 0xffff;
3114 load_eflags(new_eflags, eflags_mask);
3115 env->hflags2 &= ~HF2_NMI_MASK;
3116#ifdef VBOX
3117 if (fVME)
3118 {
3119 if (new_eflags & IF_MASK)
3120 env->eflags |= VIF_MASK;
3121 else
3122 env->eflags &= ~VIF_MASK;
3123 }
3124#endif /* VBOX */
3125}
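/*
 * Summary of the EFLAGS restore policy in helper_iret_real: in plain real
 * mode both IF and IOPL are restored from the popped image (third mask); in
 * VM86 without VME, IF is restored but IOPL is preserved (second mask); the
 * VBox VME path withholds IF as well and mirrors the popped IF bit into VIF
 * instead, raising #GP up front if the popped image enables interrupts while
 * a virtual interrupt is pending, or if it sets TF.
 */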
3126
3127#ifndef VBOX
3128static inline void validate_seg(int seg_reg, int cpl)
3129#else /* VBOX */
3130DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3131#endif /* VBOX */
3132{
3133 int dpl;
3134 uint32_t e2;
3135
3136 /* XXX: on x86_64, we do not want to nullify FS and GS because
3137 they may still contain a valid base. I would be interested to
3138 know how a real x86_64 CPU behaves */
3139 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3140 (env->segs[seg_reg].selector & 0xfffc) == 0)
3141 return;
3142
3143 e2 = env->segs[seg_reg].flags;
3144 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3145 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3146 /* data or non conforming code segment */
3147 if (dpl < cpl) {
3148 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3149 }
3150 }
3151}
3152
3153/* protected mode iret */
3154#ifndef VBOX
3155static inline void helper_ret_protected(int shift, int is_iret, int addend)
3156#else /* VBOX */
3157DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3158#endif /* VBOX */
3159{
3160 uint32_t new_cs, new_eflags, new_ss;
3161 uint32_t new_es, new_ds, new_fs, new_gs;
3162 uint32_t e1, e2, ss_e1, ss_e2;
3163 int cpl, dpl, rpl, eflags_mask, iopl;
3164 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3165
3166#ifdef VBOX
3167 ss_e1 = ss_e2 = e1 = e2 = 0;
3168#endif
3169
3170#ifdef TARGET_X86_64
3171 if (shift == 2)
3172 sp_mask = -1;
3173 else
3174#endif
3175 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3176 sp = ESP;
3177 ssp = env->segs[R_SS].base;
3178 new_eflags = 0; /* avoid warning */
3179#ifdef TARGET_X86_64
3180 if (shift == 2) {
3181 POPQ(sp, new_eip);
3182 POPQ(sp, new_cs);
3183 new_cs &= 0xffff;
3184 if (is_iret) {
3185 POPQ(sp, new_eflags);
3186 }
3187 } else
3188#endif
3189 if (shift == 1) {
3190 /* 32 bits */
3191 POPL(ssp, sp, sp_mask, new_eip);
3192 POPL(ssp, sp, sp_mask, new_cs);
3193 new_cs &= 0xffff;
3194 if (is_iret) {
3195 POPL(ssp, sp, sp_mask, new_eflags);
3196#if defined(VBOX) && defined(DEBUG)
3197 printf("iret: new CS %04X\n", new_cs);
3198 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3199 printf("iret: new EFLAGS %08X\n", new_eflags);
3200 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3201#endif
3202 if (new_eflags & VM_MASK)
3203 goto return_to_vm86;
3204 }
3205#ifdef VBOX
3206 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3207 {
3208#ifdef DEBUG
3209 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3210#endif
3211 new_cs = new_cs & 0xfffc;
3212 }
3213#endif
3214 } else {
3215 /* 16 bits */
3216 POPW(ssp, sp, sp_mask, new_eip);
3217 POPW(ssp, sp, sp_mask, new_cs);
3218 if (is_iret)
3219 POPW(ssp, sp, sp_mask, new_eflags);
3220 }
3221#ifdef DEBUG_PCALL
3222 if (loglevel & CPU_LOG_PCALL) {
3223 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3224 new_cs, new_eip, shift, addend);
3225 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3226 }
3227#endif
3228 if ((new_cs & 0xfffc) == 0)
3229 {
3230#if defined(VBOX) && defined(DEBUG)
3231            printf("(new_cs & 0xfffc) == 0\n");
3232#endif
3233 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3234 }
3235 if (load_segment(&e1, &e2, new_cs) != 0)
3236 {
3237#if defined(VBOX) && defined(DEBUG)
3238 printf("load_segment failed\n");
3239#endif
3240 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3241 }
3242 if (!(e2 & DESC_S_MASK) ||
3243 !(e2 & DESC_CS_MASK))
3244 {
3245#if defined(VBOX) && defined(DEBUG)
3246 printf("e2 mask %08x\n", e2);
3247#endif
3248 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3249 }
3250 cpl = env->hflags & HF_CPL_MASK;
3251 rpl = new_cs & 3;
3252 if (rpl < cpl)
3253 {
3254#if defined(VBOX) && defined(DEBUG)
3255 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3256#endif
3257 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3258 }
3259 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3260 if (e2 & DESC_C_MASK) {
3261 if (dpl > rpl)
3262 {
3263#if defined(VBOX) && defined(DEBUG)
3264 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3265#endif
3266 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3267 }
3268 } else {
3269 if (dpl != rpl)
3270 {
3271#if defined(VBOX) && defined(DEBUG)
3272 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3273#endif
3274 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3275 }
3276 }
3277 if (!(e2 & DESC_P_MASK))
3278 {
3279#if defined(VBOX) && defined(DEBUG)
3280 printf("DESC_P_MASK e2=%08x\n", e2);
3281#endif
3282 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3283 }
3284
3285 sp += addend;
3286 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3287 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3288 /* return to same privilege level */
3289 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3290 get_seg_base(e1, e2),
3291 get_seg_limit(e1, e2),
3292 e2);
3293 } else {
3294 /* return to different privilege level */
3295#ifdef TARGET_X86_64
3296 if (shift == 2) {
3297 POPQ(sp, new_esp);
3298 POPQ(sp, new_ss);
3299 new_ss &= 0xffff;
3300 } else
3301#endif
3302 if (shift == 1) {
3303 /* 32 bits */
3304 POPL(ssp, sp, sp_mask, new_esp);
3305 POPL(ssp, sp, sp_mask, new_ss);
3306 new_ss &= 0xffff;
3307 } else {
3308 /* 16 bits */
3309 POPW(ssp, sp, sp_mask, new_esp);
3310 POPW(ssp, sp, sp_mask, new_ss);
3311 }
3312#ifdef DEBUG_PCALL
3313 if (loglevel & CPU_LOG_PCALL) {
3314 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3315 new_ss, new_esp);
3316 }
3317#endif
3318 if ((new_ss & 0xfffc) == 0) {
3319#ifdef TARGET_X86_64
3320            /* NULL ss is allowed in long mode if cpl != 3 */
3321 /* XXX: test CS64 ? */
3322 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3323 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3324 0, 0xffffffff,
3325 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3326 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3327 DESC_W_MASK | DESC_A_MASK);
3328 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3329 } else
3330#endif
3331 {
3332 raise_exception_err(EXCP0D_GPF, 0);
3333 }
3334 } else {
3335 if ((new_ss & 3) != rpl)
3336 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3337 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3338 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3339 if (!(ss_e2 & DESC_S_MASK) ||
3340 (ss_e2 & DESC_CS_MASK) ||
3341 !(ss_e2 & DESC_W_MASK))
3342 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3343 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3344 if (dpl != rpl)
3345 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3346 if (!(ss_e2 & DESC_P_MASK))
3347 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3348 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3349 get_seg_base(ss_e1, ss_e2),
3350 get_seg_limit(ss_e1, ss_e2),
3351 ss_e2);
3352 }
3353
3354 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3355 get_seg_base(e1, e2),
3356 get_seg_limit(e1, e2),
3357 e2);
3358 cpu_x86_set_cpl(env, rpl);
3359 sp = new_esp;
3360#ifdef TARGET_X86_64
3361 if (env->hflags & HF_CS64_MASK)
3362 sp_mask = -1;
3363 else
3364#endif
3365 sp_mask = get_sp_mask(ss_e2);
3366
3367 /* validate data segments */
3368 validate_seg(R_ES, rpl);
3369 validate_seg(R_DS, rpl);
3370 validate_seg(R_FS, rpl);
3371 validate_seg(R_GS, rpl);
3372
3373 sp += addend;
3374 }
3375 SET_ESP(sp, sp_mask);
3376 env->eip = new_eip;
3377 if (is_iret) {
3378 /* NOTE: 'cpl' is the _old_ CPL */
3379 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3380 if (cpl == 0)
3381#ifdef VBOX
3382 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3383#else
3384 eflags_mask |= IOPL_MASK;
3385#endif
3386 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3387 if (cpl <= iopl)
3388 eflags_mask |= IF_MASK;
3389 if (shift == 0)
3390 eflags_mask &= 0xffff;
3391 load_eflags(new_eflags, eflags_mask);
3392 }
3393 return;
3394
3395 return_to_vm86:
3396 POPL(ssp, sp, sp_mask, new_esp);
3397 POPL(ssp, sp, sp_mask, new_ss);
3398 POPL(ssp, sp, sp_mask, new_es);
3399 POPL(ssp, sp, sp_mask, new_ds);
3400 POPL(ssp, sp, sp_mask, new_fs);
3401 POPL(ssp, sp, sp_mask, new_gs);
3402
3403 /* modify processor state */
3404 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3405 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3406 load_seg_vm(R_CS, new_cs & 0xffff);
3407 cpu_x86_set_cpl(env, 3);
3408 load_seg_vm(R_SS, new_ss & 0xffff);
3409 load_seg_vm(R_ES, new_es & 0xffff);
3410 load_seg_vm(R_DS, new_ds & 0xffff);
3411 load_seg_vm(R_FS, new_fs & 0xffff);
3412 load_seg_vm(R_GS, new_gs & 0xffff);
3413
3414 env->eip = new_eip & 0xffff;
3415 ESP = new_esp;
3416}
3417
3418void helper_iret_protected(int shift, int next_eip)
3419{
3420 int tss_selector, type;
3421 uint32_t e1, e2;
3422
3423#ifdef VBOX
3424 e1 = e2 = 0;
3425 remR3TrapClear(env->pVM);
3426#endif
3427
3428 /* specific case for TSS */
3429 if (env->eflags & NT_MASK) {
3430#ifdef TARGET_X86_64
3431 if (env->hflags & HF_LMA_MASK)
3432 raise_exception_err(EXCP0D_GPF, 0);
3433#endif
3434 tss_selector = lduw_kernel(env->tr.base + 0);
3435 if (tss_selector & 4)
3436 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3437 if (load_segment(&e1, &e2, tss_selector) != 0)
3438 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3439 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3440 /* NOTE: we check both segment and busy TSS */
3441 if (type != 3)
3442 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3443 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3444 } else {
3445 helper_ret_protected(shift, 1, 0);
3446 }
3447 env->hflags2 &= ~HF2_NMI_MASK;
3448#ifdef USE_KQEMU
3449 if (kqemu_is_ok(env)) {
3450 CC_OP = CC_OP_EFLAGS;
3451 env->exception_index = -1;
3452 cpu_loop_exit();
3453 }
3454#endif
3455}
3456
3457void helper_lret_protected(int shift, int addend)
3458{
3459 helper_ret_protected(shift, 0, addend);
3460#ifdef USE_KQEMU
3461 if (kqemu_is_ok(env)) {
3462 env->exception_index = -1;
3463 cpu_loop_exit();
3464 }
3465#endif
3466}
3467
3468void helper_sysenter(void)
3469{
3470 if (env->sysenter_cs == 0) {
3471 raise_exception_err(EXCP0D_GPF, 0);
3472 }
3473 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3474 cpu_x86_set_cpl(env, 0);
3475
3476#ifdef TARGET_X86_64
3477 if (env->hflags & HF_LMA_MASK) {
3478 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3479 0, 0xffffffff,
3480 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3481 DESC_S_MASK |
3482 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3483 } else
3484#endif
3485 {
3486 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3487 0, 0xffffffff,
3488 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3489 DESC_S_MASK |
3490 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3491 }
3492 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3493 0, 0xffffffff,
3494 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3495 DESC_S_MASK |
3496 DESC_W_MASK | DESC_A_MASK);
3497 ESP = env->sysenter_esp;
3498 EIP = env->sysenter_eip;
3499}
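/*
 * Selector layout implied by helper_sysenter/helper_sysexit: everything is
 * derived from IA32_SYSENTER_CS. CS = MSR and SS = MSR + 8 on entry; on
 * 32-bit exit CS = MSR + 16 and SS = MSR + 24 (with RPL forced to 3), and the
 * 64-bit exit path uses MSR + 32 / MSR + 40. All of the loaded segments are
 * flat 0..4G descriptors, so no GDT lookup is performed here.
 */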
3500
3501void helper_sysexit(int dflag)
3502{
3503 int cpl;
3504
3505 cpl = env->hflags & HF_CPL_MASK;
3506 if (env->sysenter_cs == 0 || cpl != 0) {
3507 raise_exception_err(EXCP0D_GPF, 0);
3508 }
3509 cpu_x86_set_cpl(env, 3);
3510#ifdef TARGET_X86_64
3511 if (dflag == 2) {
3512 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3513 0, 0xffffffff,
3514 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3515 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3516 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3517 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3518 0, 0xffffffff,
3519 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3520 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3521 DESC_W_MASK | DESC_A_MASK);
3522 } else
3523#endif
3524 {
3525 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3526 0, 0xffffffff,
3527 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3528 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3529 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3530 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3531 0, 0xffffffff,
3532 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3533 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3534 DESC_W_MASK | DESC_A_MASK);
3535 }
3536 ESP = ECX;
3537 EIP = EDX;
3538#ifdef USE_KQEMU
3539 if (kqemu_is_ok(env)) {
3540 env->exception_index = -1;
3541 cpu_loop_exit();
3542 }
3543#endif
3544}
3545
3546#if defined(CONFIG_USER_ONLY)
3547target_ulong helper_read_crN(int reg)
3548{
3549 return 0;
3550}
3551
3552void helper_write_crN(int reg, target_ulong t0)
3553{
3554}
3555#else
3556target_ulong helper_read_crN(int reg)
3557{
3558 target_ulong val;
3559
3560 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3561 switch(reg) {
3562 default:
3563 val = env->cr[reg];
3564 break;
3565 case 8:
3566 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3567 val = cpu_get_apic_tpr(env);
3568 } else {
3569 val = env->v_tpr;
3570 }
3571 break;
3572 }
3573 return val;
3574}
3575
3576void helper_write_crN(int reg, target_ulong t0)
3577{
3578 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3579 switch(reg) {
3580 case 0:
3581 cpu_x86_update_cr0(env, t0);
3582 break;
3583 case 3:
3584 cpu_x86_update_cr3(env, t0);
3585 break;
3586 case 4:
3587 cpu_x86_update_cr4(env, t0);
3588 break;
3589 case 8:
3590 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3591 cpu_set_apic_tpr(env, t0);
3592 }
3593 env->v_tpr = t0 & 0x0f;
3594 break;
3595 default:
3596 env->cr[reg] = t0;
3597 break;
3598 }
3599}
3600#endif
3601
3602void helper_lmsw(target_ulong t0)
3603{
3604 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3605 if already set to one. */
3606 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3607 helper_write_crN(0, t0);
3608}
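/*
 * Worked example for helper_lmsw: only the low four CR0 bits (PE, MP, EM, TS)
 * are writable and PE can never be cleared once set. With CR0 = 0x80000011
 * (PG | ET | PE), "lmsw 0" computes (0x80000011 & ~0xe) | (0x0 & 0xf) =
 * 0x80000011, so PE stays set; "lmsw 0xf" would yield 0x8000001f.
 */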
3609
3610void helper_clts(void)
3611{
3612 env->cr[0] &= ~CR0_TS_MASK;
3613 env->hflags &= ~HF_TS_MASK;
3614}
3615
3616/* XXX: do more */
3617void helper_movl_drN_T0(int reg, target_ulong t0)
3618{
3619 env->dr[reg] = t0;
3620}
3621
3622void helper_invlpg(target_ulong addr)
3623{
3624 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3625 tlb_flush_page(env, addr);
3626}
3627
3628void helper_rdtsc(void)
3629{
3630 uint64_t val;
3631
3632 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3633 raise_exception(EXCP0D_GPF);
3634 }
3635 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3636
3637 val = cpu_get_tsc(env) + env->tsc_offset;
3638 EAX = (uint32_t)(val);
3639 EDX = (uint32_t)(val >> 32);
3640}
3641
3642#ifdef VBOX
3643void helper_rdtscp(void)
3644{
3645 uint64_t val;
3646 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3647 raise_exception(EXCP0D_GPF);
3648 }
3649
3650 val = cpu_get_tsc(env);
3651 EAX = (uint32_t)(val);
3652 EDX = (uint32_t)(val >> 32);
3653 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3654}
3655#endif
3656
3657void helper_rdpmc(void)
3658{
3659 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3660 raise_exception(EXCP0D_GPF);
3661 }
3662 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3663
3664 /* currently unimplemented */
3665 raise_exception_err(EXCP06_ILLOP, 0);
3666}
3667
3668#if defined(CONFIG_USER_ONLY)
3669void helper_wrmsr(void)
3670{
3671}
3672
3673void helper_rdmsr(void)
3674{
3675}
3676#else
3677void helper_wrmsr(void)
3678{
3679 uint64_t val;
3680
3681 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3682
3683 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3684
3685 switch((uint32_t)ECX) {
3686 case MSR_IA32_SYSENTER_CS:
3687 env->sysenter_cs = val & 0xffff;
3688 break;
3689 case MSR_IA32_SYSENTER_ESP:
3690 env->sysenter_esp = val;
3691 break;
3692 case MSR_IA32_SYSENTER_EIP:
3693 env->sysenter_eip = val;
3694 break;
3695 case MSR_IA32_APICBASE:
3696 cpu_set_apic_base(env, val);
3697 break;
3698 case MSR_EFER:
3699 {
3700 uint64_t update_mask;
3701 update_mask = 0;
3702 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3703 update_mask |= MSR_EFER_SCE;
3704 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3705 update_mask |= MSR_EFER_LME;
3706 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3707 update_mask |= MSR_EFER_FFXSR;
3708 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3709 update_mask |= MSR_EFER_NXE;
3710 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3711 update_mask |= MSR_EFER_SVME;
3712 cpu_load_efer(env, (env->efer & ~update_mask) |
3713 (val & update_mask));
3714 }
3715 break;
3716 case MSR_STAR:
3717 env->star = val;
3718 break;
3719 case MSR_PAT:
3720 env->pat = val;
3721 break;
3722 case MSR_VM_HSAVE_PA:
3723 env->vm_hsave = val;
3724 break;
3725#ifdef TARGET_X86_64
3726 case MSR_LSTAR:
3727 env->lstar = val;
3728 break;
3729 case MSR_CSTAR:
3730 env->cstar = val;
3731 break;
3732 case MSR_FMASK:
3733 env->fmask = val;
3734 break;
3735 case MSR_FSBASE:
3736 env->segs[R_FS].base = val;
3737 break;
3738 case MSR_GSBASE:
3739 env->segs[R_GS].base = val;
3740 break;
3741 case MSR_KERNELGSBASE:
3742 env->kernelgsbase = val;
3743 break;
3744#endif
3745 default:
3746#ifndef VBOX
3747 /* XXX: exception ? */
3748 break;
3749#else /* VBOX */
3750 {
3751 uint32_t ecx = (uint32_t)ECX;
3752            /* In the x2APIC specification this range is reserved for APIC control. */
3753 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3754 cpu_apic_wrmsr(env, ecx, val);
3755 /** @todo else exception? */
3756 break;
3757 }
3758 case MSR_K8_TSC_AUX:
3759 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3760 break;
3761#endif /* VBOX */
3762 }
3763}
3764
3765void helper_rdmsr(void)
3766{
3767 uint64_t val;
3768
3769 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3770
3771 switch((uint32_t)ECX) {
3772 case MSR_IA32_SYSENTER_CS:
3773 val = env->sysenter_cs;
3774 break;
3775 case MSR_IA32_SYSENTER_ESP:
3776 val = env->sysenter_esp;
3777 break;
3778 case MSR_IA32_SYSENTER_EIP:
3779 val = env->sysenter_eip;
3780 break;
3781 case MSR_IA32_APICBASE:
3782 val = cpu_get_apic_base(env);
3783 break;
3784 case MSR_EFER:
3785 val = env->efer;
3786 break;
3787 case MSR_STAR:
3788 val = env->star;
3789 break;
3790 case MSR_PAT:
3791 val = env->pat;
3792 break;
3793 case MSR_VM_HSAVE_PA:
3794 val = env->vm_hsave;
3795 break;
3796 case MSR_IA32_PERF_STATUS:
3797 /* tsc_increment_by_tick */
3798 val = 1000ULL;
3799 /* CPU multiplier */
3800 val |= (((uint64_t)4ULL) << 40);
3801 break;
3802#ifdef TARGET_X86_64
3803 case MSR_LSTAR:
3804 val = env->lstar;
3805 break;
3806 case MSR_CSTAR:
3807 val = env->cstar;
3808 break;
3809 case MSR_FMASK:
3810 val = env->fmask;
3811 break;
3812 case MSR_FSBASE:
3813 val = env->segs[R_FS].base;
3814 break;
3815 case MSR_GSBASE:
3816 val = env->segs[R_GS].base;
3817 break;
3818 case MSR_KERNELGSBASE:
3819 val = env->kernelgsbase;
3820 break;
3821#endif
3822#ifdef USE_KQEMU
3823 case MSR_QPI_COMMBASE:
3824 if (env->kqemu_enabled) {
3825 val = kqemu_comm_base;
3826 } else {
3827 val = 0;
3828 }
3829 break;
3830#endif
3831 default:
3832#ifndef VBOX
3833 /* XXX: exception ? */
3834 val = 0;
3835 break;
3836#else /* VBOX */
3837 {
3838 uint32_t ecx = (uint32_t)ECX;
3839            /* In the x2APIC specification this range is reserved for APIC control. */
3840 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3841 val = cpu_apic_rdmsr(env, ecx);
3842 else
3843 val = 0; /** @todo else exception? */
3844 break;
3845 }
3846 case MSR_K8_TSC_AUX:
3847 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3848 break;
3849#endif /* VBOX */
3850 }
3851 EAX = (uint32_t)(val);
3852 EDX = (uint32_t)(val >> 32);
3853}
3854#endif
3855
3856target_ulong helper_lsl(target_ulong selector1)
3857{
3858 unsigned int limit;
3859 uint32_t e1, e2, eflags, selector;
3860 int rpl, dpl, cpl, type;
3861
3862 selector = selector1 & 0xffff;
3863 eflags = cc_table[CC_OP].compute_all();
3864 if (load_segment(&e1, &e2, selector) != 0)
3865 goto fail;
3866 rpl = selector & 3;
3867 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3868 cpl = env->hflags & HF_CPL_MASK;
3869 if (e2 & DESC_S_MASK) {
3870 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3871 /* conforming */
3872 } else {
3873 if (dpl < cpl || dpl < rpl)
3874 goto fail;
3875 }
3876 } else {
3877 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3878 switch(type) {
3879 case 1:
3880 case 2:
3881 case 3:
3882 case 9:
3883 case 11:
3884 break;
3885 default:
3886 goto fail;
3887 }
3888 if (dpl < cpl || dpl < rpl) {
3889 fail:
3890 CC_SRC = eflags & ~CC_Z;
3891 return 0;
3892 }
3893 }
3894 limit = get_seg_limit(e1, e2);
3895 CC_SRC = eflags | CC_Z;
3896 return limit;
3897}
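/*
 * Usage note for helper_lsl: LSL only updates its destination register when
 * ZF is set, so the ZF signalled through CC_SRC is the real success
 * indicator and the returned limit is ignored on failure. The limit returned
 * by get_seg_limit() is already byte-granular, i.e. page-granular descriptors
 * have been expanded with the low 0xfff bits filled in.
 */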
3898
3899target_ulong helper_lar(target_ulong selector1)
3900{
3901 uint32_t e1, e2, eflags, selector;
3902 int rpl, dpl, cpl, type;
3903
3904 selector = selector1 & 0xffff;
3905 eflags = cc_table[CC_OP].compute_all();
3906 if ((selector & 0xfffc) == 0)
3907 goto fail;
3908 if (load_segment(&e1, &e2, selector) != 0)
3909 goto fail;
3910 rpl = selector & 3;
3911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3912 cpl = env->hflags & HF_CPL_MASK;
3913 if (e2 & DESC_S_MASK) {
3914 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3915 /* conforming */
3916 } else {
3917 if (dpl < cpl || dpl < rpl)
3918 goto fail;
3919 }
3920 } else {
3921 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3922 switch(type) {
3923 case 1:
3924 case 2:
3925 case 3:
3926 case 4:
3927 case 5:
3928 case 9:
3929 case 11:
3930 case 12:
3931 break;
3932 default:
3933 goto fail;
3934 }
3935 if (dpl < cpl || dpl < rpl) {
3936 fail:
3937 CC_SRC = eflags & ~CC_Z;
3938 return 0;
3939 }
3940 }
3941 CC_SRC = eflags | CC_Z;
3942 return e2 & 0x00f0ff00;
3943}
3944
3945void helper_verr(target_ulong selector1)
3946{
3947 uint32_t e1, e2, eflags, selector;
3948 int rpl, dpl, cpl;
3949
3950 selector = selector1 & 0xffff;
3951 eflags = cc_table[CC_OP].compute_all();
3952 if ((selector & 0xfffc) == 0)
3953 goto fail;
3954 if (load_segment(&e1, &e2, selector) != 0)
3955 goto fail;
3956 if (!(e2 & DESC_S_MASK))
3957 goto fail;
3958 rpl = selector & 3;
3959 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3960 cpl = env->hflags & HF_CPL_MASK;
3961 if (e2 & DESC_CS_MASK) {
3962 if (!(e2 & DESC_R_MASK))
3963 goto fail;
3964 if (!(e2 & DESC_C_MASK)) {
3965 if (dpl < cpl || dpl < rpl)
3966 goto fail;
3967 }
3968 } else {
3969 if (dpl < cpl || dpl < rpl) {
3970 fail:
3971 CC_SRC = eflags & ~CC_Z;
3972 return;
3973 }
3974 }
3975 CC_SRC = eflags | CC_Z;
3976}
3977
3978void helper_verw(target_ulong selector1)
3979{
3980 uint32_t e1, e2, eflags, selector;
3981 int rpl, dpl, cpl;
3982
3983 selector = selector1 & 0xffff;
3984 eflags = cc_table[CC_OP].compute_all();
3985 if ((selector & 0xfffc) == 0)
3986 goto fail;
3987 if (load_segment(&e1, &e2, selector) != 0)
3988 goto fail;
3989 if (!(e2 & DESC_S_MASK))
3990 goto fail;
3991 rpl = selector & 3;
3992 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3993 cpl = env->hflags & HF_CPL_MASK;
3994 if (e2 & DESC_CS_MASK) {
3995 goto fail;
3996 } else {
3997 if (dpl < cpl || dpl < rpl)
3998 goto fail;
3999 if (!(e2 & DESC_W_MASK)) {
4000 fail:
4001 CC_SRC = eflags & ~CC_Z;
4002 return;
4003 }
4004 }
4005 CC_SRC = eflags | CC_Z;
4006}
4007
4008/* x87 FPU helpers */
4009
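/* Merge the given exception bits into the FPU status word; if any pending exception
   is unmasked in the control word (FPUC_EM bits), also set the error-summary and
   busy flags so a later FWAIT can raise the exception. */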
4010static void fpu_set_exception(int mask)
4011{
4012 env->fpus |= mask;
4013 if (env->fpus & (~env->fpuc & FPUC_EM))
4014 env->fpus |= FPUS_SE | FPUS_B;
4015}
4016
4017#ifndef VBOX
4018static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4019#else /* VBOX */
4020DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4021#endif /* VBOX */
4022{
4023 if (b == 0.0)
4024 fpu_set_exception(FPUS_ZE);
4025 return a / b;
4026}
4027
4028void fpu_raise_exception(void)
4029{
4030 if (env->cr[0] & CR0_NE_MASK) {
4031 raise_exception(EXCP10_COPR);
4032 }
4033#if !defined(CONFIG_USER_ONLY)
4034 else {
4035 cpu_set_ferr(env);
4036 }
4037#endif
4038}
4039
4040void helper_flds_FT0(uint32_t val)
4041{
4042 union {
4043 float32 f;
4044 uint32_t i;
4045 } u;
4046 u.i = val;
4047 FT0 = float32_to_floatx(u.f, &env->fp_status);
4048}
4049
4050void helper_fldl_FT0(uint64_t val)
4051{
4052 union {
4053 float64 f;
4054 uint64_t i;
4055 } u;
4056 u.i = val;
4057 FT0 = float64_to_floatx(u.f, &env->fp_status);
4058}
4059
4060void helper_fildl_FT0(int32_t val)
4061{
4062 FT0 = int32_to_floatx(val, &env->fp_status);
4063}
4064
4065void helper_flds_ST0(uint32_t val)
4066{
4067 int new_fpstt;
4068 union {
4069 float32 f;
4070 uint32_t i;
4071 } u;
4072 new_fpstt = (env->fpstt - 1) & 7;
4073 u.i = val;
4074 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4075 env->fpstt = new_fpstt;
4076 env->fptags[new_fpstt] = 0; /* validate stack entry */
4077}
4078
4079void helper_fldl_ST0(uint64_t val)
4080{
4081 int new_fpstt;
4082 union {
4083 float64 f;
4084 uint64_t i;
4085 } u;
4086 new_fpstt = (env->fpstt - 1) & 7;
4087 u.i = val;
4088 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4089 env->fpstt = new_fpstt;
4090 env->fptags[new_fpstt] = 0; /* validate stack entry */
4091}
4092
4093void helper_fildl_ST0(int32_t val)
4094{
4095 int new_fpstt;
4096 new_fpstt = (env->fpstt - 1) & 7;
4097 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4098 env->fpstt = new_fpstt;
4099 env->fptags[new_fpstt] = 0; /* validate stack entry */
4100}
4101
4102void helper_fildll_ST0(int64_t val)
4103{
4104 int new_fpstt;
4105 new_fpstt = (env->fpstt - 1) & 7;
4106 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4107 env->fpstt = new_fpstt;
4108 env->fptags[new_fpstt] = 0; /* validate stack entry */
4109}
4110
4111uint32_t helper_fsts_ST0(void)
4112{
4113 union {
4114 float32 f;
4115 uint32_t i;
4116 } u;
4117 u.f = floatx_to_float32(ST0, &env->fp_status);
4118 return u.i;
4119}
4120
4121uint64_t helper_fstl_ST0(void)
4122{
4123 union {
4124 float64 f;
4125 uint64_t i;
4126 } u;
4127 u.f = floatx_to_float64(ST0, &env->fp_status);
4128 return u.i;
4129}
4130
4131int32_t helper_fist_ST0(void)
4132{
4133 int32_t val;
4134 val = floatx_to_int32(ST0, &env->fp_status);
4135 if (val != (int16_t)val)
4136 val = -32768;
4137 return val;
4138}
4139
4140int32_t helper_fistl_ST0(void)
4141{
4142 int32_t val;
4143 val = floatx_to_int32(ST0, &env->fp_status);
4144 return val;
4145}
4146
4147int64_t helper_fistll_ST0(void)
4148{
4149 int64_t val;
4150 val = floatx_to_int64(ST0, &env->fp_status);
4151 return val;
4152}
4153
4154int32_t helper_fistt_ST0(void)
4155{
4156 int32_t val;
4157 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4158 if (val != (int16_t)val)
4159 val = -32768;
4160 return val;
4161}
4162
4163int32_t helper_fisttl_ST0(void)
4164{
4165 int32_t val;
4166 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4167 return val;
4168}
4169
4170int64_t helper_fisttll_ST0(void)
4171{
4172 int64_t val;
4173 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4174 return val;
4175}
4176
4177void helper_fldt_ST0(target_ulong ptr)
4178{
4179 int new_fpstt;
4180 new_fpstt = (env->fpstt - 1) & 7;
4181 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4182 env->fpstt = new_fpstt;
4183 env->fptags[new_fpstt] = 0; /* validate stack entry */
4184}
4185
4186void helper_fstt_ST0(target_ulong ptr)
4187{
4188 helper_fstt(ST0, ptr);
4189}
4190
4191void helper_fpush(void)
4192{
4193 fpush();
4194}
4195
4196void helper_fpop(void)
4197{
4198 fpop();
4199}
4200
4201void helper_fdecstp(void)
4202{
4203 env->fpstt = (env->fpstt - 1) & 7;
4204 env->fpus &= (~0x4700);
4205}
4206
4207void helper_fincstp(void)
4208{
4209 env->fpstt = (env->fpstt + 1) & 7;
4210 env->fpus &= (~0x4700);
4211}
4212
4213/* FPU move */
4214
4215void helper_ffree_STN(int st_index)
4216{
4217 env->fptags[(env->fpstt + st_index) & 7] = 1;
4218}
4219
4220void helper_fmov_ST0_FT0(void)
4221{
4222 ST0 = FT0;
4223}
4224
4225void helper_fmov_FT0_STN(int st_index)
4226{
4227 FT0 = ST(st_index);
4228}
4229
4230void helper_fmov_ST0_STN(int st_index)
4231{
4232 ST0 = ST(st_index);
4233}
4234
4235void helper_fmov_STN_ST0(int st_index)
4236{
4237 ST(st_index) = ST0;
4238}
4239
4240void helper_fxchg_ST0_STN(int st_index)
4241{
4242 CPU86_LDouble tmp;
4243 tmp = ST(st_index);
4244 ST(st_index) = ST0;
4245 ST0 = tmp;
4246}
4247
4248/* FPU operations */
4249
4250static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4251
4252void helper_fcom_ST0_FT0(void)
4253{
4254 int ret;
4255
4256 ret = floatx_compare(ST0, FT0, &env->fp_status);
4257 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4258 FORCE_RET();
4259}
4260
4261void helper_fucom_ST0_FT0(void)
4262{
4263 int ret;
4264
4265 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4266    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4267 FORCE_RET();
4268}
4269
4270static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4271
4272void helper_fcomi_ST0_FT0(void)
4273{
4274 int eflags;
4275 int ret;
4276
4277 ret = floatx_compare(ST0, FT0, &env->fp_status);
4278 eflags = cc_table[CC_OP].compute_all();
4279 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4280 CC_SRC = eflags;
4281 FORCE_RET();
4282}
4283
4284void helper_fucomi_ST0_FT0(void)
4285{
4286 int eflags;
4287 int ret;
4288
4289 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4290 eflags = cc_table[CC_OP].compute_all();
4291 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4292 CC_SRC = eflags;
4293 FORCE_RET();
4294}
4295
4296void helper_fadd_ST0_FT0(void)
4297{
4298 ST0 += FT0;
4299}
4300
4301void helper_fmul_ST0_FT0(void)
4302{
4303 ST0 *= FT0;
4304}
4305
4306void helper_fsub_ST0_FT0(void)
4307{
4308 ST0 -= FT0;
4309}
4310
4311void helper_fsubr_ST0_FT0(void)
4312{
4313 ST0 = FT0 - ST0;
4314}
4315
4316void helper_fdiv_ST0_FT0(void)
4317{
4318 ST0 = helper_fdiv(ST0, FT0);
4319}
4320
4321void helper_fdivr_ST0_FT0(void)
4322{
4323 ST0 = helper_fdiv(FT0, ST0);
4324}
4325
4326/* fp operations between STN and ST0 */
4327
4328void helper_fadd_STN_ST0(int st_index)
4329{
4330 ST(st_index) += ST0;
4331}
4332
4333void helper_fmul_STN_ST0(int st_index)
4334{
4335 ST(st_index) *= ST0;
4336}
4337
4338void helper_fsub_STN_ST0(int st_index)
4339{
4340 ST(st_index) -= ST0;
4341}
4342
4343void helper_fsubr_STN_ST0(int st_index)
4344{
4345 CPU86_LDouble *p;
4346 p = &ST(st_index);
4347 *p = ST0 - *p;
4348}
4349
4350void helper_fdiv_STN_ST0(int st_index)
4351{
4352 CPU86_LDouble *p;
4353 p = &ST(st_index);
4354 *p = helper_fdiv(*p, ST0);
4355}
4356
4357void helper_fdivr_STN_ST0(int st_index)
4358{
4359 CPU86_LDouble *p;
4360 p = &ST(st_index);
4361 *p = helper_fdiv(ST0, *p);
4362}
4363
4364/* misc FPU operations */
4365void helper_fchs_ST0(void)
4366{
4367 ST0 = floatx_chs(ST0);
4368}
4369
4370void helper_fabs_ST0(void)
4371{
4372 ST0 = floatx_abs(ST0);
4373}
4374
4375void helper_fld1_ST0(void)
4376{
4377 ST0 = f15rk[1];
4378}
4379
4380void helper_fldl2t_ST0(void)
4381{
4382 ST0 = f15rk[6];
4383}
4384
4385void helper_fldl2e_ST0(void)
4386{
4387 ST0 = f15rk[5];
4388}
4389
4390void helper_fldpi_ST0(void)
4391{
4392 ST0 = f15rk[2];
4393}
4394
4395void helper_fldlg2_ST0(void)
4396{
4397 ST0 = f15rk[3];
4398}
4399
4400void helper_fldln2_ST0(void)
4401{
4402 ST0 = f15rk[4];
4403}
4404
4405void helper_fldz_ST0(void)
4406{
4407 ST0 = f15rk[0];
4408}
4409
4410void helper_fldz_FT0(void)
4411{
4412 FT0 = f15rk[0];
4413}
4414
4415uint32_t helper_fnstsw(void)
4416{
4417 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4418}
4419
4420uint32_t helper_fnstcw(void)
4421{
4422 return env->fpuc;
4423}
4424
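/* Propagate the FPU control word into the softfloat status: the RC field (bits 11:10)
   selects the rounding mode (00 nearest, 01 down, 10 up, 11 chop) and, when 80-bit
   doubles are in use, the PC field (bits 9:8) selects the rounding precision
   (00 -> 32-bit, 10 -> 64-bit, 11 -> 80-bit). */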
4425static void update_fp_status(void)
4426{
4427 int rnd_type;
4428
4429 /* set rounding mode */
4430 switch(env->fpuc & RC_MASK) {
4431 default:
4432 case RC_NEAR:
4433 rnd_type = float_round_nearest_even;
4434 break;
4435 case RC_DOWN:
4436 rnd_type = float_round_down;
4437 break;
4438 case RC_UP:
4439 rnd_type = float_round_up;
4440 break;
4441 case RC_CHOP:
4442 rnd_type = float_round_to_zero;
4443 break;
4444 }
4445 set_float_rounding_mode(rnd_type, &env->fp_status);
4446#ifdef FLOATX80
4447 switch((env->fpuc >> 8) & 3) {
4448 case 0:
4449 rnd_type = 32;
4450 break;
4451 case 2:
4452 rnd_type = 64;
4453 break;
4454 case 3:
4455 default:
4456 rnd_type = 80;
4457 break;
4458 }
4459 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4460#endif
4461}
4462
4463void helper_fldcw(uint32_t val)
4464{
4465 env->fpuc = val;
4466 update_fp_status();
4467}
4468
4469void helper_fclex(void)
4470{
4471 env->fpus &= 0x7f00;
4472}
4473
4474void helper_fwait(void)
4475{
4476 if (env->fpus & FPUS_SE)
4477 fpu_raise_exception();
4478 FORCE_RET();
4479}
4480
4481void helper_fninit(void)
4482{
4483 env->fpus = 0;
4484 env->fpstt = 0;
4485 env->fpuc = 0x37f;
4486 env->fptags[0] = 1;
4487 env->fptags[1] = 1;
4488 env->fptags[2] = 1;
4489 env->fptags[3] = 1;
4490 env->fptags[4] = 1;
4491 env->fptags[5] = 1;
4492 env->fptags[6] = 1;
4493 env->fptags[7] = 1;
4494}
4495
4496/* BCD ops */
4497
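/* FBLD/FBSTP operate on a 10-byte packed-BCD operand: bytes 0..8 hold two decimal
   digits each (low nibble = lower digit, least significant byte first) and bit 7 of
   byte 9 is the sign.  E.g. decimal 1234 is stored as the bytes 34 12 00 ... 00. */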
4498void helper_fbld_ST0(target_ulong ptr)
4499{
4500 CPU86_LDouble tmp;
4501 uint64_t val;
4502 unsigned int v;
4503 int i;
4504
4505 val = 0;
4506 for(i = 8; i >= 0; i--) {
4507 v = ldub(ptr + i);
4508 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4509 }
4510 tmp = val;
4511 if (ldub(ptr + 9) & 0x80)
4512 tmp = -tmp;
4513 fpush();
4514 ST0 = tmp;
4515}
4516
4517void helper_fbst_ST0(target_ulong ptr)
4518{
4519 int v;
4520 target_ulong mem_ref, mem_end;
4521 int64_t val;
4522
4523 val = floatx_to_int64(ST0, &env->fp_status);
4524 mem_ref = ptr;
4525 mem_end = mem_ref + 9;
4526 if (val < 0) {
4527 stb(mem_end, 0x80);
4528 val = -val;
4529 } else {
4530 stb(mem_end, 0x00);
4531 }
4532 while (mem_ref < mem_end) {
4533 if (val == 0)
4534 break;
4535 v = val % 100;
4536 val = val / 100;
4537 v = ((v / 10) << 4) | (v % 10);
4538 stb(mem_ref++, v);
4539 }
4540 while (mem_ref < mem_end) {
4541 stb(mem_ref++, 0);
4542 }
4543}
4544
4545void helper_f2xm1(void)
4546{
4547 ST0 = pow(2.0,ST0) - 1.0;
4548}
4549
4550void helper_fyl2x(void)
4551{
4552 CPU86_LDouble fptemp;
4553
4554 fptemp = ST0;
4555 if (fptemp>0.0){
4556 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4557 ST1 *= fptemp;
4558 fpop();
4559 } else {
4560 env->fpus &= (~0x4700);
4561 env->fpus |= 0x400;
4562 }
4563}
4564
4565void helper_fptan(void)
4566{
4567 CPU86_LDouble fptemp;
4568
4569 fptemp = ST0;
4570 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4571 env->fpus |= 0x400;
4572 } else {
4573 ST0 = tan(fptemp);
4574 fpush();
4575 ST0 = 1.0;
4576 env->fpus &= (~0x400); /* C2 <-- 0 */
4577 /* the above code is for |arg| < 2**52 only */
4578 }
4579}
4580
4581void helper_fpatan(void)
4582{
4583 CPU86_LDouble fptemp, fpsrcop;
4584
4585 fpsrcop = ST1;
4586 fptemp = ST0;
4587 ST1 = atan2(fpsrcop,fptemp);
4588 fpop();
4589}
4590
4591void helper_fxtract(void)
4592{
4593 CPU86_LDoubleU temp;
4594 unsigned int expdif;
4595
4596 temp.d = ST0;
4597 expdif = EXPD(temp) - EXPBIAS;
4598 /*DP exponent bias*/
4599 ST0 = expdif;
4600 fpush();
4601 BIASEXPONENT(temp);
4602 ST0 = temp.d;
4603}
4604
4605#ifdef VBOX
4606#ifdef _MSC_VER
4607/* MSC cannot divide by zero */
4608extern double _Nan;
4609#define NaN _Nan
4610#else
4611#define NaN (0.0 / 0.0)
4612#endif
4613#endif /* VBOX */
4614
4615void helper_fprem1(void)
4616{
4617 CPU86_LDouble dblq, fpsrcop, fptemp;
4618 CPU86_LDoubleU fpsrcop1, fptemp1;
4619 int expdif;
4620 signed long long int q;
4621
4622#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4623 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4624#else
4625 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4626#endif
4627 ST0 = 0.0 / 0.0; /* NaN */
4628 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4629 return;
4630 }
4631
4632 fpsrcop = ST0;
4633 fptemp = ST1;
4634 fpsrcop1.d = fpsrcop;
4635 fptemp1.d = fptemp;
4636 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4637
4638 if (expdif < 0) {
4639 /* optimisation? taken from the AMD docs */
4640 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4641 /* ST0 is unchanged */
4642 return;
4643 }
4644
4645 if (expdif < 53) {
4646 dblq = fpsrcop / fptemp;
4647 /* round dblq towards nearest integer */
4648 dblq = rint(dblq);
4649 ST0 = fpsrcop - fptemp * dblq;
4650
4651 /* convert dblq to q by truncating towards zero */
4652 if (dblq < 0.0)
4653 q = (signed long long int)(-dblq);
4654 else
4655 q = (signed long long int)dblq;
4656
4657 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4658 /* (C0,C3,C1) <-- (q2,q1,q0) */
4659 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4660 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4661 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4662 } else {
4663 env->fpus |= 0x400; /* C2 <-- 1 */
4664 fptemp = pow(2.0, expdif - 50);
4665 fpsrcop = (ST0 / ST1) / fptemp;
4666 /* fpsrcop = integer obtained by chopping */
4667 fpsrcop = (fpsrcop < 0.0) ?
4668 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4669 ST0 -= (ST1 * fpsrcop * fptemp);
4670 }
4671}
4672
4673void helper_fprem(void)
4674{
4675 CPU86_LDouble dblq, fpsrcop, fptemp;
4676 CPU86_LDoubleU fpsrcop1, fptemp1;
4677 int expdif;
4678 signed long long int q;
4679
4680#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4681 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4682#else
4683 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4684#endif
4685 ST0 = 0.0 / 0.0; /* NaN */
4686 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4687 return;
4688 }
4689
4690 fpsrcop = (CPU86_LDouble)ST0;
4691 fptemp = (CPU86_LDouble)ST1;
4692 fpsrcop1.d = fpsrcop;
4693 fptemp1.d = fptemp;
4694 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4695
4696 if (expdif < 0) {
4697 /* optimisation? taken from the AMD docs */
4698 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4699 /* ST0 is unchanged */
4700 return;
4701 }
4702
4703 if ( expdif < 53 ) {
4704 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4705 /* round dblq towards zero */
4706 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4707 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4708
4709 /* convert dblq to q by truncating towards zero */
4710 if (dblq < 0.0)
4711 q = (signed long long int)(-dblq);
4712 else
4713 q = (signed long long int)dblq;
4714
4715 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4716 /* (C0,C3,C1) <-- (q2,q1,q0) */
4717 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4718 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4719 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4720 } else {
4721 int N = 32 + (expdif % 32); /* as per AMD docs */
4722 env->fpus |= 0x400; /* C2 <-- 1 */
4723 fptemp = pow(2.0, (double)(expdif - N));
4724 fpsrcop = (ST0 / ST1) / fptemp;
4725 /* fpsrcop = integer obtained by chopping */
4726 fpsrcop = (fpsrcop < 0.0) ?
4727 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4728 ST0 -= (ST1 * fpsrcop * fptemp);
4729 }
4730}
4731
4732void helper_fyl2xp1(void)
4733{
4734 CPU86_LDouble fptemp;
4735
4736 fptemp = ST0;
4737 if ((fptemp+1.0)>0.0) {
4738 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4739 ST1 *= fptemp;
4740 fpop();
4741 } else {
4742 env->fpus &= (~0x4700);
4743 env->fpus |= 0x400;
4744 }
4745}
4746
4747void helper_fsqrt(void)
4748{
4749 CPU86_LDouble fptemp;
4750
4751 fptemp = ST0;
4752 if (fptemp<0.0) {
4753 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4754 env->fpus |= 0x400;
4755 }
4756 ST0 = sqrt(fptemp);
4757}
4758
4759void helper_fsincos(void)
4760{
4761 CPU86_LDouble fptemp;
4762
4763 fptemp = ST0;
4764 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4765 env->fpus |= 0x400;
4766 } else {
4767 ST0 = sin(fptemp);
4768 fpush();
4769 ST0 = cos(fptemp);
4770 env->fpus &= (~0x400); /* C2 <-- 0 */
4771 /* the above code is for |arg| < 2**63 only */
4772 }
4773}
4774
4775void helper_frndint(void)
4776{
4777 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4778}
4779
4780void helper_fscale(void)
4781{
4782 ST0 = ldexp (ST0, (int)(ST1));
4783}
4784
4785void helper_fsin(void)
4786{
4787 CPU86_LDouble fptemp;
4788
4789 fptemp = ST0;
4790 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4791 env->fpus |= 0x400;
4792 } else {
4793 ST0 = sin(fptemp);
4794 env->fpus &= (~0x400); /* C2 <-- 0 */
4795 /* the above code is for |arg| < 2**53 only */
4796 }
4797}
4798
4799void helper_fcos(void)
4800{
4801 CPU86_LDouble fptemp;
4802
4803 fptemp = ST0;
4804 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4805 env->fpus |= 0x400;
4806 } else {
4807 ST0 = cos(fptemp);
4808 env->fpus &= (~0x400); /* C2 <-- 0 */
4809        /* the above code is for |arg| < 2**63 only */
4810 }
4811}
4812
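/* FXAM: classify ST0 into the C3/C2/C0 condition-code bits of the status word
   (NaN, infinity, zero, denormal or normal) and mirror its sign in C1. */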
4813void helper_fxam_ST0(void)
4814{
4815 CPU86_LDoubleU temp;
4816 int expdif;
4817
4818 temp.d = ST0;
4819
4820 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4821 if (SIGND(temp))
4822 env->fpus |= 0x200; /* C1 <-- 1 */
4823
4824 /* XXX: test fptags too */
4825 expdif = EXPD(temp);
4826 if (expdif == MAXEXPD) {
4827#ifdef USE_X86LDOUBLE
4828 if (MANTD(temp) == 0x8000000000000000ULL)
4829#else
4830 if (MANTD(temp) == 0)
4831#endif
4832 env->fpus |= 0x500 /*Infinity*/;
4833 else
4834 env->fpus |= 0x100 /*NaN*/;
4835 } else if (expdif == 0) {
4836 if (MANTD(temp) == 0)
4837 env->fpus |= 0x4000 /*Zero*/;
4838 else
4839 env->fpus |= 0x4400 /*Denormal*/;
4840 } else {
4841 env->fpus |= 0x400;
4842 }
4843}
4844
4845void helper_fstenv(target_ulong ptr, int data32)
4846{
4847 int fpus, fptag, exp, i;
4848 uint64_t mant;
4849 CPU86_LDoubleU tmp;
4850
4851 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4852 fptag = 0;
4853 for (i=7; i>=0; i--) {
4854 fptag <<= 2;
4855 if (env->fptags[i]) {
4856 fptag |= 3;
4857 } else {
4858 tmp.d = env->fpregs[i].d;
4859 exp = EXPD(tmp);
4860 mant = MANTD(tmp);
4861 if (exp == 0 && mant == 0) {
4862 /* zero */
4863 fptag |= 1;
4864 } else if (exp == 0 || exp == MAXEXPD
4865#ifdef USE_X86LDOUBLE
4866 || (mant & (1LL << 63)) == 0
4867#endif
4868 ) {
4869 /* NaNs, infinity, denormal */
4870 fptag |= 2;
4871 }
4872 }
4873 }
4874 if (data32) {
4875 /* 32 bit */
4876 stl(ptr, env->fpuc);
4877 stl(ptr + 4, fpus);
4878 stl(ptr + 8, fptag);
4879 stl(ptr + 12, 0); /* fpip */
4880 stl(ptr + 16, 0); /* fpcs */
4881 stl(ptr + 20, 0); /* fpoo */
4882 stl(ptr + 24, 0); /* fpos */
4883 } else {
4884 /* 16 bit */
4885 stw(ptr, env->fpuc);
4886 stw(ptr + 2, fpus);
4887 stw(ptr + 4, fptag);
4888 stw(ptr + 6, 0);
4889 stw(ptr + 8, 0);
4890 stw(ptr + 10, 0);
4891 stw(ptr + 12, 0);
4892 }
4893}
4894
4895void helper_fldenv(target_ulong ptr, int data32)
4896{
4897 int i, fpus, fptag;
4898
4899 if (data32) {
4900 env->fpuc = lduw(ptr);
4901 fpus = lduw(ptr + 4);
4902 fptag = lduw(ptr + 8);
4903 }
4904 else {
4905 env->fpuc = lduw(ptr);
4906 fpus = lduw(ptr + 2);
4907 fptag = lduw(ptr + 4);
4908 }
4909 env->fpstt = (fpus >> 11) & 7;
4910 env->fpus = fpus & ~0x3800;
4911 for(i = 0;i < 8; i++) {
4912 env->fptags[i] = ((fptag & 3) == 3);
4913 fptag >>= 2;
4914 }
4915}
4916
4917void helper_fsave(target_ulong ptr, int data32)
4918{
4919 CPU86_LDouble tmp;
4920 int i;
4921
4922 helper_fstenv(ptr, data32);
4923
4924 ptr += (14 << data32);
4925 for(i = 0;i < 8; i++) {
4926 tmp = ST(i);
4927 helper_fstt(tmp, ptr);
4928 ptr += 10;
4929 }
4930
4931 /* fninit */
4932 env->fpus = 0;
4933 env->fpstt = 0;
4934 env->fpuc = 0x37f;
4935 env->fptags[0] = 1;
4936 env->fptags[1] = 1;
4937 env->fptags[2] = 1;
4938 env->fptags[3] = 1;
4939 env->fptags[4] = 1;
4940 env->fptags[5] = 1;
4941 env->fptags[6] = 1;
4942 env->fptags[7] = 1;
4943}
4944
4945void helper_frstor(target_ulong ptr, int data32)
4946{
4947 CPU86_LDouble tmp;
4948 int i;
4949
4950 helper_fldenv(ptr, data32);
4951 ptr += (14 << data32);
4952
4953 for(i = 0;i < 8; i++) {
4954 tmp = helper_fldt(ptr);
4955 ST(i) = tmp;
4956 ptr += 10;
4957 }
4958}
4959
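/* Store the FPU/SSE state in the FXSAVE image layout: FCW at +0, FSW at +2, the
   abridged tag word at +4, ST0..ST7 at +0x20 in 16-byte slots, MXCSR at +0x18 and
   the XMM registers at +0xa0 (16 of them in long mode, 8 otherwise). */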
4960void helper_fxsave(target_ulong ptr, int data64)
4961{
4962 int fpus, fptag, i, nb_xmm_regs;
4963 CPU86_LDouble tmp;
4964 target_ulong addr;
4965
4966 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4967 fptag = 0;
4968 for(i = 0; i < 8; i++) {
4969 fptag |= (env->fptags[i] << i);
4970 }
4971 stw(ptr, env->fpuc);
4972 stw(ptr + 2, fpus);
4973 stw(ptr + 4, fptag ^ 0xff);
4974#ifdef TARGET_X86_64
4975 if (data64) {
4976 stq(ptr + 0x08, 0); /* rip */
4977 stq(ptr + 0x10, 0); /* rdp */
4978 } else
4979#endif
4980 {
4981 stl(ptr + 0x08, 0); /* eip */
4982 stl(ptr + 0x0c, 0); /* sel */
4983 stl(ptr + 0x10, 0); /* dp */
4984 stl(ptr + 0x14, 0); /* sel */
4985 }
4986
4987 addr = ptr + 0x20;
4988 for(i = 0;i < 8; i++) {
4989 tmp = ST(i);
4990 helper_fstt(tmp, addr);
4991 addr += 16;
4992 }
4993
4994 if (env->cr[4] & CR4_OSFXSR_MASK) {
4995 /* XXX: finish it */
4996 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4997 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4998 if (env->hflags & HF_CS64_MASK)
4999 nb_xmm_regs = 16;
5000 else
5001 nb_xmm_regs = 8;
5002 addr = ptr + 0xa0;
5003 for(i = 0; i < nb_xmm_regs; i++) {
5004 stq(addr, env->xmm_regs[i].XMM_Q(0));
5005 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5006 addr += 16;
5007 }
5008 }
5009}
5010
5011void helper_fxrstor(target_ulong ptr, int data64)
5012{
5013 int i, fpus, fptag, nb_xmm_regs;
5014 CPU86_LDouble tmp;
5015 target_ulong addr;
5016
5017 env->fpuc = lduw(ptr);
5018 fpus = lduw(ptr + 2);
5019 fptag = lduw(ptr + 4);
5020 env->fpstt = (fpus >> 11) & 7;
5021 env->fpus = fpus & ~0x3800;
5022 fptag ^= 0xff;
5023 for(i = 0;i < 8; i++) {
5024 env->fptags[i] = ((fptag >> i) & 1);
5025 }
5026
5027 addr = ptr + 0x20;
5028 for(i = 0;i < 8; i++) {
5029 tmp = helper_fldt(addr);
5030 ST(i) = tmp;
5031 addr += 16;
5032 }
5033
5034 if (env->cr[4] & CR4_OSFXSR_MASK) {
5035 /* XXX: finish it */
5036 env->mxcsr = ldl(ptr + 0x18);
5037 //ldl(ptr + 0x1c);
5038 if (env->hflags & HF_CS64_MASK)
5039 nb_xmm_regs = 16;
5040 else
5041 nb_xmm_regs = 8;
5042 addr = ptr + 0xa0;
5043 for(i = 0; i < nb_xmm_regs; i++) {
5044#if !defined(VBOX) || __GNUC__ < 4
5045 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5046 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5047#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5048# if 1
5049 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5050 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5051 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5052 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5053# else
5054 /* this works fine on Mac OS X, gcc 4.0.1 */
5055 uint64_t u64 = ldq(addr);
5056            env->xmm_regs[i].XMM_Q(0) = u64;
5057            u64 = ldq(addr + 8);
5058 env->xmm_regs[i].XMM_Q(1) = u64;
5059# endif
5060#endif
5061 addr += 16;
5062 }
5063 }
5064}
5065
5066#ifndef USE_X86LDOUBLE
5067
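/* Without host long-double support CPU86_LDouble is a 64-bit double, so the 80-bit
   external format is reconstructed by hand: the 52-bit mantissa is shifted up by 11
   with the explicit integer bit set, and the exponent is re-biased from 1023 to 16383. */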
5068void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5069{
5070 CPU86_LDoubleU temp;
5071 int e;
5072
5073 temp.d = f;
5074 /* mantissa */
5075 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5076 /* exponent + sign */
5077 e = EXPD(temp) - EXPBIAS + 16383;
5078 e |= SIGND(temp) >> 16;
5079 *pexp = e;
5080}
5081
5082CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5083{
5084 CPU86_LDoubleU temp;
5085 int e;
5086 uint64_t ll;
5087
5088 /* XXX: handle overflow ? */
5089 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5090 e |= (upper >> 4) & 0x800; /* sign */
5091 ll = (mant >> 11) & ((1LL << 52) - 1);
5092#ifdef __arm__
5093 temp.l.upper = (e << 20) | (ll >> 32);
5094 temp.l.lower = ll;
5095#else
5096 temp.ll = ll | ((uint64_t)e << 52);
5097#endif
5098 return temp.d;
5099}
5100
5101#else
5102
5103void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5104{
5105 CPU86_LDoubleU temp;
5106
5107 temp.d = f;
5108 *pmant = temp.l.lower;
5109 *pexp = temp.l.upper;
5110}
5111
5112CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5113{
5114 CPU86_LDoubleU temp;
5115
5116 temp.l.upper = upper;
5117 temp.l.lower = mant;
5118 return temp.d;
5119}
5120#endif
5121
5122#ifdef TARGET_X86_64
5123
5124//#define DEBUG_MULDIV
5125
5126static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5127{
5128 *plow += a;
5129 /* carry test */
5130 if (*plow < a)
5131 (*phigh)++;
5132 *phigh += b;
5133}
5134
5135static void neg128(uint64_t *plow, uint64_t *phigh)
5136{
5137 *plow = ~ *plow;
5138 *phigh = ~ *phigh;
5139 add128(plow, phigh, 1, 0);
5140}
5141
5142/* return TRUE if overflow */
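/* 128-by-64 unsigned division via simple shift-and-subtract: on entry *plow/*phigh
   hold the low/high halves of the dividend, on exit the quotient and remainder;
   returns 1 when the quotient does not fit in 64 bits (the callers turn this into a
   divide-error exception). */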
5143static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5144{
5145 uint64_t q, r, a1, a0;
5146 int i, qb, ab;
5147
5148 a0 = *plow;
5149 a1 = *phigh;
5150 if (a1 == 0) {
5151 q = a0 / b;
5152 r = a0 % b;
5153 *plow = q;
5154 *phigh = r;
5155 } else {
5156 if (a1 >= b)
5157 return 1;
5158 /* XXX: use a better algorithm */
5159 for(i = 0; i < 64; i++) {
5160 ab = a1 >> 63;
5161 a1 = (a1 << 1) | (a0 >> 63);
5162 if (ab || a1 >= b) {
5163 a1 -= b;
5164 qb = 1;
5165 } else {
5166 qb = 0;
5167 }
5168 a0 = (a0 << 1) | qb;
5169 }
5170#if defined(DEBUG_MULDIV)
5171 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5172 *phigh, *plow, b, a0, a1);
5173#endif
5174 *plow = a0;
5175 *phigh = a1;
5176 }
5177 return 0;
5178}
5179
5180/* return TRUE if overflow */
5181static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5182{
5183 int sa, sb;
5184 sa = ((int64_t)*phigh < 0);
5185 if (sa)
5186 neg128(plow, phigh);
5187 sb = (b < 0);
5188 if (sb)
5189 b = -b;
5190 if (div64(plow, phigh, b) != 0)
5191 return 1;
5192 if (sa ^ sb) {
5193 if (*plow > (1ULL << 63))
5194 return 1;
5195 *plow = - *plow;
5196 } else {
5197 if (*plow >= (1ULL << 63))
5198 return 1;
5199 }
5200 if (sa)
5201 *phigh = - *phigh;
5202 return 0;
5203}
5204
5205void helper_mulq_EAX_T0(target_ulong t0)
5206{
5207 uint64_t r0, r1;
5208
5209 mulu64(&r0, &r1, EAX, t0);
5210 EAX = r0;
5211 EDX = r1;
5212 CC_DST = r0;
5213 CC_SRC = r1;
5214}
5215
5216void helper_imulq_EAX_T0(target_ulong t0)
5217{
5218 uint64_t r0, r1;
5219
5220 muls64(&r0, &r1, EAX, t0);
5221 EAX = r0;
5222 EDX = r1;
5223 CC_DST = r0;
5224 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5225}
5226
5227target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5228{
5229 uint64_t r0, r1;
5230
5231 muls64(&r0, &r1, t0, t1);
5232 CC_DST = r0;
5233 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5234 return r0;
5235}
5236
5237void helper_divq_EAX(target_ulong t0)
5238{
5239 uint64_t r0, r1;
5240 if (t0 == 0) {
5241 raise_exception(EXCP00_DIVZ);
5242 }
5243 r0 = EAX;
5244 r1 = EDX;
5245 if (div64(&r0, &r1, t0))
5246 raise_exception(EXCP00_DIVZ);
5247 EAX = r0;
5248 EDX = r1;
5249}
5250
5251void helper_idivq_EAX(target_ulong t0)
5252{
5253 uint64_t r0, r1;
5254 if (t0 == 0) {
5255 raise_exception(EXCP00_DIVZ);
5256 }
5257 r0 = EAX;
5258 r1 = EDX;
5259 if (idiv64(&r0, &r1, t0))
5260 raise_exception(EXCP00_DIVZ);
5261 EAX = r0;
5262 EDX = r1;
5263}
5264#endif
5265
5266static void do_hlt(void)
5267{
5268 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5269 env->halted = 1;
5270 env->exception_index = EXCP_HLT;
5271 cpu_loop_exit();
5272}
5273
5274void helper_hlt(int next_eip_addend)
5275{
5276 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5277 EIP += next_eip_addend;
5278
5279 do_hlt();
5280}
5281
5282void helper_monitor(target_ulong ptr)
5283{
5284 if ((uint32_t)ECX != 0)
5285 raise_exception(EXCP0D_GPF);
5286 /* XXX: store address ? */
5287 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5288}
5289
5290void helper_mwait(int next_eip_addend)
5291{
5292 if ((uint32_t)ECX != 0)
5293 raise_exception(EXCP0D_GPF);
5294#ifdef VBOX
5295 helper_hlt(next_eip_addend);
5296#else
5297 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5298 EIP += next_eip_addend;
5299
5300 /* XXX: not complete but not completely erroneous */
5301 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5302 /* more than one CPU: do not sleep because another CPU may
5303 wake this one */
5304 } else {
5305 do_hlt();
5306 }
5307#endif
5308}
5309
5310void helper_debug(void)
5311{
5312 env->exception_index = EXCP_DEBUG;
5313 cpu_loop_exit();
5314}
5315
5316void helper_raise_interrupt(int intno, int next_eip_addend)
5317{
5318 raise_interrupt(intno, 1, 0, next_eip_addend);
5319}
5320
5321void helper_raise_exception(int exception_index)
5322{
5323 raise_exception(exception_index);
5324}
5325
5326void helper_cli(void)
5327{
5328 env->eflags &= ~IF_MASK;
5329}
5330
5331void helper_sti(void)
5332{
5333 env->eflags |= IF_MASK;
5334}
5335
5336#ifdef VBOX
5337void helper_cli_vme(void)
5338{
5339 env->eflags &= ~IF_MASK;
5340}
5341
5342void helper_sti_vme(void)
5343{
5344 /* First check, then change eflags according to the AMD manual */
5345 if (env->eflags & VIP_MASK) {
5346 raise_exception(EXCP0D_GPF);
5347 }
5348 env->eflags |= IF_MASK;
5349}
5350#endif
5351
5352#if 0
5353/* vm86plus instructions */
5354void helper_cli_vm(void)
5355{
5356 env->eflags &= ~VIF_MASK;
5357}
5358
5359void helper_sti_vm(void)
5360{
5361 env->eflags |= VIF_MASK;
5362 if (env->eflags & VIP_MASK) {
5363 raise_exception(EXCP0D_GPF);
5364 }
5365}
5366#endif
5367
5368void helper_set_inhibit_irq(void)
5369{
5370 env->hflags |= HF_INHIBIT_IRQ_MASK;
5371}
5372
5373void helper_reset_inhibit_irq(void)
5374{
5375 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5376}
5377
5378void helper_boundw(target_ulong a0, int v)
5379{
5380 int low, high;
5381 low = ldsw(a0);
5382 high = ldsw(a0 + 2);
5383 v = (int16_t)v;
5384 if (v < low || v > high) {
5385 raise_exception(EXCP05_BOUND);
5386 }
5387 FORCE_RET();
5388}
5389
5390void helper_boundl(target_ulong a0, int v)
5391{
5392 int low, high;
5393 low = ldl(a0);
5394 high = ldl(a0 + 4);
5395 if (v < low || v > high) {
5396 raise_exception(EXCP05_BOUND);
5397 }
5398 FORCE_RET();
5399}
5400
5401static float approx_rsqrt(float a)
5402{
5403 return 1.0 / sqrt(a);
5404}
5405
5406static float approx_rcp(float a)
5407{
5408 return 1.0 / a;
5409}
5410
5411#if !defined(CONFIG_USER_ONLY)
5412
5413#define MMUSUFFIX _mmu
5414
5415#define SHIFT 0
5416#include "softmmu_template.h"
5417
5418#define SHIFT 1
5419#include "softmmu_template.h"
5420
5421#define SHIFT 2
5422#include "softmmu_template.h"
5423
5424#define SHIFT 3
5425#include "softmmu_template.h"
5426
5427#endif
5428
5429/* try to fill the TLB and return an exception if error. If retaddr is
5430 NULL, it means that the function was called in C code (i.e. not
5431 from generated code or from helper.c) */
5432/* XXX: fix it to restore all registers */
5433void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5434{
5435 TranslationBlock *tb;
5436 int ret;
5437 unsigned long pc;
5438 CPUX86State *saved_env;
5439
5440 /* XXX: hack to restore env in all cases, even if not called from
5441 generated code */
5442 saved_env = env;
5443 env = cpu_single_env;
5444
5445 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5446 if (ret) {
5447 if (retaddr) {
5448 /* now we have a real cpu fault */
5449 pc = (unsigned long)retaddr;
5450 tb = tb_find_pc(pc);
5451 if (tb) {
5452 /* the PC is inside the translated code. It means that we have
5453 a virtual CPU fault */
5454 cpu_restore_state(tb, env, pc, NULL);
5455 }
5456 }
5457 raise_exception_err(env->exception_index, env->error_code);
5458 }
5459 env = saved_env;
5460}
5461
5462#ifdef VBOX
5463
5464/**
5465 * Correctly computes the eflags.
5466 * @returns eflags.
5467 * @param env1 CPU environment.
5468 */
5469uint32_t raw_compute_eflags(CPUX86State *env1)
5470{
5471 CPUX86State *savedenv = env;
5472 uint32_t efl;
5473 env = env1;
5474 efl = compute_eflags();
5475 env = savedenv;
5476 return efl;
5477}
5478
5479/**
5480 * Reads a byte from a virtual address in the guest memory area.
5481 * XXX: does this work for all addresses, including swapped-out pages?
5482 * @returns The data byte read.
5483 * @param env1 CPU environment.
5484 * @param pvAddr GC Virtual address.
5485 */
5486uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5487{
5488 CPUX86State *savedenv = env;
5489 uint8_t u8;
5490 env = env1;
5491 u8 = ldub_kernel(addr);
5492 env = savedenv;
5493 return u8;
5494}
5495
5496/**
5497 * Reads a word (16 bits) from a virtual address in the guest memory area.
5498 * XXX: does this work for all addresses, including swapped-out pages?
5499 * @returns The data word read.
5500 * @param env1 CPU environment.
5501 * @param pvAddr GC Virtual address.
5502 */
5503uint16_t read_word(CPUX86State *env1, target_ulong addr)
5504{
5505 CPUX86State *savedenv = env;
5506 uint16_t u16;
5507 env = env1;
5508 u16 = lduw_kernel(addr);
5509 env = savedenv;
5510 return u16;
5511}
5512
5513/**
5514 * Reads a dword (32 bits) from a virtual address in the guest memory area.
5515 * XXX: does this work for all addresses, including swapped-out pages?
5516 * @returns The data dword read.
5517 * @param env1 CPU environment.
5518 * @param pvAddr GC Virtual address.
5519 */
5520uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5521{
5522 CPUX86State *savedenv = env;
5523 uint32_t u32;
5524 env = env1;
5525 u32 = ldl_kernel(addr);
5526 env = savedenv;
5527 return u32;
5528}
5529
5530/**
5531 * Writes a byte to a virtual address in the guest memory area.
5532 * XXX: does this work for all addresses, including swapped-out pages?
5534 * @param env1 CPU environment.
5535 * @param pvAddr GC Virtual address.
5536 * @param val byte value
5537 */
5538void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5539{
5540 CPUX86State *savedenv = env;
5541 env = env1;
5542 stb(addr, val);
5543 env = savedenv;
5544}
5545
5546void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5547{
5548 CPUX86State *savedenv = env;
5549 env = env1;
5550 stw(addr, val);
5551 env = savedenv;
5552}
5553
5554void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5555{
5556 CPUX86State *savedenv = env;
5557 env = env1;
5558 stl(addr, val);
5559 env = savedenv;
5560}
5561
5562/**
5563 * Correctly loads a selector into a segment register, updating the internal
5564 * qemu data/caches.
5565 * @param env1 CPU environment.
5566 * @param seg_reg Segment register.
5567 * @param selector Selector to load.
5568 */
5569void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5570{
5571 CPUX86State *savedenv = env;
5572 jmp_buf old_buf;
5573
5574 env = env1;
5575
5576 if ( env->eflags & X86_EFL_VM
5577 || !(env->cr[0] & X86_CR0_PE))
5578 {
5579 load_seg_vm(seg_reg, selector);
5580
5581 env = savedenv;
5582
5583 /* Successful sync. */
5584 env1->segs[seg_reg].newselector = 0;
5585 }
5586 else
5587 {
5588        /* For some reason this works even without saving/restoring the jump buffer;
5589           since this code is time critical, we skip the save/restore. */
5590#if 0
5591 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5592#endif
5593 if (setjmp(env1->jmp_env) == 0)
5594 {
5595 if (seg_reg == R_CS)
5596 {
5597 uint32_t e1, e2;
5598 e1 = e2 = 0;
5599 load_segment(&e1, &e2, selector);
5600 cpu_x86_load_seg_cache(env, R_CS, selector,
5601 get_seg_base(e1, e2),
5602 get_seg_limit(e1, e2),
5603 e2);
5604 }
5605 else
5606 tss_load_seg(seg_reg, selector);
5607 env = savedenv;
5608
5609 /* Successful sync. */
5610 env1->segs[seg_reg].newselector = 0;
5611 }
5612 else
5613 {
5614 env = savedenv;
5615
5616 /* Postpone sync until the guest uses the selector. */
5617 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5618 env1->segs[seg_reg].newselector = selector;
5619 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5620 env1->exception_index = -1;
5621 env1->error_code = 0;
5622 env1->old_exception = -1;
5623 }
5624#if 0
5625 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5626#endif
5627 }
5628
5629}
5630
5631DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5632{
5633 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5634}
5635
5636
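/**
 * Translates and executes exactly one guest instruction through the recompiler:
 * a one-instruction TB is generated (CPU_EMULATE_SINGLE_INSTR), executed until EIP
 * actually advances or an external interrupt is pending, and then invalidated and
 * freed again.  If the instruction leaves HF_INHIBIT_IRQ_MASK set (interrupt
 * shadow), the next instruction is emulated as well.
 *
 * @param   env1    CPU environment.
 */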
5637int emulate_single_instr(CPUX86State *env1)
5638{
5639 TranslationBlock *tb;
5640 TranslationBlock *current;
5641 int flags;
5642 uint8_t *tc_ptr;
5643 target_ulong old_eip;
5644
5645 /* ensures env is loaded in ebp! */
5646 CPUX86State *savedenv = env;
5647 env = env1;
5648
5649 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5650
5651 current = env->current_tb;
5652 env->current_tb = NULL;
5653 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5654
5655 /*
5656 * Translate only one instruction.
5657 */
5658 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5659 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5660 env->segs[R_CS].base, flags, 0);
5661
5662 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5663
5664
5665 /* tb_link_phys: */
5666 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5667 tb->jmp_next[0] = NULL;
5668 tb->jmp_next[1] = NULL;
5669 Assert(tb->jmp_next[0] == NULL);
5670 Assert(tb->jmp_next[1] == NULL);
5671 if (tb->tb_next_offset[0] != 0xffff)
5672 tb_reset_jump(tb, 0);
5673 if (tb->tb_next_offset[1] != 0xffff)
5674 tb_reset_jump(tb, 1);
5675
5676 /*
5677 * Execute it using emulation
5678 */
5679 old_eip = env->eip;
5680 env->current_tb = tb;
5681
5682 /*
5683     * eip remains the same for repeated (REP-prefixed) instructions; it is unclear why qemu
5684     * doesn't simply jump back inside the generated code. Perhaps not a very safe hack.
5685 */
5686 while(old_eip == env->eip)
5687 {
5688 tc_ptr = tb->tc_ptr;
5689
5690#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5691 int fake_ret;
5692 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5693#else
5694 tcg_qemu_tb_exec(tc_ptr);
5695#endif
5696 /*
5697 * Exit once we detect an external interrupt and interrupts are enabled
5698 */
5699 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5700 ( (env->eflags & IF_MASK) &&
5701 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5702 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5703 {
5704 break;
5705 }
5706 }
5707 env->current_tb = current;
5708
5709 tb_phys_invalidate(tb, -1);
5710 tb_free(tb);
5711/*
5712 Assert(tb->tb_next_offset[0] == 0xffff);
5713 Assert(tb->tb_next_offset[1] == 0xffff);
5714 Assert(tb->tb_next[0] == 0xffff);
5715 Assert(tb->tb_next[1] == 0xffff);
5716 Assert(tb->jmp_next[0] == NULL);
5717 Assert(tb->jmp_next[1] == NULL);
5718 Assert(tb->jmp_first == NULL); */
5719
5720 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5721
5722 /*
5723 * Execute the next instruction when we encounter instruction fusing.
5724 */
5725 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5726 {
5727 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5728 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5729 emulate_single_instr(env);
5730 }
5731
5732 env = savedenv;
5733 return 0;
5734}
5735
5736/**
5737 * Correctly loads a new ldtr selector.
5738 *
5739 * @param env1 CPU environment.
5740 * @param selector Selector to load.
5741 */
5742void sync_ldtr(CPUX86State *env1, int selector)
5743{
5744 CPUX86State *saved_env = env;
5745 if (setjmp(env1->jmp_env) == 0)
5746 {
5747 env = env1;
5748 helper_lldt(selector);
5749 env = saved_env;
5750 }
5751 else
5752 {
5753 env = saved_env;
5754#ifdef VBOX_STRICT
5755 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5756#endif
5757 }
5758}
5759
5760/**
5761 * Correctly loads a new tr selector.
5762 *
5763 * @param env1 CPU environment.
5764 * @param selector Selector to load.
5765 */
5766int sync_tr(CPUX86State *env1, int selector)
5767{
5768 /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */
5769 SegmentCache *dt;
5770 uint32_t e1, e2;
5771 int index, type, entry_limit;
5772 target_ulong ptr;
5773 CPUX86State *saved_env = env;
5774 env = env1;
5775
5776 selector &= 0xffff;
5777 if ((selector & 0xfffc) == 0) {
5778 /* NULL selector case: invalid TR */
5779 env->tr.base = 0;
5780 env->tr.limit = 0;
5781 env->tr.flags = 0;
5782 } else {
5783 if (selector & 0x4)
5784 goto l_failure;
5785 dt = &env->gdt;
5786 index = selector & ~7;
5787#ifdef TARGET_X86_64
5788 if (env->hflags & HF_LMA_MASK)
5789 entry_limit = 15;
5790 else
5791#endif
5792 entry_limit = 7;
5793 if ((index + entry_limit) > dt->limit)
5794 goto l_failure;
5795 ptr = dt->base + index;
5796 e1 = ldl_kernel(ptr);
5797 e2 = ldl_kernel(ptr + 4);
5798 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5799 if ((e2 & DESC_S_MASK) /*||
5800 (type != 1 && type != 9)*/)
5801 goto l_failure;
5802 if (!(e2 & DESC_P_MASK))
5803 goto l_failure;
5804#ifdef TARGET_X86_64
5805 if (env->hflags & HF_LMA_MASK) {
5806 uint32_t e3;
5807 e3 = ldl_kernel(ptr + 8);
5808 load_seg_cache_raw_dt(&env->tr, e1, e2);
5809 env->tr.base |= (target_ulong)e3 << 32;
5810 } else
5811#endif
5812 {
5813 load_seg_cache_raw_dt(&env->tr, e1, e2);
5814 }
5815 e2 |= DESC_TSS_BUSY_MASK;
5816 stl_kernel(ptr + 4, e2);
5817 }
5818 env->tr.selector = selector;
5819
5820 env = saved_env;
5821 return 0;
5822l_failure:
5823    AssertMsgFailed(("selector=%d\n", selector));
    env = saved_env; /* restore env on the failure path as well */
5824    return -1;
5825}
5826
5827
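/* Reads the inner-ring SS:ESP pair for the given privilege level straight out of the
   current TSS (16- or 32-bit format, selected by the TSS type); returns 1 on success
   and 0 when the entry lies outside the TSS limit. */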
5828int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5829 uint32_t *esp_ptr, int dpl)
5830{
5831 int type, index, shift;
5832
5833 CPUX86State *savedenv = env;
5834 env = env1;
5835
5836 if (!(env->tr.flags & DESC_P_MASK))
5837 cpu_abort(env, "invalid tss");
5838 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5839 if ((type & 7) != 1)
5840 cpu_abort(env, "invalid tss type %d", type);
5841 shift = type >> 3;
5842 index = (dpl * 4 + 2) << shift;
5843 if (index + (4 << shift) - 1 > env->tr.limit)
5844 {
5845 env = savedenv;
5846 return 0;
5847 }
5848 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5849
5850 if (shift == 0) {
5851 *esp_ptr = lduw_kernel(env->tr.base + index);
5852 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5853 } else {
5854 *esp_ptr = ldl_kernel(env->tr.base + index);
5855 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5856 }
5857
5858 env = savedenv;
5859 return 1;
5860}
5861
5862//*****************************************************************************
5863// Needs to be at the bottom of the file (overriding macros)
5864
5865#ifndef VBOX
5866static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5867#else /* VBOX */
5868DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5869#endif /* VBOX */
5870{
5871 return *(CPU86_LDouble *)ptr;
5872}
5873
5874#ifndef VBOX
5875static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5876#else /* VBOX */
5877DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5878#endif /* VBOX */
5879{
5880 *(CPU86_LDouble *)ptr = f;
5881}
5882
5883#undef stw
5884#undef stl
5885#undef stq
5886#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5887#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5888#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5889#define data64 0
5890
5891//*****************************************************************************
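/* Copies the recompiler's FPU/SSE state *out* into the raw memory image at ptr
   (FXSAVE layout when the guest CPU reports CPUID_FXSR, the legacy FSAVE layout
   otherwise); the name is presumably chosen from the caller's point of view, which
   is restoring its own copy of the state. */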
5892void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5893{
5894 int fpus, fptag, i, nb_xmm_regs;
5895 CPU86_LDouble tmp;
5896 uint8_t *addr;
5897
5898 if (env->cpuid_features & CPUID_FXSR)
5899 {
5900 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5901 fptag = 0;
5902 for(i = 0; i < 8; i++) {
5903 fptag |= (env->fptags[i] << i);
5904 }
5905 stw(ptr, env->fpuc);
5906 stw(ptr + 2, fpus);
5907 stw(ptr + 4, fptag ^ 0xff);
5908
5909 addr = ptr + 0x20;
5910 for(i = 0;i < 8; i++) {
5911 tmp = ST(i);
5912 helper_fstt_raw(tmp, addr);
5913 addr += 16;
5914 }
5915
5916 if (env->cr[4] & CR4_OSFXSR_MASK) {
5917 /* XXX: finish it */
5918 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5919 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5920 nb_xmm_regs = 8 << data64;
5921 addr = ptr + 0xa0;
5922 for(i = 0; i < nb_xmm_regs; i++) {
5923#if __GNUC__ < 4
5924 stq(addr, env->xmm_regs[i].XMM_Q(0));
5925 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5926#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5927 stl(addr, env->xmm_regs[i].XMM_L(0));
5928 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5929 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5930 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5931#endif
5932 addr += 16;
5933 }
5934 }
5935 }
5936 else
5937 {
5938 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5939 int fptag;
5940
5941 fp->FCW = env->fpuc;
5942 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5943 fptag = 0;
5944 for (i=7; i>=0; i--) {
5945 fptag <<= 2;
5946 if (env->fptags[i]) {
5947 fptag |= 3;
5948 } else {
5949 /* the FPU automatically computes it */
5950 }
5951 }
5952 fp->FTW = fptag;
5953
5954 for(i = 0;i < 8; i++) {
5955 tmp = ST(i);
5956 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
5957 }
5958 }
5959}
5960
5961//*****************************************************************************
5962#undef lduw
5963#undef ldl
5964#undef ldq
5965#define lduw(a) *(uint16_t *)(a)
5966#define ldl(a) *(uint32_t *)(a)
5967#define ldq(a) *(uint64_t *)(a)
5968//*****************************************************************************
5969void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5970{
5971 int i, fpus, fptag, nb_xmm_regs;
5972 CPU86_LDouble tmp;
5973 uint8_t *addr;
5974
5975 if (env->cpuid_features & CPUID_FXSR)
5976 {
5977 env->fpuc = lduw(ptr);
5978 fpus = lduw(ptr + 2);
5979 fptag = lduw(ptr + 4);
5980 env->fpstt = (fpus >> 11) & 7;
5981 env->fpus = fpus & ~0x3800;
5982 fptag ^= 0xff;
5983 for(i = 0;i < 8; i++) {
5984 env->fptags[i] = ((fptag >> i) & 1);
5985 }
5986
5987 addr = ptr + 0x20;
5988 for(i = 0;i < 8; i++) {
5989 tmp = helper_fldt_raw(addr);
5990 ST(i) = tmp;
5991 addr += 16;
5992 }
5993
5994 if (env->cr[4] & CR4_OSFXSR_MASK) {
5995 /* XXX: finish it, endianness */
5996 env->mxcsr = ldl(ptr + 0x18);
5997 //ldl(ptr + 0x1c);
5998 nb_xmm_regs = 8 << data64;
5999 addr = ptr + 0xa0;
6000 for(i = 0; i < nb_xmm_regs; i++) {
6001#if HC_ARCH_BITS == 32
6002 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6003 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6004 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6005 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6006 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6007#else
6008 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6009 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6010#endif
6011 addr += 16;
6012 }
6013 }
6014 }
6015 else
6016 {
6017 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6018 int fptag, j;
6019
6020 env->fpuc = fp->FCW;
6021 env->fpstt = (fp->FSW >> 11) & 7;
6022 env->fpus = fp->FSW & ~0x3800;
6023 fptag = fp->FTW;
6024 for(i = 0;i < 8; i++) {
6025 env->fptags[i] = ((fptag & 3) == 3);
6026 fptag >>= 2;
6027 }
6028 j = env->fpstt;
6029 for(i = 0;i < 8; i++) {
6030 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6031 ST(i) = tmp;
6032 }
6033 }
6034}
6035//*****************************************************************************
6036//*****************************************************************************
6037
6038#endif /* VBOX */
6039
6040/* Secure Virtual Machine helpers */
6041
6042#if defined(CONFIG_USER_ONLY)
6043
6044void helper_vmrun(int aflag, int next_eip_addend)
6045{
6046}
6047void helper_vmmcall(void)
6048{
6049}
6050void helper_vmload(int aflag)
6051{
6052}
6053void helper_vmsave(int aflag)
6054{
6055}
6056void helper_stgi(void)
6057{
6058}
6059void helper_clgi(void)
6060{
6061}
6062void helper_skinit(void)
6063{
6064}
6065void helper_invlpga(int aflag)
6066{
6067}
6068void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6069{
6070}
6071void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6072{
6073}
6074
6075void helper_svm_check_io(uint32_t port, uint32_t param,
6076 uint32_t next_eip_addend)
6077{
6078}
6079#else
6080
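/* Store/load a segment register to/from a vmcb_seg: the hidden flag bits kept in
   SegmentCache.flags (descriptor dword-2 layout) are packed into, and unpacked
   from, the VMCB's 12-bit attrib encoding. */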
6081#ifndef VBOX
6082static inline void svm_save_seg(target_phys_addr_t addr,
6083#else /* VBOX */
6084DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6085#endif /* VBOX */
6086 const SegmentCache *sc)
6087{
6088 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6089 sc->selector);
6090 stq_phys(addr + offsetof(struct vmcb_seg, base),
6091 sc->base);
6092 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6093 sc->limit);
6094 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6095 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6096}
6097
6098#ifndef VBOX
6099static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6100#else /* VBOX */
6101DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6102#endif /* VBOX */
6103{
6104 unsigned int flags;
6105
6106 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6107 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6108 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6109 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6110 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6111}
6112
6113#ifndef VBOX
6114static inline void svm_load_seg_cache(target_phys_addr_t addr,
6115#else /* VBOX */
6116DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6117#endif /* VBOX */
6118 CPUState *env, int seg_reg)
6119{
6120 SegmentCache sc1, *sc = &sc1;
6121 svm_load_seg(addr, sc);
6122 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6123 sc->base, sc->limit, sc->flags);
6124}
6125
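/* VMRUN: save the relevant host state into the hsave area, load the guest state
   from the VMCB addressed by rAX/EAX, arm the intercept bitmaps and GIF, and
   finally inject any event pending in control.event_inj. */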
6126void helper_vmrun(int aflag, int next_eip_addend)
6127{
6128 target_ulong addr;
6129 uint32_t event_inj;
6130 uint32_t int_ctl;
6131
6132 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6133
6134 if (aflag == 2)
6135 addr = EAX;
6136 else
6137 addr = (uint32_t)EAX;
6138
6139 if (loglevel & CPU_LOG_TB_IN_ASM)
6140 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6141
6142 env->vm_vmcb = addr;
6143
6144 /* save the current CPU state in the hsave page */
6145 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6146 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6147
6148 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6149 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6150
6151 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6152 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6153 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6154 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6155 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6156 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6157
6158 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6159 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6160
6161 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6162 &env->segs[R_ES]);
6163 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6164 &env->segs[R_CS]);
6165 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6166 &env->segs[R_SS]);
6167 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6168 &env->segs[R_DS]);
6169
6170 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6171 EIP + next_eip_addend);
6172 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6173 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6174
6175 /* load the interception bitmaps so we do not need to access the
6176 vmcb in svm mode */
6177 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6178 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6179 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6180 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6181 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6182 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6183
6184 /* enable intercepts */
6185 env->hflags |= HF_SVMI_MASK;
6186
6187 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6188
6189 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6190 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6191
6192 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6193 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6194
6195 /* clear exit_info_2 so we behave like the real hardware */
6196 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6197
6198 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6199 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6200 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6201 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6202 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6203 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6204 if (int_ctl & V_INTR_MASKING_MASK) {
6205 env->v_tpr = int_ctl & V_TPR_MASK;
6206 env->hflags2 |= HF2_VINTR_MASK;
6207 if (env->eflags & IF_MASK)
6208 env->hflags2 |= HF2_HIF_MASK;
6209 }
6210
6211 cpu_load_efer(env,
6212 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6213 env->eflags = 0;
6214 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6215 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6216 CC_OP = CC_OP_EFLAGS;
6217
6218 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6219 env, R_ES);
6220 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6221 env, R_CS);
6222 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6223 env, R_SS);
6224 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6225 env, R_DS);
6226
6227 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6228 env->eip = EIP;
6229 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6230 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6231 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6232 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6233 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6234
6235 /* FIXME: guest state consistency checks */
6236
6237 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6238 case TLB_CONTROL_DO_NOTHING:
6239 break;
6240 case TLB_CONTROL_FLUSH_ALL_ASID:
6241 /* FIXME: this is not 100% correct but should work for now */
6242 tlb_flush(env, 1);
6243 break;
6244 }
6245
6246 env->hflags2 |= HF2_GIF_MASK;
6247
6248 if (int_ctl & V_IRQ_MASK) {
6249 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6250 }
6251
6252 /* maybe we need to inject an event */
6253 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6254 if (event_inj & SVM_EVTINJ_VALID) {
6255 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6256 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6257 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6258 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6259
6260 if (loglevel & CPU_LOG_TB_IN_ASM)
6261 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6262 /* FIXME: need to implement valid_err */
6263 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6264 case SVM_EVTINJ_TYPE_INTR:
6265 env->exception_index = vector;
6266 env->error_code = event_inj_err;
6267 env->exception_is_int = 0;
6268 env->exception_next_eip = -1;
6269 if (loglevel & CPU_LOG_TB_IN_ASM)
6270 fprintf(logfile, "INTR");
6271 /* XXX: is it always correct ? */
6272 do_interrupt(vector, 0, 0, 0, 1);
6273 break;
6274 case SVM_EVTINJ_TYPE_NMI:
6275 env->exception_index = EXCP02_NMI;
6276 env->error_code = event_inj_err;
6277 env->exception_is_int = 0;
6278 env->exception_next_eip = EIP;
6279 if (loglevel & CPU_LOG_TB_IN_ASM)
6280 fprintf(logfile, "NMI");
6281 cpu_loop_exit();
6282 break;
6283 case SVM_EVTINJ_TYPE_EXEPT:
6284 env->exception_index = vector;
6285 env->error_code = event_inj_err;
6286 env->exception_is_int = 0;
6287 env->exception_next_eip = -1;
6288 if (loglevel & CPU_LOG_TB_IN_ASM)
6289 fprintf(logfile, "EXEPT");
6290 cpu_loop_exit();
6291 break;
6292 case SVM_EVTINJ_TYPE_SOFT:
6293 env->exception_index = vector;
6294 env->error_code = event_inj_err;
6295 env->exception_is_int = 1;
6296 env->exception_next_eip = EIP;
6297 if (loglevel & CPU_LOG_TB_IN_ASM)
6298 fprintf(logfile, "SOFT");
6299 cpu_loop_exit();
6300 break;
6301 }
6302 if (loglevel & CPU_LOG_TB_IN_ASM)
6303 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6304 }
6305}
6306
6307void helper_vmmcall(void)
6308{
6309 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6310 raise_exception(EXCP06_ILLOP);
6311}
6312
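/* VMLOAD/VMSAVE transfer the guest state that VMRUN/#VMEXIT do not switch:
   FS, GS, TR and LDTR (including their hidden parts), KernelGSBase, the
   SYSCALL MSRs (STAR/LSTAR/CSTAR/SFMASK) and the SYSENTER MSRs. */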
6313void helper_vmload(int aflag)
6314{
6315 target_ulong addr;
6316 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6317
6318 if (aflag == 2)
6319 addr = EAX;
6320 else
6321 addr = (uint32_t)EAX;
6322
6323 if (loglevel & CPU_LOG_TB_IN_ASM)
6324 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6325 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6326 env->segs[R_FS].base);
6327
6328 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6329 env, R_FS);
6330 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6331 env, R_GS);
6332 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6333 &env->tr);
6334 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6335 &env->ldt);
6336
6337#ifdef TARGET_X86_64
6338 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6339 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6340 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6341 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6342#endif
6343 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6344 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6345 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6346 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6347}
6348
6349void helper_vmsave(int aflag)
6350{
6351 target_ulong addr;
6352 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6353
6354 if (aflag == 2)
6355 addr = EAX;
6356 else
6357 addr = (uint32_t)EAX;
6358
6359 if (loglevel & CPU_LOG_TB_IN_ASM)
6360 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6361 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6362 env->segs[R_FS].base);
6363
6364 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6365 &env->segs[R_FS]);
6366 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6367 &env->segs[R_GS]);
6368 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6369 &env->tr);
6370 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6371 &env->ldt);
6372
6373#ifdef TARGET_X86_64
6374 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6375 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6376 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6377 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6378#endif
6379 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6380 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6381 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6382 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6383}
6384
6385void helper_stgi(void)
6386{
6387 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6388 env->hflags2 |= HF2_GIF_MASK;
6389}
6390
6391void helper_clgi(void)
6392{
6393 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6394 env->hflags2 &= ~HF2_GIF_MASK;
6395}
6396
6397void helper_skinit(void)
6398{
6399 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6400 /* XXX: not implemented */
6401 raise_exception(EXCP06_ILLOP);
6402}
6403
6404void helper_invlpga(int aflag)
6405{
6406 target_ulong addr;
6407 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6408
6409 if (aflag == 2)
6410 addr = EAX;
6411 else
6412 addr = (uint32_t)EAX;
6413
6414     /* XXX: could use the ASID to see whether the flush is
6415        actually needed */
6416 tlb_flush_page(env, addr);
6417}
6418
6419void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6420{
6421 if (likely(!(env->hflags & HF_SVMI_MASK)))
6422 return;
6423#ifndef VBOX
6424 switch(type) {
6425#ifndef VBOX
6426 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6427#else
6428 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6429 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6430 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6431#endif
6432 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6433 helper_vmexit(type, param);
6434 }
6435 break;
6436#ifndef VBOX
6437 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6438#else
6439 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6440 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6441 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6442#endif
6443 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6444 helper_vmexit(type, param);
6445 }
6446 break;
6447 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6448 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6449 helper_vmexit(type, param);
6450 }
6451 break;
6452 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6453 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6454 helper_vmexit(type, param);
6455 }
6456 break;
6457 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6458 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6459 helper_vmexit(type, param);
6460 }
6461 break;
6462 case SVM_EXIT_MSR:
6463 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6464 /* FIXME: this should be read in at vmrun (faster this way?) */
6465 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6466 uint32_t t0, t1;
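            /* The MSR permission map uses 2 bits per MSR (read intercept,
               then write intercept).  The three architectural MSR ranges are
               packed one after another, 8192 MSRs (2 Kbytes) each; t1 is the
               byte offset into the map and t0 the bit offset within that
               byte. */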
6467 switch((uint32_t)ECX) {
6468 case 0 ... 0x1fff:
6469 t0 = (ECX * 2) % 8;
6470                 t1 = (ECX * 2) / 8;
6471 break;
6472 case 0xc0000000 ... 0xc0001fff:
6473 t0 = (8192 + ECX - 0xc0000000) * 2;
6474 t1 = (t0 / 8);
6475 t0 %= 8;
6476 break;
6477 case 0xc0010000 ... 0xc0011fff:
6478 t0 = (16384 + ECX - 0xc0010000) * 2;
6479 t1 = (t0 / 8);
6480 t0 %= 8;
6481 break;
6482 default:
6483 helper_vmexit(type, param);
6484 t0 = 0;
6485 t1 = 0;
6486 break;
6487 }
6488 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6489 helper_vmexit(type, param);
6490 }
6491 break;
6492 default:
6493 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6494 helper_vmexit(type, param);
6495 }
6496 break;
6497 }
6498#else
6499 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6500#endif
6501}
6502
6503void helper_svm_check_io(uint32_t port, uint32_t param,
6504 uint32_t next_eip_addend)
6505{
6506 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6507 /* FIXME: this should be read in at vmrun (faster this way?) */
6508 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
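        /* The I/O permission map has one intercept bit per port; a multi-byte
           access must check one bit per byte accessed, hence the
           size-dependent mask (bits 4-6 of param encode the operand size) and
           the 16-bit load, which copes with a check that straddles a byte
           boundary. */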
6509 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6510 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6511 /* next EIP */
6512 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6513 env->eip + next_eip_addend);
6514 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6515 }
6516 }
6517}
6518
6519/* Note: currently only 32 bits of exit_code are used */
6520void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6521{
6522 uint32_t int_ctl;
6523
6524 if (loglevel & CPU_LOG_TB_IN_ASM)
6525 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6526 exit_code, exit_info_1,
6527 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6528 EIP);
6529
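    /* Propagate a pending STI/MOV SS interrupt shadow into the VMCB so the
       host sees the guest's interrupt shadow state. */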
6530 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6531 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6532 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6533 } else {
6534 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6535 }
6536
6537 /* Save the VM state in the vmcb */
6538 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6539 &env->segs[R_ES]);
6540 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6541 &env->segs[R_CS]);
6542 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6543 &env->segs[R_SS]);
6544 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6545 &env->segs[R_DS]);
6546
6547 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6548 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6549
6550 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6551 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6552
6553 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6554 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6555 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6556 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6557 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6558
6559 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6560 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6561 int_ctl |= env->v_tpr & V_TPR_MASK;
6562 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6563 int_ctl |= V_IRQ_MASK;
6564 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6565
6566 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6567 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6568 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6569 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6570 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6571 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6572 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6573
6574 /* Reload the host state from vm_hsave */
6575 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6576 env->hflags &= ~HF_SVMI_MASK;
6577 env->intercept = 0;
6578 env->intercept_exceptions = 0;
6579 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6580 env->tsc_offset = 0;
6581
6582 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6583 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6584
6585 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6586 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6587
6588 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6589 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6590 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6591 /* we need to set the efer after the crs so the hidden flags get
6592 set properly */
6593 cpu_load_efer(env,
6594 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6595 env->eflags = 0;
6596 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6597 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6598 CC_OP = CC_OP_EFLAGS;
6599
6600 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6601 env, R_ES);
6602 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6603 env, R_CS);
6604 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6605 env, R_SS);
6606 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6607 env, R_DS);
6608
6609 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6610 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6611 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6612
6613 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6614 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6615
6616 /* other setups */
6617 cpu_x86_set_cpl(env, 0);
6618 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6619 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6620
6621 env->hflags2 &= ~HF2_GIF_MASK;
6622 /* FIXME: Resets the current ASID register to zero (host ASID). */
6623
6624 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6625
6626 /* Clears the TSC_OFFSET inside the processor. */
6627
6628 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6629        from the page table indicated by the host's CR3. If the PDPEs contain
6630 illegal state, the processor causes a shutdown. */
6631
6632 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6633 env->cr[0] |= CR0_PE_MASK;
6634 env->eflags &= ~VM_MASK;
6635
6636 /* Disables all breakpoints in the host DR7 register. */
6637
6638 /* Checks the reloaded host state for consistency. */
6639
6640 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6641 host's code segment or non-canonical (in the case of long mode), a
6642        #GP fault is delivered inside the host. */
6643
6644 /* remove any pending exception */
6645 env->exception_index = -1;
6646 env->error_code = 0;
6647 env->old_exception = -1;
6648
6649 cpu_loop_exit();
6650}
6651
6652#endif
6653
6654/* MMX/SSE */
6655/* XXX: optimize by storing fptt and fptags in the static cpu state */
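/* In env->fptags[] a value of 1 marks an empty x87 register and 0 a valid
   one: entering MMX mode tags all eight registers as valid and resets the
   stack top, while EMMS tags them all as empty again. */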
6656void helper_enter_mmx(void)
6657{
6658 env->fpstt = 0;
6659 *(uint32_t *)(env->fptags) = 0;
6660 *(uint32_t *)(env->fptags + 4) = 0;
6661}
6662
6663void helper_emms(void)
6664{
6665 /* set to empty state */
6666 *(uint32_t *)(env->fptags) = 0x01010101;
6667 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6668}
6669
6670/* XXX: suppress */
6671void helper_movq(uint64_t *d, uint64_t *s)
6672{
6673 *d = *s;
6674}
6675
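/* ops_sse.h is included twice: SHIFT 0 generates the 64-bit MMX variants and
   SHIFT 1 the 128-bit SSE variants.  helper_template.h below is instantiated
   once per operand size (SHIFT 0..3 = 8/16/32/64 bit). */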
6676#define SHIFT 0
6677#include "ops_sse.h"
6678
6679#define SHIFT 1
6680#include "ops_sse.h"
6681
6682#define SHIFT 0
6683#include "helper_template.h"
6684#undef SHIFT
6685
6686#define SHIFT 1
6687#include "helper_template.h"
6688#undef SHIFT
6689
6690#define SHIFT 2
6691#include "helper_template.h"
6692#undef SHIFT
6693
6694#ifdef TARGET_X86_64
6695
6696#define SHIFT 3
6697#include "helper_template.h"
6698#undef SHIFT
6699
6700#endif
6701
6702/* bit operations */
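/* BSF/BSR are implemented as simple shift loops; the operand is assumed to
   be non-zero (with a zero source the architectural result is undefined and
   these loops would never terminate), so the translated code is expected to
   test for zero before calling these helpers. */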
6703target_ulong helper_bsf(target_ulong t0)
6704{
6705 int count;
6706 target_ulong res;
6707
6708 res = t0;
6709 count = 0;
6710 while ((res & 1) == 0) {
6711 count++;
6712 res >>= 1;
6713 }
6714 return count;
6715}
6716
6717target_ulong helper_bsr(target_ulong t0)
6718{
6719 int count;
6720 target_ulong res, mask;
6721
6722 res = t0;
6723 count = TARGET_LONG_BITS - 1;
6724 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6725 while ((res & mask) == 0) {
6726 count--;
6727 res <<= 1;
6728 }
6729 return count;
6730}
6731
6732
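/* For CC_OP_EFLAGS the flags are already materialized in CC_SRC, so the
   "compute" helpers simply return it (masked down to CF for the carry-only
   variant). */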
6733static int compute_all_eflags(void)
6734{
6735 return CC_SRC;
6736}
6737
6738static int compute_c_eflags(void)
6739{
6740 return CC_SRC & CC_C;
6741}
6742
6743#ifndef VBOX
6744CCTable cc_table[CC_OP_NB] = {
6745 [CC_OP_DYNAMIC] = { /* should never happen */ },
6746
6747 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6748
6749 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6750 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6751 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6752
6753 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6754 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6755 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6756
6757 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6758 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6759 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6760
6761 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6762 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6763 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6764
6765 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6766 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6767 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6768
6769 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6770 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6771 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6772
6773 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6774 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6775 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6776
6777 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6778 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6779 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6780
6781 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6782 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6783 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6784
6785 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6786 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6787 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6788
6789#ifdef TARGET_X86_64
6790 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6791
6792 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6793
6794 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6795
6796 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6797
6798 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6799
6800 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6801
6802 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6803
6804 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6805
6806 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6807
6808 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6809#endif
6810};
6811#else /* VBOX */
6812/* Sync carefully with cpu.h */
6813CCTable cc_table[CC_OP_NB] = {
6814 /* CC_OP_DYNAMIC */ { 0, 0 },
6815
6816 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6817
6818 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6819 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6820 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6821#ifdef TARGET_X86_64
6822 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6823#else
6824 /* CC_OP_MULQ */ { 0, 0 },
6825#endif
6826
6827 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6828 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6829 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6830#ifdef TARGET_X86_64
6831 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6832#else
6833 /* CC_OP_ADDQ */ { 0, 0 },
6834#endif
6835
6836 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6837 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6838 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6839#ifdef TARGET_X86_64
6840 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6841#else
6842 /* CC_OP_ADCQ */ { 0, 0 },
6843#endif
6844
6845 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6846 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6847 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6848#ifdef TARGET_X86_64
6849 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6850#else
6851 /* CC_OP_SUBQ */ { 0, 0 },
6852#endif
6853
6854 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6855 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6856 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6857#ifdef TARGET_X86_64
6858 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6859#else
6860 /* CC_OP_SBBQ */ { 0, 0 },
6861#endif
6862
6863 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6864 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6865 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6866#ifdef TARGET_X86_64
6867 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6868#else
6869 /* CC_OP_LOGICQ */ { 0, 0 },
6870#endif
6871
6872 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6873 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6874 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6875#ifdef TARGET_X86_64
6876 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6877#else
6878 /* CC_OP_INCQ */ { 0, 0 },
6879#endif
6880
6881 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6882 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6883 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6884#ifdef TARGET_X86_64
6885 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6886#else
6887 /* CC_OP_DECQ */ { 0, 0 },
6888#endif
6889
6890 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6891 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6892 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6893#ifdef TARGET_X86_64
6894 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6895#else
6896 /* CC_OP_SHLQ */ { 0, 0 },
6897#endif
6898
6899 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6900 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6901 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6902#ifdef TARGET_X86_64
6903 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6904#else
6905 /* CC_OP_SARQ */ { 0, 0 },
6906#endif
6907};
6908#endif /* VBOX */
6909