VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@36290

Last change on this file since 36290 was 36175, checked in by vboxsync, 14 years ago

rem: Synced up to v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b) from git://git.savannah.nongnu.org/qemu.git.

  • Property svn:eol-style set to native
File size: 193.2 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "exec-all.h"
32#include "host-utils.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39//#define DEBUG_PCALL
40
41
42#ifdef DEBUG_PCALL
43# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
44# define LOG_PCALL_STATE(env) \
45 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
46#else
47# define LOG_PCALL(...) do { } while (0)
48# define LOG_PCALL_STATE(env) do { } while (0)
49#endif
50
51
52#if 0
53#define raise_exception_err(a, b)\
54do {\
55 qemu_log("raise_exception line=%d\n", __LINE__);\
56 (raise_exception_err)(a, b);\
57} while (0)
58#endif
59
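/* Parity-flag lookup table: parity_table[b] is CC_P when the byte value b
   contains an even number of set bits (x86 PF is even parity of the low byte). */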
60static const uint8_t parity_table[256] = {
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
81 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
89 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
90 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
93};
94
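/* RCL/RCR rotate counts are taken modulo (operand width + 1); the tables below
   fold a masked 5-bit count for 16-bit (mod 17) and 8-bit (mod 9) operands. */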
95/* modulo 17 table */
96static const uint8_t rclw_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 9,10,11,12,13,14,15,
99 16, 0, 1, 2, 3, 4, 5, 6,
100 7, 8, 9,10,11,12,13,14,
101};
102
103/* modulo 9 table */
104static const uint8_t rclb_table[32] = {
105 0, 1, 2, 3, 4, 5, 6, 7,
106 8, 0, 1, 2, 3, 4, 5, 6,
107 7, 8, 0, 1, 2, 3, 4, 5,
108 6, 7, 8, 0, 1, 2, 3, 4,
109};
110
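/* x87 load-constant values: 0, 1, pi, log10(2), ln(2), log2(e) and log2(10),
   in the order used by the FLD-constant instructions. */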
111static const CPU86_LDouble f15rk[7] =
112{
113 0.00000000000000000000L,
114 1.00000000000000000000L,
115 3.14159265358979323851L, /*pi*/
116 0.30102999566398119523L, /*lg2*/
117 0.69314718055994530943L, /*ln2*/
118 1.44269504088896340739L, /*l2e*/
119 3.32192809488736234781L, /*l2t*/
120};
121
122/* broken thread support */
123
124static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
125
126void helper_lock(void)
127{
128 spin_lock(&global_cpu_lock);
129}
130
131void helper_unlock(void)
132{
133 spin_unlock(&global_cpu_lock);
134}
135
136void helper_write_eflags(target_ulong t0, uint32_t update_mask)
137{
138 load_eflags(t0, update_mask);
139}
140
141target_ulong helper_read_eflags(void)
142{
143 uint32_t eflags;
144 eflags = helper_cc_compute_all(CC_OP);
145 eflags |= (DF & DF_MASK);
146 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
147 return eflags;
148}
149
150#ifdef VBOX
151
152void helper_write_eflags_vme(target_ulong t0)
153{
154 unsigned int new_eflags = t0;
155
156 assert(env->eflags & (1<<VM_SHIFT));
157
158 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
159 /* if TF will be set -> #GP */
160 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
161 || (new_eflags & TF_MASK)) {
162 raise_exception(EXCP0D_GPF);
163 } else {
164 load_eflags(new_eflags,
165 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
166
167 if (new_eflags & IF_MASK) {
168 env->eflags |= VIF_MASK;
169 } else {
170 env->eflags &= ~VIF_MASK;
171 }
172 }
173}
174
175target_ulong helper_read_eflags_vme(void)
176{
177 uint32_t eflags;
178 eflags = helper_cc_compute_all(CC_OP);
179 eflags |= (DF & DF_MASK);
180 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
181 if (env->eflags & VIF_MASK)
182 eflags |= IF_MASK;
183 else
184 eflags &= ~IF_MASK;
185
186 /* According to AMD manual, should be read with IOPL == 3 */
187 eflags |= (3 << IOPL_SHIFT);
188
189 /* We only use helper_read_eflags_vme() in 16-bit mode */
190 return eflags & 0xffff;
191}
192
193void helper_dump_state()
194{
195 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
196 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
197 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
198 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
199 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
200 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
201 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
202}
203
204#endif /* VBOX */
205
206/* return non-zero on error */
207static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
208 int selector)
209{
210 SegmentCache *dt;
211 int index;
212 target_ulong ptr;
213
214#ifdef VBOX
215 /* Trying to load a selector with RPL=1? */
216 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
217 {
218 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
219 selector = selector & 0xfffc;
220 }
221#endif /* VBOX */
222
223 if (selector & 0x4)
224 dt = &env->ldt;
225 else
226 dt = &env->gdt;
227 index = selector & ~7;
228 if ((index + 7) > dt->limit)
229 return -1;
230 ptr = dt->base + index;
231 *e1_ptr = ldl_kernel(ptr);
232 *e2_ptr = ldl_kernel(ptr + 4);
233 return 0;
234}
235
236static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
237{
238 unsigned int limit;
239 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
240 if (e2 & DESC_G_MASK)
241 limit = (limit << 12) | 0xfff;
242 return limit;
243}
244
245static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
246{
247 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
248}
249
250static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
251{
252 sc->base = get_seg_base(e1, e2);
253 sc->limit = get_seg_limit(e1, e2);
254 sc->flags = e2;
255}
256
257/* init the segment cache in vm86 mode. */
258static inline void load_seg_vm(int seg, int selector)
259{
260 selector &= 0xffff;
261#ifdef VBOX
262 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
263 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
264 flags |= (3 << DESC_DPL_SHIFT);
265
266 cpu_x86_load_seg_cache(env, seg, selector,
267 (selector << 4), 0xffff, flags);
268#else /* VBOX */
269 cpu_x86_load_seg_cache(env, seg, selector,
270 (selector << 4), 0xffff, 0);
271#endif /* VBOX */
272}
273
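/* Fetch the ring-dpl stack pointer (SS:ESP) from the current TSS; the shift
   derived from the TSS type selects the 16-bit or 32-bit TSS layout. */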
274static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
275 uint32_t *esp_ptr, int dpl)
276{
277#ifndef VBOX
278 int type, index, shift;
279#else
280 unsigned int type, index, shift;
281#endif
282
283#if 0
284 {
285 int i;
286 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
287 for(i=0;i<env->tr.limit;i++) {
288 printf("%02x ", env->tr.base[i]);
289 if ((i & 7) == 7) printf("\n");
290 }
291 printf("\n");
292 }
293#endif
294
295 if (!(env->tr.flags & DESC_P_MASK))
296 cpu_abort(env, "invalid tss");
297 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
298 if ((type & 7) != 1)
299 cpu_abort(env, "invalid tss type");
300 shift = type >> 3;
301 index = (dpl * 4 + 2) << shift;
302 if (index + (4 << shift) - 1 > env->tr.limit)
303 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
304 if (shift == 0) {
305 *esp_ptr = lduw_kernel(env->tr.base + index);
306 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
307 } else {
308 *esp_ptr = ldl_kernel(env->tr.base + index);
309 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
310 }
311}
312
313/* XXX: merge with load_seg() */
314static void tss_load_seg(int seg_reg, int selector)
315{
316 uint32_t e1, e2;
317 int rpl, dpl, cpl;
318
319#ifdef VBOX
320 e1 = e2 = 0; /* silence a possible 'may be used uninitialized' gcc warning */
321 cpl = env->hflags & HF_CPL_MASK;
322 /* Trying to load a selector with RPL=1? */
323 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
324 {
325 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
326 selector = selector & 0xfffc;
327 }
328#endif /* VBOX */
329
330 if ((selector & 0xfffc) != 0) {
331 if (load_segment(&e1, &e2, selector) != 0)
332 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
333 if (!(e2 & DESC_S_MASK))
334 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
335 rpl = selector & 3;
336 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
337 cpl = env->hflags & HF_CPL_MASK;
338 if (seg_reg == R_CS) {
339 if (!(e2 & DESC_CS_MASK))
340 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
341 /* XXX: is it correct ? */
342 if (dpl != rpl)
343 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
344 if ((e2 & DESC_C_MASK) && dpl > rpl)
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 } else if (seg_reg == R_SS) {
347 /* SS must be writable data */
348 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 if (dpl != cpl || dpl != rpl)
351 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
352 } else {
353 /* not readable code */
354 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* if data or non conforming code, checks the rights */
357 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
358 if (dpl < cpl || dpl < rpl)
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 }
361 }
362 if (!(e2 & DESC_P_MASK))
363 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
364 cpu_x86_load_seg_cache(env, seg_reg, selector,
365 get_seg_base(e1, e2),
366 get_seg_limit(e1, e2),
367 e2);
368 } else {
369 if (seg_reg == R_SS || seg_reg == R_CS)
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371#ifdef VBOX
372# if 0
373 /** @todo: we currently ignore loading of null selectors; need to check once what the correct behavior is */
374 cpu_x86_load_seg_cache(env, seg_reg, selector,
375 0, 0, 0);
376# endif
377#endif /* VBOX */
378 }
379}
380
381#define SWITCH_TSS_JMP 0
382#define SWITCH_TSS_IRET 1
383#define SWITCH_TSS_CALL 2
384
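/* Hardware task switch: save the outgoing context into the current TSS, then
   load registers, segments and the LDT from the TSS referenced by tss_selector. */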
385/* XXX: restore CPU state in registers (PowerPC case) */
386static void switch_tss(int tss_selector,
387 uint32_t e1, uint32_t e2, int source,
388 uint32_t next_eip)
389{
390 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
391 target_ulong tss_base;
392 uint32_t new_regs[8], new_segs[6];
393 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
394 uint32_t old_eflags, eflags_mask;
395 SegmentCache *dt;
396#ifndef VBOX
397 int index;
398#else
399 unsigned int index;
400#endif
401 target_ulong ptr;
402
403 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
404 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
405
406 /* if task gate, we read the TSS segment and we load it */
407 if (type == 5) {
408 if (!(e2 & DESC_P_MASK))
409 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
410 tss_selector = e1 >> 16;
411 if (tss_selector & 4)
412 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
413 if (load_segment(&e1, &e2, tss_selector) != 0)
414 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
415 if (e2 & DESC_S_MASK)
416 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418 if ((type & 7) != 1)
419 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
420 }
421
422 if (!(e2 & DESC_P_MASK))
423 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
424
425 if (type & 8)
426 tss_limit_max = 103;
427 else
428 tss_limit_max = 43;
429 tss_limit = get_seg_limit(e1, e2);
430 tss_base = get_seg_base(e1, e2);
431 if ((tss_selector & 4) != 0 ||
432 tss_limit < tss_limit_max)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
435 if (old_type & 8)
436 old_tss_limit_max = 103;
437 else
438 old_tss_limit_max = 43;
439
440 /* read all the registers from the new TSS */
441 if (type & 8) {
442 /* 32 bit */
443 new_cr3 = ldl_kernel(tss_base + 0x1c);
444 new_eip = ldl_kernel(tss_base + 0x20);
445 new_eflags = ldl_kernel(tss_base + 0x24);
446 for(i = 0; i < 8; i++)
447 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
448 for(i = 0; i < 6; i++)
449 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
450 new_ldt = lduw_kernel(tss_base + 0x60);
451 new_trap = ldl_kernel(tss_base + 0x64);
452 } else {
453 /* 16 bit */
454 new_cr3 = 0;
455 new_eip = lduw_kernel(tss_base + 0x0e);
456 new_eflags = lduw_kernel(tss_base + 0x10);
457 for(i = 0; i < 8; i++)
458 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
459 for(i = 0; i < 4; i++)
460 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
461 new_ldt = lduw_kernel(tss_base + 0x2a);
462 new_segs[R_FS] = 0;
463 new_segs[R_GS] = 0;
464 new_trap = 0;
465 }
466
467 /* NOTE: we must avoid memory exceptions during the task switch,
468 so we make dummy accesses before */
469 /* XXX: it can still fail in some cases, so a bigger hack is
470 necessary to validate the TLB after having done the accesses */
471
472 v1 = ldub_kernel(env->tr.base);
473 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
474 stb_kernel(env->tr.base, v1);
475 stb_kernel(env->tr.base + old_tss_limit_max, v2);
476
477 /* clear busy bit (it is restartable) */
478 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
479 target_ulong ptr;
480 uint32_t e2;
481 ptr = env->gdt.base + (env->tr.selector & ~7);
482 e2 = ldl_kernel(ptr + 4);
483 e2 &= ~DESC_TSS_BUSY_MASK;
484 stl_kernel(ptr + 4, e2);
485 }
486 old_eflags = compute_eflags();
487 if (source == SWITCH_TSS_IRET)
488 old_eflags &= ~NT_MASK;
489
490 /* save the current state in the old TSS */
491 if (type & 8) {
492 /* 32 bit */
493 stl_kernel(env->tr.base + 0x20, next_eip);
494 stl_kernel(env->tr.base + 0x24, old_eflags);
495 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
496 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
497 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
498 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
499 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
500 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
501 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
502 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
503 for(i = 0; i < 6; i++)
504 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
505#ifdef VBOX
506 /* Must store the ldt as it gets reloaded and might have been changed. */
507 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
508#endif
509#if defined(VBOX) && defined(DEBUG)
510 printf("TSS 32 bits switch\n");
511 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
512#endif
513 } else {
514 /* 16 bit */
515 stw_kernel(env->tr.base + 0x0e, next_eip);
516 stw_kernel(env->tr.base + 0x10, old_eflags);
517 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
518 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
519 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
520 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
521 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
522 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
523 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
524 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
525 for(i = 0; i < 4; i++)
526 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
527#ifdef VBOX
528 /* Must store the ldt as it gets reloaded and might have been changed. */
529 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
530#endif
531 }
532
533 /* now if an exception occurs, it will occur in the next task
534 context */
535
536 if (source == SWITCH_TSS_CALL) {
537 stw_kernel(tss_base, env->tr.selector);
538 new_eflags |= NT_MASK;
539 }
540
541 /* set busy bit */
542 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
543 target_ulong ptr;
544 uint32_t e2;
545 ptr = env->gdt.base + (tss_selector & ~7);
546 e2 = ldl_kernel(ptr + 4);
547 e2 |= DESC_TSS_BUSY_MASK;
548 stl_kernel(ptr + 4, e2);
549 }
550
551 /* set the new CPU state */
552 /* from this point on, any exception that occurs can cause problems */
553 env->cr[0] |= CR0_TS_MASK;
554 env->hflags |= HF_TS_MASK;
555 env->tr.selector = tss_selector;
556 env->tr.base = tss_base;
557 env->tr.limit = tss_limit;
558 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
559
560 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
561 cpu_x86_update_cr3(env, new_cr3);
562 }
563
564 /* load all registers without an exception, then reload them with
565 possible exception */
566 env->eip = new_eip;
567 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
568 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
569 if (!(type & 8))
570 eflags_mask &= 0xffff;
571 load_eflags(new_eflags, eflags_mask);
572 /* XXX: what to do in 16 bit case ? */
573 EAX = new_regs[0];
574 ECX = new_regs[1];
575 EDX = new_regs[2];
576 EBX = new_regs[3];
577 ESP = new_regs[4];
578 EBP = new_regs[5];
579 ESI = new_regs[6];
580 EDI = new_regs[7];
581 if (new_eflags & VM_MASK) {
582 for(i = 0; i < 6; i++)
583 load_seg_vm(i, new_segs[i]);
584 /* in vm86, CPL is always 3 */
585 cpu_x86_set_cpl(env, 3);
586 } else {
587 /* CPL is set to the RPL of CS */
588 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
589 /* load just the selectors first, as the rest may trigger exceptions */
590 for(i = 0; i < 6; i++)
591 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
592 }
593
594 env->ldt.selector = new_ldt & ~4;
595 env->ldt.base = 0;
596 env->ldt.limit = 0;
597 env->ldt.flags = 0;
598
599 /* load the LDT */
600 if (new_ldt & 4)
601 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
602
603 if ((new_ldt & 0xfffc) != 0) {
604 dt = &env->gdt;
605 index = new_ldt & ~7;
606 if ((index + 7) > dt->limit)
607 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
608 ptr = dt->base + index;
609 e1 = ldl_kernel(ptr);
610 e2 = ldl_kernel(ptr + 4);
611 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
612 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
613 if (!(e2 & DESC_P_MASK))
614 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
615 load_seg_cache_raw_dt(&env->ldt, e1, e2);
616 }
617
618 /* load the segments */
619 if (!(new_eflags & VM_MASK)) {
620 tss_load_seg(R_CS, new_segs[R_CS]);
621 tss_load_seg(R_SS, new_segs[R_SS]);
622 tss_load_seg(R_ES, new_segs[R_ES]);
623 tss_load_seg(R_DS, new_segs[R_DS]);
624 tss_load_seg(R_FS, new_segs[R_FS]);
625 tss_load_seg(R_GS, new_segs[R_GS]);
626 }
627
628 /* check that EIP is in the CS segment limits */
629 if (new_eip > env->segs[R_CS].limit) {
630 /* XXX: different exception if CALL ? */
631 raise_exception_err(EXCP0D_GPF, 0);
632 }
633
634#ifndef CONFIG_USER_ONLY
635 /* reset local breakpoints */
636 if (env->dr[7] & 0x55) {
637 for (i = 0; i < 4; i++) {
638 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
639 hw_breakpoint_remove(env, i);
640 }
641 env->dr[7] &= ~0x55;
642 }
643#endif
644}
645
646/* check if Port I/O is allowed in TSS */
647static inline void check_io(int addr, int size)
648{
649#ifndef VBOX
650 int io_offset, val, mask;
651#else
652 int val, mask;
653 unsigned int io_offset;
654#endif /* VBOX */
655
656 /* TSS must be a valid 32 bit one */
657 if (!(env->tr.flags & DESC_P_MASK) ||
658 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
659 env->tr.limit < 103)
660 goto fail;
661 io_offset = lduw_kernel(env->tr.base + 0x66);
662 io_offset += (addr >> 3);
663 /* Note: the check needs two bytes */
664 if ((io_offset + 1) > env->tr.limit)
665 goto fail;
666 val = lduw_kernel(env->tr.base + io_offset);
667 val >>= (addr & 7);
668 mask = (1 << size) - 1;
669 /* all bits must be zero to allow the I/O */
670 if ((val & mask) != 0) {
671 fail:
672 raise_exception_err(EXCP0D_GPF, 0);
673 }
674}
675
676#ifdef VBOX
677/* Keep in sync with gen_check_external_event() */
678void helper_check_external_event()
679{
680 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
681 | CPU_INTERRUPT_EXTERNAL_TIMER
682 | CPU_INTERRUPT_EXTERNAL_DMA))
683 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
684 && (env->eflags & IF_MASK)
685 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
686 {
687 helper_external_event();
688 }
689
690}
691
692void helper_sync_seg(uint32_t reg)
693{
694 if (env->segs[reg].newselector)
695 sync_seg(env, reg, env->segs[reg].newselector);
696}
697#endif /* VBOX */
698
699void helper_check_iob(uint32_t t0)
700{
701 check_io(t0, 1);
702}
703
704void helper_check_iow(uint32_t t0)
705{
706 check_io(t0, 2);
707}
708
709void helper_check_iol(uint32_t t0)
710{
711 check_io(t0, 4);
712}
713
714void helper_outb(uint32_t port, uint32_t data)
715{
716 cpu_outb(env, port, data & 0xff);
717}
718
719target_ulong helper_inb(uint32_t port)
720{
721 return cpu_inb(env, port);
722}
723
724void helper_outw(uint32_t port, uint32_t data)
725{
726 cpu_outw(env, port, data & 0xffff);
727}
728
729target_ulong helper_inw(uint32_t port)
730{
731 return cpu_inw(env, port);
732}
733
734void helper_outl(uint32_t port, uint32_t data)
735{
736 cpu_outl(env, port, data);
737}
738
739target_ulong helper_inl(uint32_t port)
740{
741 return cpu_inl(env, port);
742}
743
744static inline unsigned int get_sp_mask(unsigned int e2)
745{
746 if (e2 & DESC_B_MASK)
747 return 0xffffffff;
748 else
749 return 0xffff;
750}
751
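/* Exceptions 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC) push an error code. */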
752static int exeption_has_error_code(int intno)
753{
754 switch(intno) {
755 case 8:
756 case 10:
757 case 11:
758 case 12:
759 case 13:
760 case 14:
761 case 17:
762 return 1;
763 }
764 return 0;
765}
766
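/* SET_ESP updates only the part of the stack pointer selected by sp_mask:
   a 16-bit SS keeps the upper ESP bits, and a 32-bit value zero-extends on 64-bit targets. */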
767#ifdef TARGET_X86_64
768#define SET_ESP(val, sp_mask)\
769do {\
770 if ((sp_mask) == 0xffff)\
771 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
772 else if ((sp_mask) == 0xffffffffLL)\
773 ESP = (uint32_t)(val);\
774 else\
775 ESP = (val);\
776} while (0)
777#else
778#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
779#endif
780
781/* in 64-bit machines, this can overflow. So this segment addition macro
782 * can be used to trim the value to 32-bit whenever needed */
783#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
784
785/* XXX: add an is_user flag to have proper security support */
786#define PUSHW(ssp, sp, sp_mask, val)\
787{\
788 sp -= 2;\
789 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
790}
791
792#define PUSHL(ssp, sp, sp_mask, val)\
793{\
794 sp -= 4;\
795 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
796}
797
798#define POPW(ssp, sp, sp_mask, val)\
799{\
800 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
801 sp += 2;\
802}
803
804#define POPL(ssp, sp, sp_mask, val)\
805{\
806 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
807 sp += 4;\
808}
809
810/* protected mode interrupt */
811static void do_interrupt_protected(int intno, int is_int, int error_code,
812 unsigned int next_eip, int is_hw)
813{
814 SegmentCache *dt;
815 target_ulong ptr, ssp;
816 int type, dpl, selector, ss_dpl, cpl;
817 int has_error_code, new_stack, shift;
818 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
819 uint32_t old_eip, sp_mask;
820
821#ifdef VBOX
822 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
823 cpu_loop_exit();
824#endif
825
826 has_error_code = 0;
827 if (!is_int && !is_hw)
828 has_error_code = exeption_has_error_code(intno);
829 if (is_int)
830 old_eip = next_eip;
831 else
832 old_eip = env->eip;
833
834 dt = &env->idt;
835#ifndef VBOX
836 if (intno * 8 + 7 > dt->limit)
837#else
838 if ((unsigned)intno * 8 + 7 > dt->limit)
839#endif
840 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
841 ptr = dt->base + intno * 8;
842 e1 = ldl_kernel(ptr);
843 e2 = ldl_kernel(ptr + 4);
844 /* check gate type */
845 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
846 switch(type) {
847 case 5: /* task gate */
848 /* must do that check here to return the correct error code */
849 if (!(e2 & DESC_P_MASK))
850 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
851 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
852 if (has_error_code) {
853 int type;
854 uint32_t mask;
855 /* push the error code */
856 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
857 shift = type >> 3;
858 if (env->segs[R_SS].flags & DESC_B_MASK)
859 mask = 0xffffffff;
860 else
861 mask = 0xffff;
862 esp = (ESP - (2 << shift)) & mask;
863 ssp = env->segs[R_SS].base + esp;
864 if (shift)
865 stl_kernel(ssp, error_code);
866 else
867 stw_kernel(ssp, error_code);
868 SET_ESP(esp, mask);
869 }
870 return;
871 case 6: /* 286 interrupt gate */
872 case 7: /* 286 trap gate */
873 case 14: /* 386 interrupt gate */
874 case 15: /* 386 trap gate */
875 break;
876 default:
877 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
878 break;
879 }
880 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
881 cpl = env->hflags & HF_CPL_MASK;
882 /* check privilege if software int */
883 if (is_int && dpl < cpl)
884 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
885 /* check valid bit */
886 if (!(e2 & DESC_P_MASK))
887 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
888 selector = e1 >> 16;
889 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
890 if ((selector & 0xfffc) == 0)
891 raise_exception_err(EXCP0D_GPF, 0);
892
893 if (load_segment(&e1, &e2, selector) != 0)
894 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
895 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
896 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
897 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
898 if (dpl > cpl)
899 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
900 if (!(e2 & DESC_P_MASK))
901 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
902 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
903 /* to inner privilege */
904 get_ss_esp_from_tss(&ss, &esp, dpl);
905 if ((ss & 0xfffc) == 0)
906 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
907 if ((ss & 3) != dpl)
908 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
909 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
910 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
911 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
912 if (ss_dpl != dpl)
913 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
914 if (!(ss_e2 & DESC_S_MASK) ||
915 (ss_e2 & DESC_CS_MASK) ||
916 !(ss_e2 & DESC_W_MASK))
917 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
918 if (!(ss_e2 & DESC_P_MASK))
919#ifdef VBOX /* See page 3-477 of 253666.pdf */
920 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
921#else
922 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
923#endif
924 new_stack = 1;
925 sp_mask = get_sp_mask(ss_e2);
926 ssp = get_seg_base(ss_e1, ss_e2);
927#if defined(VBOX) && defined(DEBUG)
928 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
929#endif
930 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
931 /* to same privilege */
932 if (env->eflags & VM_MASK)
933 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
934 new_stack = 0;
935 sp_mask = get_sp_mask(env->segs[R_SS].flags);
936 ssp = env->segs[R_SS].base;
937 esp = ESP;
938 dpl = cpl;
939 } else {
940 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
941 new_stack = 0; /* avoid warning */
942 sp_mask = 0; /* avoid warning */
943 ssp = 0; /* avoid warning */
944 esp = 0; /* avoid warning */
945 }
946
947 shift = type >> 3;
948
949#if 0
950 /* XXX: check that enough room is available */
951 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
952 if (env->eflags & VM_MASK)
953 push_size += 8;
954 push_size <<= shift;
955#endif
956 if (shift == 1) {
957 if (new_stack) {
958 if (env->eflags & VM_MASK) {
959 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
960 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
961 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
962 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
963 }
964 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
965 PUSHL(ssp, esp, sp_mask, ESP);
966 }
967 PUSHL(ssp, esp, sp_mask, compute_eflags());
968 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
969 PUSHL(ssp, esp, sp_mask, old_eip);
970 if (has_error_code) {
971 PUSHL(ssp, esp, sp_mask, error_code);
972 }
973 } else {
974 if (new_stack) {
975 if (env->eflags & VM_MASK) {
976 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
977 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
978 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
979 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
980 }
981 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
982 PUSHW(ssp, esp, sp_mask, ESP);
983 }
984 PUSHW(ssp, esp, sp_mask, compute_eflags());
985 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
986 PUSHW(ssp, esp, sp_mask, old_eip);
987 if (has_error_code) {
988 PUSHW(ssp, esp, sp_mask, error_code);
989 }
990 }
991
992 if (new_stack) {
993 if (env->eflags & VM_MASK) {
994 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
995 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
996 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
997 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
998 }
999 ss = (ss & ~3) | dpl;
1000 cpu_x86_load_seg_cache(env, R_SS, ss,
1001 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1002 }
1003 SET_ESP(esp, sp_mask);
1004
1005 selector = (selector & ~3) | dpl;
1006 cpu_x86_load_seg_cache(env, R_CS, selector,
1007 get_seg_base(e1, e2),
1008 get_seg_limit(e1, e2),
1009 e2);
1010 cpu_x86_set_cpl(env, dpl);
1011 env->eip = offset;
1012
1013 /* interrupt gates clear the IF flag */
1014 if ((type & 1) == 0) {
1015 env->eflags &= ~IF_MASK;
1016 }
1017#ifndef VBOX
1018 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1019#else
1020 /*
1021 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1022 * gets confused by seemingly changed EFLAGS. See #3491 and
1023 * public bug #2341.
1024 */
1025 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1026#endif
1027}
1028
1029#ifdef VBOX
1030
1031/* check if VME interrupt redirection is enabled in TSS */
1032DECLINLINE(bool) is_vme_irq_redirected(int intno)
1033{
1034 unsigned int io_offset, intredir_offset;
1035 unsigned char val, mask;
1036
1037 /* TSS must be a valid 32 bit one */
1038 if (!(env->tr.flags & DESC_P_MASK) ||
1039 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1040 env->tr.limit < 103)
1041 goto fail;
1042 io_offset = lduw_kernel(env->tr.base + 0x66);
1043 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1044 if (io_offset < 0x68 + 0x20)
1045 io_offset = 0x68 + 0x20;
1046 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1047 intredir_offset = io_offset - 0x20;
1048
1049 intredir_offset += (intno >> 3);
1050 if ((intredir_offset) > env->tr.limit)
1051 goto fail;
1052
1053 val = ldub_kernel(env->tr.base + intredir_offset);
1054 mask = 1 << (unsigned char)(intno & 7);
1055
1056 /* bit set means no redirection. */
1057 if ((val & mask) != 0) {
1058 return false;
1059 }
1060 return true;
1061
1062fail:
1063 raise_exception_err(EXCP0D_GPF, 0);
1064 return true;
1065}
1066
1067/* V86 mode software interrupt with CR4.VME=1 */
1068static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1069{
1070 target_ulong ptr, ssp;
1071 int selector;
1072 uint32_t offset, esp;
1073 uint32_t old_cs, old_eflags;
1074 uint32_t iopl;
1075
1076 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1077
1078 if (!is_vme_irq_redirected(intno))
1079 {
1080 if (iopl == 3)
1081 {
1082 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1083 return;
1084 }
1085 else
1086 raise_exception_err(EXCP0D_GPF, 0);
1087 }
1088
1089 /* virtual mode idt is at linear address 0 */
1090 ptr = 0 + intno * 4;
1091 offset = lduw_kernel(ptr);
1092 selector = lduw_kernel(ptr + 2);
1093 esp = ESP;
1094 ssp = env->segs[R_SS].base;
1095 old_cs = env->segs[R_CS].selector;
1096
1097 old_eflags = compute_eflags();
1098 if (iopl < 3)
1099 {
1100 /* copy VIF into IF and set IOPL to 3 */
1101 if (env->eflags & VIF_MASK)
1102 old_eflags |= IF_MASK;
1103 else
1104 old_eflags &= ~IF_MASK;
1105
1106 old_eflags |= (3 << IOPL_SHIFT);
1107 }
1108
1109 /* XXX: use SS segment size ? */
1110 PUSHW(ssp, esp, 0xffff, old_eflags);
1111 PUSHW(ssp, esp, 0xffff, old_cs);
1112 PUSHW(ssp, esp, 0xffff, next_eip);
1113
1114 /* update processor state */
1115 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1116 env->eip = offset;
1117 env->segs[R_CS].selector = selector;
1118 env->segs[R_CS].base = (selector << 4);
1119 env->eflags &= ~(TF_MASK | RF_MASK);
1120
1121 if (iopl < 3)
1122 env->eflags &= ~VIF_MASK;
1123 else
1124 env->eflags &= ~IF_MASK;
1125}
1126
1127#endif /* VBOX */
1128
1129#ifdef TARGET_X86_64
1130
1131#define PUSHQ(sp, val)\
1132{\
1133 sp -= 8;\
1134 stq_kernel(sp, (val));\
1135}
1136
1137#define POPQ(sp, val)\
1138{\
1139 val = ldq_kernel(sp);\
1140 sp += 8;\
1141}
1142
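/* Fetch a stack pointer from the 64-bit TSS: level 0-2 selects RSP0-RSP2 and
   level 4-10 selects IST1-IST7 (callers pass ist + 3). */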
1143static inline target_ulong get_rsp_from_tss(int level)
1144{
1145 int index;
1146
1147#if 0
1148 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1149 env->tr.base, env->tr.limit);
1150#endif
1151
1152 if (!(env->tr.flags & DESC_P_MASK))
1153 cpu_abort(env, "invalid tss");
1154 index = 8 * level + 4;
1155 if ((index + 7) > env->tr.limit)
1156 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1157 return ldq_kernel(env->tr.base + index);
1158}
1159
1160/* 64 bit interrupt */
1161static void do_interrupt64(int intno, int is_int, int error_code,
1162 target_ulong next_eip, int is_hw)
1163{
1164 SegmentCache *dt;
1165 target_ulong ptr;
1166 int type, dpl, selector, cpl, ist;
1167 int has_error_code, new_stack;
1168 uint32_t e1, e2, e3, ss;
1169 target_ulong old_eip, esp, offset;
1170
1171#ifdef VBOX
1172 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1173 cpu_loop_exit();
1174#endif
1175
1176 has_error_code = 0;
1177 if (!is_int && !is_hw)
1178 has_error_code = exeption_has_error_code(intno);
1179 if (is_int)
1180 old_eip = next_eip;
1181 else
1182 old_eip = env->eip;
1183
1184 dt = &env->idt;
1185 if (intno * 16 + 15 > dt->limit)
1186 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1187 ptr = dt->base + intno * 16;
1188 e1 = ldl_kernel(ptr);
1189 e2 = ldl_kernel(ptr + 4);
1190 e3 = ldl_kernel(ptr + 8);
1191 /* check gate type */
1192 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1193 switch(type) {
1194 case 14: /* 386 interrupt gate */
1195 case 15: /* 386 trap gate */
1196 break;
1197 default:
1198 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1199 break;
1200 }
1201 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1202 cpl = env->hflags & HF_CPL_MASK;
1203 /* check privilege if software int */
1204 if (is_int && dpl < cpl)
1205 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1206 /* check valid bit */
1207 if (!(e2 & DESC_P_MASK))
1208 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1209 selector = e1 >> 16;
1210 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1211 ist = e2 & 7;
1212 if ((selector & 0xfffc) == 0)
1213 raise_exception_err(EXCP0D_GPF, 0);
1214
1215 if (load_segment(&e1, &e2, selector) != 0)
1216 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1217 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1218 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1219 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1220 if (dpl > cpl)
1221 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1222 if (!(e2 & DESC_P_MASK))
1223 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1224 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1225 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1226 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1227 /* to inner privilege */
1228 if (ist != 0)
1229 esp = get_rsp_from_tss(ist + 3);
1230 else
1231 esp = get_rsp_from_tss(dpl);
1232 esp &= ~0xfLL; /* align stack */
1233 ss = 0;
1234 new_stack = 1;
1235 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1236 /* to same privilege */
1237 if (env->eflags & VM_MASK)
1238 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1239 new_stack = 0;
1240 if (ist != 0)
1241 esp = get_rsp_from_tss(ist + 3);
1242 else
1243 esp = ESP;
1244 esp &= ~0xfLL; /* align stack */
1245 dpl = cpl;
1246 } else {
1247 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1248 new_stack = 0; /* avoid warning */
1249 esp = 0; /* avoid warning */
1250 }
1251
1252 PUSHQ(esp, env->segs[R_SS].selector);
1253 PUSHQ(esp, ESP);
1254 PUSHQ(esp, compute_eflags());
1255 PUSHQ(esp, env->segs[R_CS].selector);
1256 PUSHQ(esp, old_eip);
1257 if (has_error_code) {
1258 PUSHQ(esp, error_code);
1259 }
1260
1261 if (new_stack) {
1262 ss = 0 | dpl;
1263 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1264 }
1265 ESP = esp;
1266
1267 selector = (selector & ~3) | dpl;
1268 cpu_x86_load_seg_cache(env, R_CS, selector,
1269 get_seg_base(e1, e2),
1270 get_seg_limit(e1, e2),
1271 e2);
1272 cpu_x86_set_cpl(env, dpl);
1273 env->eip = offset;
1274
1275 /* interrupt gates clear the IF flag */
1276 if ((type & 1) == 0) {
1277 env->eflags &= ~IF_MASK;
1278 }
1279#ifndef VBOX
1280 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1281#else /* VBOX */
1282 /*
1283 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1284 * gets confused by seemingly changed EFLAGS. See #3491 and
1285 * public bug #2341.
1286 */
1287 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1288#endif /* VBOX */
1289}
1290#endif
1291
1292#ifdef TARGET_X86_64
1293#if defined(CONFIG_USER_ONLY)
1294void helper_syscall(int next_eip_addend)
1295{
1296 env->exception_index = EXCP_SYSCALL;
1297 env->exception_next_eip = env->eip + next_eip_addend;
1298 cpu_loop_exit();
1299}
1300#else
1301void helper_syscall(int next_eip_addend)
1302{
1303 int selector;
1304
1305 if (!(env->efer & MSR_EFER_SCE)) {
1306 raise_exception_err(EXCP06_ILLOP, 0);
1307 }
1308 selector = (env->star >> 32) & 0xffff;
1309 if (env->hflags & HF_LMA_MASK) {
1310 int code64;
1311
1312 ECX = env->eip + next_eip_addend;
1313 env->regs[11] = compute_eflags();
1314
1315 code64 = env->hflags & HF_CS64_MASK;
1316
1317 cpu_x86_set_cpl(env, 0);
1318 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1319 0, 0xffffffff,
1320 DESC_G_MASK | DESC_P_MASK |
1321 DESC_S_MASK |
1322 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1323 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1324 0, 0xffffffff,
1325 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1326 DESC_S_MASK |
1327 DESC_W_MASK | DESC_A_MASK);
1328 env->eflags &= ~env->fmask;
1329 load_eflags(env->eflags, 0);
1330 if (code64)
1331 env->eip = env->lstar;
1332 else
1333 env->eip = env->cstar;
1334 } else {
1335 ECX = (uint32_t)(env->eip + next_eip_addend);
1336
1337 cpu_x86_set_cpl(env, 0);
1338 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1339 0, 0xffffffff,
1340 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1341 DESC_S_MASK |
1342 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1343 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1344 0, 0xffffffff,
1345 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1346 DESC_S_MASK |
1347 DESC_W_MASK | DESC_A_MASK);
1348 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1349 env->eip = (uint32_t)env->star;
1350 }
1351}
1352#endif
1353#endif
1354
1355#ifdef TARGET_X86_64
1356void helper_sysret(int dflag)
1357{
1358 int cpl, selector;
1359
1360 if (!(env->efer & MSR_EFER_SCE)) {
1361 raise_exception_err(EXCP06_ILLOP, 0);
1362 }
1363 cpl = env->hflags & HF_CPL_MASK;
1364 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1365 raise_exception_err(EXCP0D_GPF, 0);
1366 }
1367 selector = (env->star >> 48) & 0xffff;
1368 if (env->hflags & HF_LMA_MASK) {
1369 if (dflag == 2) {
1370 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1371 0, 0xffffffff,
1372 DESC_G_MASK | DESC_P_MASK |
1373 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1374 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1375 DESC_L_MASK);
1376 env->eip = ECX;
1377 } else {
1378 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1379 0, 0xffffffff,
1380 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1381 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1382 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1383 env->eip = (uint32_t)ECX;
1384 }
1385 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1386 0, 0xffffffff,
1387 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1388 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1389 DESC_W_MASK | DESC_A_MASK);
1390 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1391 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1392 cpu_x86_set_cpl(env, 3);
1393 } else {
1394 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1395 0, 0xffffffff,
1396 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1397 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1398 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1399 env->eip = (uint32_t)ECX;
1400 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1401 0, 0xffffffff,
1402 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1403 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1404 DESC_W_MASK | DESC_A_MASK);
1405 env->eflags |= IF_MASK;
1406 cpu_x86_set_cpl(env, 3);
1407 }
1408#ifdef CONFIG_KQEMU
1409 if (kqemu_is_ok(env)) {
1410 if (env->hflags & HF_LMA_MASK)
1411 CC_OP = CC_OP_EFLAGS;
1412 env->exception_index = -1;
1413 cpu_loop_exit();
1414 }
1415#endif
1416}
1417#endif
1418
1419#ifdef VBOX
1420/**
1421 * Checks and processes external VMM events.
1422 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1423 */
1424void helper_external_event(void)
1425{
1426# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1427 uintptr_t uSP;
1428# ifdef RT_ARCH_AMD64
1429 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1430# else
1431 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1432# endif
1433 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1434# endif
1435 /* Keep in sync with flags checked by gen_check_external_event() */
1436 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1437 {
1438 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1439 ~CPU_INTERRUPT_EXTERNAL_HARD);
1440 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1441 }
1442 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1443 {
1444 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1445 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1446 cpu_exit(env);
1447 }
1448 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1449 {
1450 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1451 ~CPU_INTERRUPT_EXTERNAL_DMA);
1452 remR3DmaRun(env);
1453 }
1454 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1455 {
1456 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1457 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1458 remR3TimersRun(env);
1459 }
1460}
1461/* helper for recording call instruction addresses for later scanning */
1462void helper_record_call()
1463{
1464 if ( !(env->state & CPU_RAW_RING0)
1465 && (env->cr[0] & CR0_PG_MASK)
1466 && !(env->eflags & X86_EFL_IF))
1467 remR3RecordCall(env);
1468}
1469#endif /* VBOX */
1470
1471/* real mode interrupt */
1472static void do_interrupt_real(int intno, int is_int, int error_code,
1473 unsigned int next_eip)
1474{
1475 SegmentCache *dt;
1476 target_ulong ptr, ssp;
1477 int selector;
1478 uint32_t offset, esp;
1479 uint32_t old_cs, old_eip;
1480
1481 /* real mode (simpler !) */
1482 dt = &env->idt;
1483#ifndef VBOX
1484 if (intno * 4 + 3 > dt->limit)
1485#else
1486 if ((unsigned)intno * 4 + 3 > dt->limit)
1487#endif
1488 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1489 ptr = dt->base + intno * 4;
1490 offset = lduw_kernel(ptr);
1491 selector = lduw_kernel(ptr + 2);
1492 esp = ESP;
1493 ssp = env->segs[R_SS].base;
1494 if (is_int)
1495 old_eip = next_eip;
1496 else
1497 old_eip = env->eip;
1498 old_cs = env->segs[R_CS].selector;
1499 /* XXX: use SS segment size ? */
1500 PUSHW(ssp, esp, 0xffff, compute_eflags());
1501 PUSHW(ssp, esp, 0xffff, old_cs);
1502 PUSHW(ssp, esp, 0xffff, old_eip);
1503
1504 /* update processor state */
1505 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1506 env->eip = offset;
1507 env->segs[R_CS].selector = selector;
1508 env->segs[R_CS].base = (selector << 4);
1509 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1510}
1511
1512/* fake user mode interrupt */
1513void do_interrupt_user(int intno, int is_int, int error_code,
1514 target_ulong next_eip)
1515{
1516 SegmentCache *dt;
1517 target_ulong ptr;
1518 int dpl, cpl, shift;
1519 uint32_t e2;
1520
1521 dt = &env->idt;
1522 if (env->hflags & HF_LMA_MASK) {
1523 shift = 4;
1524 } else {
1525 shift = 3;
1526 }
1527 ptr = dt->base + (intno << shift);
1528 e2 = ldl_kernel(ptr + 4);
1529
1530 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1531 cpl = env->hflags & HF_CPL_MASK;
1532 /* check privilege if software int */
1533 if (is_int && dpl < cpl)
1534 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1535
1536 /* Since we emulate only user space, we cannot do more than
1537 exiting the emulation with the suitable exception and error
1538 code */
1539 if (is_int)
1540 EIP = next_eip;
1541}
1542
1543#if !defined(CONFIG_USER_ONLY)
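/* While executing inside an SVM guest (HF_SVMI_MASK), record the event being
   delivered in the VMCB event_inj field unless an injection is already pending;
   do_interrupt() clears the valid bit again once delivery completes. */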
1544static void handle_even_inj(int intno, int is_int, int error_code,
1545 int is_hw, int rm)
1546{
1547 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1548 if (!(event_inj & SVM_EVTINJ_VALID)) {
1549 int type;
1550 if (is_int)
1551 type = SVM_EVTINJ_TYPE_SOFT;
1552 else
1553 type = SVM_EVTINJ_TYPE_EXEPT;
1554 event_inj = intno | type | SVM_EVTINJ_VALID;
1555 if (!rm && exeption_has_error_code(intno)) {
1556 event_inj |= SVM_EVTINJ_VALID_ERR;
1557 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1558 }
1559 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1560 }
1561}
1562#endif
1563
1564/*
1565 * Begin execution of an interruption. is_int is TRUE if coming from
1566 * the int instruction. next_eip is the EIP value AFTER the interrupt
1567 * instruction. It is only relevant if is_int is TRUE.
1568 */
1569void do_interrupt(int intno, int is_int, int error_code,
1570 target_ulong next_eip, int is_hw)
1571{
1572 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1573 if ((env->cr[0] & CR0_PE_MASK)) {
1574 static int count;
1575 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1576 count, intno, error_code, is_int,
1577 env->hflags & HF_CPL_MASK,
1578 env->segs[R_CS].selector, EIP,
1579 (int)env->segs[R_CS].base + EIP,
1580 env->segs[R_SS].selector, ESP);
1581 if (intno == 0x0e) {
1582 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1583 } else {
1584 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1585 }
1586 qemu_log("\n");
1587 log_cpu_state(env, X86_DUMP_CCOP);
1588#if 0
1589 {
1590 int i;
1591 uint8_t *ptr;
1592 qemu_log(" code=");
1593 ptr = env->segs[R_CS].base + env->eip;
1594 for(i = 0; i < 16; i++) {
1595 qemu_log(" %02x", ldub(ptr + i));
1596 }
1597 qemu_log("\n");
1598 }
1599#endif
1600 count++;
1601 }
1602 }
1603#ifdef VBOX
1604 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1605 if (is_int) {
1606 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1607 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1608 } else {
1609 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1610 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1611 }
1612 }
1613#endif
1614 if (env->cr[0] & CR0_PE_MASK) {
1615#if !defined(CONFIG_USER_ONLY)
1616 if (env->hflags & HF_SVMI_MASK)
1617 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1618#endif
1619#ifdef TARGET_X86_64
1620 if (env->hflags & HF_LMA_MASK) {
1621 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1622 } else
1623#endif
1624 {
1625#ifdef VBOX
1626 /* int xx *, v86 code and VME enabled? */
1627 if ( (env->eflags & VM_MASK)
1628 && (env->cr[4] & CR4_VME_MASK)
1629 && is_int
1630 && !is_hw
1631 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1632 )
1633 do_soft_interrupt_vme(intno, error_code, next_eip);
1634 else
1635#endif /* VBOX */
1636 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1637 }
1638 } else {
1639#if !defined(CONFIG_USER_ONLY)
1640 if (env->hflags & HF_SVMI_MASK)
1641 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1642#endif
1643 do_interrupt_real(intno, is_int, error_code, next_eip);
1644 }
1645
1646#if !defined(CONFIG_USER_ONLY)
1647 if (env->hflags & HF_SVMI_MASK) {
1648 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1649 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1650 }
1651#endif
1652}
1653
1654/* This should come from sysemu.h - if we could include it here... */
1655void qemu_system_reset_request(void);
1656
1657/*
1658 * Check nested exceptions and change to double or triple fault if
1659 * needed. It should only be called if this is not an interrupt.
1660 * Returns the new exception number.
1661 */
1662static int check_exception(int intno, int *error_code)
1663{
1664 int first_contributory = env->old_exception == 0 ||
1665 (env->old_exception >= 10 &&
1666 env->old_exception <= 13);
1667 int second_contributory = intno == 0 ||
1668 (intno >= 10 && intno <= 13);
1669
1670 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1671 env->old_exception, intno);
1672
1673#if !defined(CONFIG_USER_ONLY)
1674 if (env->old_exception == EXCP08_DBLE) {
1675 if (env->hflags & HF_SVMI_MASK)
1676 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1677
1678 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1679
1680# ifndef VBOX
1681 qemu_system_reset_request();
1682# else
1683 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1684# endif
1685 return EXCP_HLT;
1686 }
1687#endif
1688
1689 if ((first_contributory && second_contributory)
1690 || (env->old_exception == EXCP0E_PAGE &&
1691 (second_contributory || (intno == EXCP0E_PAGE)))) {
1692 intno = EXCP08_DBLE;
1693 *error_code = 0;
1694 }
1695
1696 if (second_contributory || (intno == EXCP0E_PAGE) ||
1697 (intno == EXCP08_DBLE))
1698 env->old_exception = intno;
1699
1700 return intno;
1701}
1702
1703/*
1704 * Signal an interruption. It is executed in the main CPU loop.
1705 * is_int is TRUE if coming from the int instruction. next_eip is the
1706 * EIP value AFTER the interrupt instruction. It is only relevant if
1707 * is_int is TRUE.
1708 */
1709static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1710 int next_eip_addend)
1711{
1712#if defined(VBOX) && defined(DEBUG)
1713 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1714#endif
1715 if (!is_int) {
1716 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1717 intno = check_exception(intno, &error_code);
1718 } else {
1719 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1720 }
1721
1722 env->exception_index = intno;
1723 env->error_code = error_code;
1724 env->exception_is_int = is_int;
1725 env->exception_next_eip = env->eip + next_eip_addend;
1726 cpu_loop_exit();
1727}
1728
1729/* shortcuts to generate exceptions */
1730
1731void raise_exception_err(int exception_index, int error_code)
1732{
1733 raise_interrupt(exception_index, 0, error_code, 0);
1734}
1735
1736void raise_exception(int exception_index)
1737{
1738 raise_interrupt(exception_index, 0, 0, 0);
1739}
1740
1741/* SMM support */
1742
1743#if defined(CONFIG_USER_ONLY)
1744
1745void do_smm_enter(void)
1746{
1747}
1748
1749void helper_rsm(void)
1750{
1751}
1752
1753#else
1754
1755#ifdef TARGET_X86_64
1756#define SMM_REVISION_ID 0x00020064
1757#else
1758#define SMM_REVISION_ID 0x00020000
1759#endif
1760
1761void do_smm_enter(void)
1762{
1763 target_ulong sm_state;
1764 SegmentCache *dt;
1765 int i, offset;
1766
1767 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1768 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1769
1770 env->hflags |= HF_SMM_MASK;
1771 cpu_smm_update(env);
1772
1773 sm_state = env->smbase + 0x8000;
1774
1775#ifdef TARGET_X86_64
1776 for(i = 0; i < 6; i++) {
1777 dt = &env->segs[i];
1778 offset = 0x7e00 + i * 16;
1779 stw_phys(sm_state + offset, dt->selector);
1780 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1781 stl_phys(sm_state + offset + 4, dt->limit);
1782 stq_phys(sm_state + offset + 8, dt->base);
1783 }
1784
1785 stq_phys(sm_state + 0x7e68, env->gdt.base);
1786 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1787
1788 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1789 stq_phys(sm_state + 0x7e78, env->ldt.base);
1790 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1791 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1792
1793 stq_phys(sm_state + 0x7e88, env->idt.base);
1794 stl_phys(sm_state + 0x7e84, env->idt.limit);
1795
1796 stw_phys(sm_state + 0x7e90, env->tr.selector);
1797 stq_phys(sm_state + 0x7e98, env->tr.base);
1798 stl_phys(sm_state + 0x7e94, env->tr.limit);
1799 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1800
1801 stq_phys(sm_state + 0x7ed0, env->efer);
1802
1803 stq_phys(sm_state + 0x7ff8, EAX);
1804 stq_phys(sm_state + 0x7ff0, ECX);
1805 stq_phys(sm_state + 0x7fe8, EDX);
1806 stq_phys(sm_state + 0x7fe0, EBX);
1807 stq_phys(sm_state + 0x7fd8, ESP);
1808 stq_phys(sm_state + 0x7fd0, EBP);
1809 stq_phys(sm_state + 0x7fc8, ESI);
1810 stq_phys(sm_state + 0x7fc0, EDI);
1811 for(i = 8; i < 16; i++)
1812 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1813 stq_phys(sm_state + 0x7f78, env->eip);
1814 stl_phys(sm_state + 0x7f70, compute_eflags());
1815 stl_phys(sm_state + 0x7f68, env->dr[6]);
1816 stl_phys(sm_state + 0x7f60, env->dr[7]);
1817
1818 stl_phys(sm_state + 0x7f48, env->cr[4]);
1819 stl_phys(sm_state + 0x7f50, env->cr[3]);
1820 stl_phys(sm_state + 0x7f58, env->cr[0]);
1821
1822 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1823 stl_phys(sm_state + 0x7f00, env->smbase);
1824#else
1825 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1826 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1827 stl_phys(sm_state + 0x7ff4, compute_eflags());
1828 stl_phys(sm_state + 0x7ff0, env->eip);
1829 stl_phys(sm_state + 0x7fec, EDI);
1830 stl_phys(sm_state + 0x7fe8, ESI);
1831 stl_phys(sm_state + 0x7fe4, EBP);
1832 stl_phys(sm_state + 0x7fe0, ESP);
1833 stl_phys(sm_state + 0x7fdc, EBX);
1834 stl_phys(sm_state + 0x7fd8, EDX);
1835 stl_phys(sm_state + 0x7fd4, ECX);
1836 stl_phys(sm_state + 0x7fd0, EAX);
1837 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1838 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1839
1840 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1841 stl_phys(sm_state + 0x7f64, env->tr.base);
1842 stl_phys(sm_state + 0x7f60, env->tr.limit);
1843 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1844
1845 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1846 stl_phys(sm_state + 0x7f80, env->ldt.base);
1847 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1848 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1849
1850 stl_phys(sm_state + 0x7f74, env->gdt.base);
1851 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1852
1853 stl_phys(sm_state + 0x7f58, env->idt.base);
1854 stl_phys(sm_state + 0x7f54, env->idt.limit);
1855
1856 for(i = 0; i < 6; i++) {
1857 dt = &env->segs[i];
1858 if (i < 3)
1859 offset = 0x7f84 + i * 12;
1860 else
1861 offset = 0x7f2c + (i - 3) * 12;
1862 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1863 stl_phys(sm_state + offset + 8, dt->base);
1864 stl_phys(sm_state + offset + 4, dt->limit);
1865 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1866 }
1867 stl_phys(sm_state + 0x7f14, env->cr[4]);
1868
1869 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1870 stl_phys(sm_state + 0x7ef8, env->smbase);
1871#endif
1872 /* init SMM cpu state */
1873
1874#ifdef TARGET_X86_64
1875 cpu_load_efer(env, 0);
1876#endif
1877 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1878 env->eip = 0x00008000;
1879 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1880 0xffffffff, 0);
1881 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1882 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1883 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1884 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1885 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1886
1887 cpu_x86_update_cr0(env,
1888 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1889 cpu_x86_update_cr4(env, 0);
1890 env->dr[7] = 0x00000400;
1891 CC_OP = CC_OP_EFLAGS;
1892}
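/* Note on the save image built above: all offsets are relative to
   SMBASE + 0x8000, so the state lands in the top of SMRAM
   (SMBASE + 0xfe00 .. SMBASE + 0xffff).  After saving, the CPU enters SMM
   in a real-mode-like state: CS.base = SMBASE, EIP = 0x8000, EFLAGS
   cleared, and PE/EM/TS/PG dropped from CR0. */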
1893
1894void helper_rsm(void)
1895{
1896#ifdef VBOX
1897 cpu_abort(env, "helper_rsm");
1898#else /* !VBOX */
1899 target_ulong sm_state;
1900 int i, offset;
1901 uint32_t val;
1902
1903 sm_state = env->smbase + 0x8000;
1904#ifdef TARGET_X86_64
1905 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1906
1907 for(i = 0; i < 6; i++) {
1908 offset = 0x7e00 + i * 16;
1909 cpu_x86_load_seg_cache(env, i,
1910 lduw_phys(sm_state + offset),
1911 ldq_phys(sm_state + offset + 8),
1912 ldl_phys(sm_state + offset + 4),
1913 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1914 }
1915
1916 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1917 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1918
1919 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1920 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1921 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1922 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1923
1924 env->idt.base = ldq_phys(sm_state + 0x7e88);
1925 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1926
1927 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1928 env->tr.base = ldq_phys(sm_state + 0x7e98);
1929 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1930 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1931
1932 EAX = ldq_phys(sm_state + 0x7ff8);
1933 ECX = ldq_phys(sm_state + 0x7ff0);
1934 EDX = ldq_phys(sm_state + 0x7fe8);
1935 EBX = ldq_phys(sm_state + 0x7fe0);
1936 ESP = ldq_phys(sm_state + 0x7fd8);
1937 EBP = ldq_phys(sm_state + 0x7fd0);
1938 ESI = ldq_phys(sm_state + 0x7fc8);
1939 EDI = ldq_phys(sm_state + 0x7fc0);
1940 for(i = 8; i < 16; i++)
1941 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1942 env->eip = ldq_phys(sm_state + 0x7f78);
1943 load_eflags(ldl_phys(sm_state + 0x7f70),
1944 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1945 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1946 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1947
1948 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1949 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1950 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1951
1952 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1953 if (val & 0x20000) {
1954 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1955 }
1956#else
1957 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1958 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1959 load_eflags(ldl_phys(sm_state + 0x7ff4),
1960 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1961 env->eip = ldl_phys(sm_state + 0x7ff0);
1962 EDI = ldl_phys(sm_state + 0x7fec);
1963 ESI = ldl_phys(sm_state + 0x7fe8);
1964 EBP = ldl_phys(sm_state + 0x7fe4);
1965 ESP = ldl_phys(sm_state + 0x7fe0);
1966 EBX = ldl_phys(sm_state + 0x7fdc);
1967 EDX = ldl_phys(sm_state + 0x7fd8);
1968 ECX = ldl_phys(sm_state + 0x7fd4);
1969 EAX = ldl_phys(sm_state + 0x7fd0);
1970 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1971 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1972
1973 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1974 env->tr.base = ldl_phys(sm_state + 0x7f64);
1975 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1976 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1977
1978 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1979 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1980 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1981 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1982
1983 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1984 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1985
1986 env->idt.base = ldl_phys(sm_state + 0x7f58);
1987 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1988
1989 for(i = 0; i < 6; i++) {
1990 if (i < 3)
1991 offset = 0x7f84 + i * 12;
1992 else
1993 offset = 0x7f2c + (i - 3) * 12;
1994 cpu_x86_load_seg_cache(env, i,
1995 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1996 ldl_phys(sm_state + offset + 8),
1997 ldl_phys(sm_state + offset + 4),
1998 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1999 }
2000 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2001
2002 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2003 if (val & 0x20000) {
2004 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2005 }
2006#endif
2007 CC_OP = CC_OP_EFLAGS;
2008 env->hflags &= ~HF_SMM_MASK;
2009 cpu_smm_update(env);
2010
2011 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2012 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2013#endif /* !VBOX */
2014}
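/* RSM restores the image saved on SMM entry above.  The revision dword at
   offset 0x7efc is checked for bit 17 (0x20000), which is understood to
   advertise SMBASE relocation; only in that case is the (possibly
   rewritten) SMBASE slot read back into env->smbase. */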
2015
2016#endif /* !CONFIG_USER_ONLY */
2017
2018
2019/* division, flags are undefined */
2020
2021void helper_divb_AL(target_ulong t0)
2022{
2023 unsigned int num, den, q, r;
2024
2025 num = (EAX & 0xffff);
2026 den = (t0 & 0xff);
2027 if (den == 0) {
2028 raise_exception(EXCP00_DIVZ);
2029 }
2030 q = (num / den);
2031 if (q > 0xff)
2032 raise_exception(EXCP00_DIVZ);
2033 q &= 0xff;
2034 r = (num % den) & 0xff;
2035 EAX = (EAX & ~0xffff) | (r << 8) | q;
2036}
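/* Example: DIV r/m8 with AX = 0x0123 (291) and divisor 0x10 (16) leaves
   AL = 0x12 (quotient 18) and AH = 0x03 (remainder 3); a quotient that
   does not fit in AL raises the same #DE as division by zero. */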
2037
2038void helper_idivb_AL(target_ulong t0)
2039{
2040 int num, den, q, r;
2041
2042 num = (int16_t)EAX;
2043 den = (int8_t)t0;
2044 if (den == 0) {
2045 raise_exception(EXCP00_DIVZ);
2046 }
2047 q = (num / den);
2048 if (q != (int8_t)q)
2049 raise_exception(EXCP00_DIVZ);
2050 q &= 0xff;
2051 r = (num % den) & 0xff;
2052 EAX = (EAX & ~0xffff) | (r << 8) | q;
2053}
2054
2055void helper_divw_AX(target_ulong t0)
2056{
2057 unsigned int num, den, q, r;
2058
2059 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2060 den = (t0 & 0xffff);
2061 if (den == 0) {
2062 raise_exception(EXCP00_DIVZ);
2063 }
2064 q = (num / den);
2065 if (q > 0xffff)
2066 raise_exception(EXCP00_DIVZ);
2067 q &= 0xffff;
2068 r = (num % den) & 0xffff;
2069 EAX = (EAX & ~0xffff) | q;
2070 EDX = (EDX & ~0xffff) | r;
2071}
2072
2073void helper_idivw_AX(target_ulong t0)
2074{
2075 int num, den, q, r;
2076
2077 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2078 den = (int16_t)t0;
2079 if (den == 0) {
2080 raise_exception(EXCP00_DIVZ);
2081 }
2082 q = (num / den);
2083 if (q != (int16_t)q)
2084 raise_exception(EXCP00_DIVZ);
2085 q &= 0xffff;
2086 r = (num % den) & 0xffff;
2087 EAX = (EAX & ~0xffff) | q;
2088 EDX = (EDX & ~0xffff) | r;
2089}
2090
2091void helper_divl_EAX(target_ulong t0)
2092{
2093 unsigned int den, r;
2094 uint64_t num, q;
2095
2096 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2097 den = t0;
2098 if (den == 0) {
2099 raise_exception(EXCP00_DIVZ);
2100 }
2101 q = (num / den);
2102 r = (num % den);
2103 if (q > 0xffffffff)
2104 raise_exception(EXCP00_DIVZ);
2105 EAX = (uint32_t)q;
2106 EDX = (uint32_t)r;
2107}
2108
2109void helper_idivl_EAX(target_ulong t0)
2110{
2111 int den, r;
2112 int64_t num, q;
2113
2114 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2115 den = t0;
2116 if (den == 0) {
2117 raise_exception(EXCP00_DIVZ);
2118 }
2119 q = (num / den);
2120 r = (num % den);
2121 if (q != (int32_t)q)
2122 raise_exception(EXCP00_DIVZ);
2123 EAX = (uint32_t)q;
2124 EDX = (uint32_t)r;
2125}
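/* The signed variants detect overflow by checking that the quotient
   survives truncation (q != (intN_t)q).  Example: IDIV with EDX:EAX =
   0x0000000200000000 and a divisor of 1 overflows EAX and raises #DE. */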
2126
2127/* bcd */
2128
2129/* XXX: AAM with an immediate of zero should raise #DE; this is not done here */
2130void helper_aam(int base)
2131{
2132 int al, ah;
2133 al = EAX & 0xff;
2134 ah = al / base;
2135 al = al % base;
2136 EAX = (EAX & ~0xffff) | al | (ah << 8);
2137 CC_DST = al;
2138}
2139
2140void helper_aad(int base)
2141{
2142 int al, ah;
2143 al = EAX & 0xff;
2144 ah = (EAX >> 8) & 0xff;
2145 al = ((ah * base) + al) & 0xff;
2146 EAX = (EAX & ~0xffff) | al;
2147 CC_DST = al;
2148}
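/* In both helpers above, 'base' is the immediate byte of the instruction;
   the common encodings (D4 0A for AAM, D5 0A for AAD) use base 10.
   Example: AAM with AL = 0x2b (43) gives AH = 4, AL = 3. */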
2149
2150void helper_aaa(void)
2151{
2152 int icarry;
2153 int al, ah, af;
2154 int eflags;
2155
2156 eflags = helper_cc_compute_all(CC_OP);
2157 af = eflags & CC_A;
2158 al = EAX & 0xff;
2159 ah = (EAX >> 8) & 0xff;
2160
2161 icarry = (al > 0xf9);
2162 if (((al & 0x0f) > 9 ) || af) {
2163 al = (al + 6) & 0x0f;
2164 ah = (ah + 1 + icarry) & 0xff;
2165 eflags |= CC_C | CC_A;
2166 } else {
2167 eflags &= ~(CC_C | CC_A);
2168 al &= 0x0f;
2169 }
2170 EAX = (EAX & ~0xffff) | al | (ah << 8);
2171 CC_SRC = eflags;
2172}
2173
2174void helper_aas(void)
2175{
2176 int icarry;
2177 int al, ah, af;
2178 int eflags;
2179
2180 eflags = helper_cc_compute_all(CC_OP);
2181 af = eflags & CC_A;
2182 al = EAX & 0xff;
2183 ah = (EAX >> 8) & 0xff;
2184
2185 icarry = (al < 6);
2186 if (((al & 0x0f) > 9 ) || af) {
2187 al = (al - 6) & 0x0f;
2188 ah = (ah - 1 - icarry) & 0xff;
2189 eflags |= CC_C | CC_A;
2190 } else {
2191 eflags &= ~(CC_C | CC_A);
2192 al &= 0x0f;
2193 }
2194 EAX = (EAX & ~0xffff) | al | (ah << 8);
2195 CC_SRC = eflags;
2196}
2197
2198void helper_daa(void)
2199{
2200 int al, af, cf;
2201 int eflags;
2202
2203 eflags = helper_cc_compute_all(CC_OP);
2204 cf = eflags & CC_C;
2205 af = eflags & CC_A;
2206 al = EAX & 0xff;
2207
2208 eflags = 0;
2209 if (((al & 0x0f) > 9 ) || af) {
2210 al = (al + 6) & 0xff;
2211 eflags |= CC_A;
2212 }
2213 if ((al > 0x9f) || cf) {
2214 al = (al + 0x60) & 0xff;
2215 eflags |= CC_C;
2216 }
2217 EAX = (EAX & ~0xff) | al;
2218 /* well, speed is not an issue here, so we compute the flags by hand */
2219 eflags |= (al == 0) << 6; /* zf */
2220 eflags |= parity_table[al]; /* pf */
2221 eflags |= (al & 0x80); /* sf */
2222 CC_SRC = eflags;
2223}
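/* Example: adding packed BCD 0x19 + 0x28 leaves AL = 0x41 with AF set;
   DAA then adds 6, giving AL = 0x47, the correct BCD result of 19 + 28. */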
2224
2225void helper_das(void)
2226{
2227 int al, al1, af, cf;
2228 int eflags;
2229
2230 eflags = helper_cc_compute_all(CC_OP);
2231 cf = eflags & CC_C;
2232 af = eflags & CC_A;
2233 al = EAX & 0xff;
2234
2235 eflags = 0;
2236 al1 = al;
2237 if (((al & 0x0f) > 9 ) || af) {
2238 eflags |= CC_A;
2239 if (al < 6 || cf)
2240 eflags |= CC_C;
2241 al = (al - 6) & 0xff;
2242 }
2243 if ((al1 > 0x99) || cf) {
2244 al = (al - 0x60) & 0xff;
2245 eflags |= CC_C;
2246 }
2247 EAX = (EAX & ~0xff) | al;
2248 /* well, speed is not an issue here, so we compute the flags by hand */
2249 eflags |= (al == 0) << 6; /* zf */
2250 eflags |= parity_table[al]; /* pf */
2251 eflags |= (al & 0x80); /* sf */
2252 CC_SRC = eflags;
2253}
2254
2255void helper_into(int next_eip_addend)
2256{
2257 int eflags;
2258 eflags = helper_cc_compute_all(CC_OP);
2259 if (eflags & CC_O) {
2260 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2261 }
2262}
2263
2264void helper_cmpxchg8b(target_ulong a0)
2265{
2266 uint64_t d;
2267 int eflags;
2268
2269 eflags = helper_cc_compute_all(CC_OP);
2270 d = ldq(a0);
2271 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2272 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2273 eflags |= CC_Z;
2274 } else {
2275 /* always do the store */
2276 stq(a0, d);
2277 EDX = (uint32_t)(d >> 32);
2278 EAX = (uint32_t)d;
2279 eflags &= ~CC_Z;
2280 }
2281 CC_SRC = eflags;
2282}
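/* CMPXCHG8B: if the 64-bit memory operand equals EDX:EAX, ECX:EBX is
   stored and ZF is set; otherwise the memory value is loaded into EDX:EAX
   and ZF is cleared.  The store on the failure path also matches real
   hardware, where the destination gets a write cycle regardless of the
   comparison result. */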
2283
2284#ifdef TARGET_X86_64
2285void helper_cmpxchg16b(target_ulong a0)
2286{
2287 uint64_t d0, d1;
2288 int eflags;
2289
2290 if ((a0 & 0xf) != 0)
2291 raise_exception(EXCP0D_GPF);
2292 eflags = helper_cc_compute_all(CC_OP);
2293 d0 = ldq(a0);
2294 d1 = ldq(a0 + 8);
2295 if (d0 == EAX && d1 == EDX) {
2296 stq(a0, EBX);
2297 stq(a0 + 8, ECX);
2298 eflags |= CC_Z;
2299 } else {
2300 /* always do the store */
2301 stq(a0, d0);
2302 stq(a0 + 8, d1);
2303 EDX = d1;
2304 EAX = d0;
2305 eflags &= ~CC_Z;
2306 }
2307 CC_SRC = eflags;
2308}
2309#endif
2310
2311void helper_single_step(void)
2312{
2313#ifndef CONFIG_USER_ONLY
2314 check_hw_breakpoints(env, 1);
2315 env->dr[6] |= DR6_BS;
2316#endif
2317 raise_exception(EXCP01_DB);
2318}
2319
2320void helper_cpuid(void)
2321{
2322 uint32_t eax, ebx, ecx, edx;
2323
2324 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2325
2326 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2327 EAX = eax;
2328 EBX = ebx;
2329 ECX = ecx;
2330 EDX = edx;
2331}
2332
2333void helper_enter_level(int level, int data32, target_ulong t1)
2334{
2335 target_ulong ssp;
2336 uint32_t esp_mask, esp, ebp;
2337
2338 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2339 ssp = env->segs[R_SS].base;
2340 ebp = EBP;
2341 esp = ESP;
2342 if (data32) {
2343 /* 32 bit */
2344 esp -= 4;
2345 while (--level) {
2346 esp -= 4;
2347 ebp -= 4;
2348 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2349 }
2350 esp -= 4;
2351 stl(ssp + (esp & esp_mask), t1);
2352 } else {
2353 /* 16 bit */
2354 esp -= 2;
2355 while (--level) {
2356 esp -= 2;
2357 ebp -= 2;
2358 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2359 }
2360 esp -= 2;
2361 stw(ssp + (esp & esp_mask), t1);
2362 }
2363}
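/* ENTER nesting support: the initial esp adjustment accounts for the slot
   of the saved EBP, then the frame pointers of the level-1 enclosing
   frames are copied down from the old frame, and the new frame pointer
   value (t1) is pushed last.  The final EBP/ESP updates are left to the
   generated code. */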
2364
2365#ifdef TARGET_X86_64
2366void helper_enter64_level(int level, int data64, target_ulong t1)
2367{
2368 target_ulong esp, ebp;
2369 ebp = EBP;
2370 esp = ESP;
2371
2372 if (data64) {
2373 /* 64 bit */
2374 esp -= 8;
2375 while (--level) {
2376 esp -= 8;
2377 ebp -= 8;
2378 stq(esp, ldq(ebp));
2379 }
2380 esp -= 8;
2381 stq(esp, t1);
2382 } else {
2383 /* 16 bit */
2384 esp -= 2;
2385 while (--level) {
2386 esp -= 2;
2387 ebp -= 2;
2388 stw(esp, lduw(ebp));
2389 }
2390 esp -= 2;
2391 stw(esp, t1);
2392 }
2393}
2394#endif
2395
2396void helper_lldt(int selector)
2397{
2398 SegmentCache *dt;
2399 uint32_t e1, e2;
2400#ifndef VBOX
2401 int index, entry_limit;
2402#else
2403 unsigned int index, entry_limit;
2404#endif
2405 target_ulong ptr;
2406
2407#ifdef VBOX
2408 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2409 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2410#endif
2411
2412 selector &= 0xffff;
2413 if ((selector & 0xfffc) == 0) {
2414 /* XXX: NULL selector case: invalid LDT */
2415 env->ldt.base = 0;
2416 env->ldt.limit = 0;
2417 } else {
2418 if (selector & 0x4)
2419 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2420 dt = &env->gdt;
2421 index = selector & ~7;
2422#ifdef TARGET_X86_64
2423 if (env->hflags & HF_LMA_MASK)
2424 entry_limit = 15;
2425 else
2426#endif
2427 entry_limit = 7;
2428 if ((index + entry_limit) > dt->limit)
2429 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2430 ptr = dt->base + index;
2431 e1 = ldl_kernel(ptr);
2432 e2 = ldl_kernel(ptr + 4);
2433 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2434 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2435 if (!(e2 & DESC_P_MASK))
2436 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2437#ifdef TARGET_X86_64
2438 if (env->hflags & HF_LMA_MASK) {
2439 uint32_t e3;
2440 e3 = ldl_kernel(ptr + 8);
2441 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2442 env->ldt.base |= (target_ulong)e3 << 32;
2443 } else
2444#endif
2445 {
2446 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2447 }
2448 }
2449 env->ldt.selector = selector;
2450#ifdef VBOX
2451 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2452 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2453#endif
2454}
2455
2456void helper_ltr(int selector)
2457{
2458 SegmentCache *dt;
2459 uint32_t e1, e2;
2460#ifndef VBOX
2461 int index, type, entry_limit;
2462#else
2463 unsigned int index;
2464 int type, entry_limit;
2465#endif
2466 target_ulong ptr;
2467
2468#ifdef VBOX
2469 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2470 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2471 env->tr.flags, (RTSEL)(selector & 0xffff)));
2472#endif
2473 selector &= 0xffff;
2474 if ((selector & 0xfffc) == 0) {
2475 /* NULL selector case: invalid TR */
2476 env->tr.base = 0;
2477 env->tr.limit = 0;
2478 env->tr.flags = 0;
2479 } else {
2480 if (selector & 0x4)
2481 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2482 dt = &env->gdt;
2483 index = selector & ~7;
2484#ifdef TARGET_X86_64
2485 if (env->hflags & HF_LMA_MASK)
2486 entry_limit = 15;
2487 else
2488#endif
2489 entry_limit = 7;
2490 if ((index + entry_limit) > dt->limit)
2491 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2492 ptr = dt->base + index;
2493 e1 = ldl_kernel(ptr);
2494 e2 = ldl_kernel(ptr + 4);
2495 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2496 if ((e2 & DESC_S_MASK) ||
2497 (type != 1 && type != 9))
2498 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2499 if (!(e2 & DESC_P_MASK))
2500 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2501#ifdef TARGET_X86_64
2502 if (env->hflags & HF_LMA_MASK) {
2503 uint32_t e3, e4;
2504 e3 = ldl_kernel(ptr + 8);
2505 e4 = ldl_kernel(ptr + 12);
2506 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2507 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2508 load_seg_cache_raw_dt(&env->tr, e1, e2);
2509 env->tr.base |= (target_ulong)e3 << 32;
2510 } else
2511#endif
2512 {
2513 load_seg_cache_raw_dt(&env->tr, e1, e2);
2514 }
2515 e2 |= DESC_TSS_BUSY_MASK;
2516 stl_kernel(ptr + 4, e2);
2517 }
2518 env->tr.selector = selector;
2519#ifdef VBOX
2520 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2521 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2522 env->tr.flags, (RTSEL)(selector & 0xffff)));
2523#endif
2524}
2525
2526/* only works in protected mode, not VM86; seg_reg must be != R_CS */
2527void helper_load_seg(int seg_reg, int selector)
2528{
2529 uint32_t e1, e2;
2530 int cpl, dpl, rpl;
2531 SegmentCache *dt;
2532#ifndef VBOX
2533 int index;
2534#else
2535 unsigned int index;
2536#endif
2537 target_ulong ptr;
2538
2539 selector &= 0xffff;
2540 cpl = env->hflags & HF_CPL_MASK;
2541#ifdef VBOX
2542
2543 /* Trying to load a selector with RPL=1 while at CPL 0 in raw ring-0 mode? */
2544 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2545 {
2546 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2547 selector = selector & 0xfffc;
2548 }
2549#endif /* VBOX */
2550 if ((selector & 0xfffc) == 0) {
2551 /* null selector case */
2552 if (seg_reg == R_SS
2553#ifdef TARGET_X86_64
2554 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2555#endif
2556 )
2557 raise_exception_err(EXCP0D_GPF, 0);
2558 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2559 } else {
2560
2561 if (selector & 0x4)
2562 dt = &env->ldt;
2563 else
2564 dt = &env->gdt;
2565 index = selector & ~7;
2566 if ((index + 7) > dt->limit)
2567 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2568 ptr = dt->base + index;
2569 e1 = ldl_kernel(ptr);
2570 e2 = ldl_kernel(ptr + 4);
2571
2572 if (!(e2 & DESC_S_MASK))
2573 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2574 rpl = selector & 3;
2575 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2576 if (seg_reg == R_SS) {
2577 /* must be writable segment */
2578 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2579 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2580 if (rpl != cpl || dpl != cpl)
2581 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2582 } else {
2583 /* must be readable segment */
2584 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2585 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2586
2587 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2588 /* if not conforming code, test rights */
2589 if (dpl < cpl || dpl < rpl)
2590 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2591 }
2592 }
2593
2594 if (!(e2 & DESC_P_MASK)) {
2595 if (seg_reg == R_SS)
2596 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2597 else
2598 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2599 }
2600
2601 /* set the access bit if not already set */
2602 if (!(e2 & DESC_A_MASK)) {
2603 e2 |= DESC_A_MASK;
2604 stl_kernel(ptr + 4, e2);
2605 }
2606
2607 cpu_x86_load_seg_cache(env, seg_reg, selector,
2608 get_seg_base(e1, e2),
2609 get_seg_limit(e1, e2),
2610 e2);
2611#if 0
2612 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2613 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2614#endif
2615 }
2616}
2617
2618/* protected mode jump */
2619void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2620 int next_eip_addend)
2621{
2622 int gate_cs, type;
2623 uint32_t e1, e2, cpl, dpl, rpl, limit;
2624 target_ulong next_eip;
2625
2626#ifdef VBOX /** @todo Why do we do this? */
2627 e1 = e2 = 0;
2628#endif
2629 if ((new_cs & 0xfffc) == 0)
2630 raise_exception_err(EXCP0D_GPF, 0);
2631 if (load_segment(&e1, &e2, new_cs) != 0)
2632 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2633 cpl = env->hflags & HF_CPL_MASK;
2634 if (e2 & DESC_S_MASK) {
2635 if (!(e2 & DESC_CS_MASK))
2636 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2637 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2638 if (e2 & DESC_C_MASK) {
2639 /* conforming code segment */
2640 if (dpl > cpl)
2641 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2642 } else {
2643 /* non conforming code segment */
2644 rpl = new_cs & 3;
2645 if (rpl > cpl)
2646 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2647 if (dpl != cpl)
2648 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2649 }
2650 if (!(e2 & DESC_P_MASK))
2651 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2652 limit = get_seg_limit(e1, e2);
2653 if (new_eip > limit &&
2654 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2655 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2656 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2657 get_seg_base(e1, e2), limit, e2);
2658 EIP = new_eip;
2659 } else {
2660 /* jump to call or task gate */
2661 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2662 rpl = new_cs & 3;
2663 cpl = env->hflags & HF_CPL_MASK;
2664 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2665 switch(type) {
2666 case 1: /* 286 TSS */
2667 case 9: /* 386 TSS */
2668 case 5: /* task gate */
2669 if (dpl < cpl || dpl < rpl)
2670 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2671 next_eip = env->eip + next_eip_addend;
2672 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2673 CC_OP = CC_OP_EFLAGS;
2674 break;
2675 case 4: /* 286 call gate */
2676 case 12: /* 386 call gate */
2677 if ((dpl < cpl) || (dpl < rpl))
2678 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2679 if (!(e2 & DESC_P_MASK))
2680 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2681 gate_cs = e1 >> 16;
2682 new_eip = (e1 & 0xffff);
2683 if (type == 12)
2684 new_eip |= (e2 & 0xffff0000);
2685 if (load_segment(&e1, &e2, gate_cs) != 0)
2686 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2687 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2688 /* must be code segment */
2689 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2690 (DESC_S_MASK | DESC_CS_MASK)))
2691 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2692 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2693 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2694 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2695 if (!(e2 & DESC_P_MASK))
2696#ifdef VBOX /* See page 3-514 of 253666.pdf */
2697 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2698#else
2699 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2700#endif
2701 limit = get_seg_limit(e1, e2);
2702 if (new_eip > limit)
2703 raise_exception_err(EXCP0D_GPF, 0);
2704 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2705 get_seg_base(e1, e2), limit, e2);
2706 EIP = new_eip;
2707 break;
2708 default:
2709 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2710 break;
2711 }
2712 }
2713}
2714
2715/* real mode call */
2716void helper_lcall_real(int new_cs, target_ulong new_eip1,
2717 int shift, int next_eip)
2718{
2719 int new_eip;
2720 uint32_t esp, esp_mask;
2721 target_ulong ssp;
2722
2723 new_eip = new_eip1;
2724 esp = ESP;
2725 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2726 ssp = env->segs[R_SS].base;
2727 if (shift) {
2728 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2729 PUSHL(ssp, esp, esp_mask, next_eip);
2730 } else {
2731 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2732 PUSHW(ssp, esp, esp_mask, next_eip);
2733 }
2734
2735 SET_ESP(esp, esp_mask);
2736 env->eip = new_eip;
2737 env->segs[R_CS].selector = new_cs;
2738 env->segs[R_CS].base = (new_cs << 4);
2739}
2740
2741/* protected mode call */
2742void helper_lcall_protected(int new_cs, target_ulong new_eip,
2743 int shift, int next_eip_addend)
2744{
2745 int new_stack, i;
2746 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2747 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2748 uint32_t val, limit, old_sp_mask;
2749 target_ulong ssp, old_ssp, next_eip;
2750
2751#ifdef VBOX /** @todo Why do we do this? */
2752 e1 = e2 = 0;
2753#endif
2754 next_eip = env->eip + next_eip_addend;
2755 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2756 LOG_PCALL_STATE(env);
2757 if ((new_cs & 0xfffc) == 0)
2758 raise_exception_err(EXCP0D_GPF, 0);
2759 if (load_segment(&e1, &e2, new_cs) != 0)
2760 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2761 cpl = env->hflags & HF_CPL_MASK;
2762 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2763 if (e2 & DESC_S_MASK) {
2764 if (!(e2 & DESC_CS_MASK))
2765 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2766 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2767 if (e2 & DESC_C_MASK) {
2768 /* conforming code segment */
2769 if (dpl > cpl)
2770 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2771 } else {
2772 /* non conforming code segment */
2773 rpl = new_cs & 3;
2774 if (rpl > cpl)
2775 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2776 if (dpl != cpl)
2777 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2778 }
2779 if (!(e2 & DESC_P_MASK))
2780 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2781
2782#ifdef TARGET_X86_64
2783 /* XXX: check 16/32 bit cases in long mode */
2784 if (shift == 2) {
2785 target_ulong rsp;
2786 /* 64 bit case */
2787 rsp = ESP;
2788 PUSHQ(rsp, env->segs[R_CS].selector);
2789 PUSHQ(rsp, next_eip);
2790 /* from this point, not restartable */
2791 ESP = rsp;
2792 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2793 get_seg_base(e1, e2),
2794 get_seg_limit(e1, e2), e2);
2795 EIP = new_eip;
2796 } else
2797#endif
2798 {
2799 sp = ESP;
2800 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2801 ssp = env->segs[R_SS].base;
2802 if (shift) {
2803 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2804 PUSHL(ssp, sp, sp_mask, next_eip);
2805 } else {
2806 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2807 PUSHW(ssp, sp, sp_mask, next_eip);
2808 }
2809
2810 limit = get_seg_limit(e1, e2);
2811 if (new_eip > limit)
2812 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2813 /* from this point, not restartable */
2814 SET_ESP(sp, sp_mask);
2815 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2816 get_seg_base(e1, e2), limit, e2);
2817 EIP = new_eip;
2818 }
2819 } else {
2820 /* check gate type */
2821 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2822 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2823 rpl = new_cs & 3;
2824 switch(type) {
2825 case 1: /* available 286 TSS */
2826 case 9: /* available 386 TSS */
2827 case 5: /* task gate */
2828 if (dpl < cpl || dpl < rpl)
2829 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2830 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2831 CC_OP = CC_OP_EFLAGS;
2832 return;
2833 case 4: /* 286 call gate */
2834 case 12: /* 386 call gate */
2835 break;
2836 default:
2837 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2838 break;
2839 }
2840 shift = type >> 3;
2841
2842 if (dpl < cpl || dpl < rpl)
2843 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2844 /* check valid bit */
2845 if (!(e2 & DESC_P_MASK))
2846 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2847 selector = e1 >> 16;
2848 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2849 param_count = e2 & 0x1f;
2850 if ((selector & 0xfffc) == 0)
2851 raise_exception_err(EXCP0D_GPF, 0);
2852
2853 if (load_segment(&e1, &e2, selector) != 0)
2854 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2855 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2856 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2857 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2858 if (dpl > cpl)
2859 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2860 if (!(e2 & DESC_P_MASK))
2861 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2862
2863 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2864 /* to inner privilege */
2865 get_ss_esp_from_tss(&ss, &sp, dpl);
2866 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2867 ss, sp, param_count, ESP);
2868 if ((ss & 0xfffc) == 0)
2869 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2870 if ((ss & 3) != dpl)
2871 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2872 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2873 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2874 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2875 if (ss_dpl != dpl)
2876 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2877 if (!(ss_e2 & DESC_S_MASK) ||
2878 (ss_e2 & DESC_CS_MASK) ||
2879 !(ss_e2 & DESC_W_MASK))
2880 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2881 if (!(ss_e2 & DESC_P_MASK))
2882#ifdef VBOX /* See page 3-99 of 253666.pdf */
2883 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2884#else
2885 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2886#endif
2887
2888 // push_size = ((param_count * 2) + 8) << shift;
2889
2890 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2891 old_ssp = env->segs[R_SS].base;
2892
2893 sp_mask = get_sp_mask(ss_e2);
2894 ssp = get_seg_base(ss_e1, ss_e2);
2895 if (shift) {
2896 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2897 PUSHL(ssp, sp, sp_mask, ESP);
2898 for(i = param_count - 1; i >= 0; i--) {
2899 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2900 PUSHL(ssp, sp, sp_mask, val);
2901 }
2902 } else {
2903 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2904 PUSHW(ssp, sp, sp_mask, ESP);
2905 for(i = param_count - 1; i >= 0; i--) {
2906 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2907 PUSHW(ssp, sp, sp_mask, val);
2908 }
2909 }
2910 new_stack = 1;
2911 } else {
2912 /* to same privilege */
2913 sp = ESP;
2914 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2915 ssp = env->segs[R_SS].base;
2916 // push_size = (4 << shift);
2917 new_stack = 0;
2918 }
2919
2920 if (shift) {
2921 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2922 PUSHL(ssp, sp, sp_mask, next_eip);
2923 } else {
2924 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2925 PUSHW(ssp, sp, sp_mask, next_eip);
2926 }
2927
2928 /* from this point, not restartable */
2929
2930 if (new_stack) {
2931 ss = (ss & ~3) | dpl;
2932 cpu_x86_load_seg_cache(env, R_SS, ss,
2933 ssp,
2934 get_seg_limit(ss_e1, ss_e2),
2935 ss_e2);
2936 }
2937
2938 selector = (selector & ~3) | dpl;
2939 cpu_x86_load_seg_cache(env, R_CS, selector,
2940 get_seg_base(e1, e2),
2941 get_seg_limit(e1, e2),
2942 e2);
2943 cpu_x86_set_cpl(env, dpl);
2944 SET_ESP(sp, sp_mask);
2945 EIP = offset;
2946 }
2947#ifdef CONFIG_KQEMU
2948 if (kqemu_is_ok(env)) {
2949 env->exception_index = -1;
2950 cpu_loop_exit();
2951 }
2952#endif
2953}
2954
2955/* real and vm86 mode iret */
2956void helper_iret_real(int shift)
2957{
2958 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2959 target_ulong ssp;
2960 int eflags_mask;
2961#ifdef VBOX
2962 bool fVME = false;
2963
2964 remR3TrapClear(env->pVM);
2965#endif /* VBOX */
2966
2967 sp_mask = 0xffff; /* XXX: use SS segment size? */
2968 sp = ESP;
2969 ssp = env->segs[R_SS].base;
2970 if (shift == 1) {
2971 /* 32 bits */
2972 POPL(ssp, sp, sp_mask, new_eip);
2973 POPL(ssp, sp, sp_mask, new_cs);
2974 new_cs &= 0xffff;
2975 POPL(ssp, sp, sp_mask, new_eflags);
2976 } else {
2977 /* 16 bits */
2978 POPW(ssp, sp, sp_mask, new_eip);
2979 POPW(ssp, sp, sp_mask, new_cs);
2980 POPW(ssp, sp, sp_mask, new_eflags);
2981 }
2982#ifdef VBOX
2983 if ( (env->eflags & VM_MASK)
2984 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2985 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2986 {
2987 fVME = true;
2988 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2989 /* if TF will be set -> #GP */
2990 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2991 || (new_eflags & TF_MASK))
2992 raise_exception(EXCP0D_GPF);
2993 }
2994#endif /* VBOX */
2995 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2996 env->segs[R_CS].selector = new_cs;
2997 env->segs[R_CS].base = (new_cs << 4);
2998 env->eip = new_eip;
2999#ifdef VBOX
3000 if (fVME)
3001 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3002 else
3003#endif
3004 if (env->eflags & VM_MASK)
3005 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3006 else
3007 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3008 if (shift == 0)
3009 eflags_mask &= 0xffff;
3010 load_eflags(new_eflags, eflags_mask);
3011 env->hflags2 &= ~HF2_NMI_MASK;
3012#ifdef VBOX
3013 if (fVME)
3014 {
3015 if (new_eflags & IF_MASK)
3016 env->eflags |= VIF_MASK;
3017 else
3018 env->eflags &= ~VIF_MASK;
3019 }
3020#endif /* VBOX */
3021}
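/* In the VBox-specific VME path above, IRET in V86 mode with IOPL < 3 and
   CR4.VME set does not restore IF directly: the popped IF value goes into
   VIF, and #GP is raised if it would set TF or enable interrupts while a
   virtual interrupt (VIP) is pending. */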
3022
3023static inline void validate_seg(int seg_reg, int cpl)
3024{
3025 int dpl;
3026 uint32_t e2;
3027
3028 /* XXX: on x86_64, we do not want to nullify FS and GS because
3029 they may still contain a valid base. I would be interested to
3030 know how a real x86_64 CPU behaves */
3031 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3032 (env->segs[seg_reg].selector & 0xfffc) == 0)
3033 return;
3034
3035 e2 = env->segs[seg_reg].flags;
3036 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3037 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3038 /* data or non conforming code segment */
3039 if (dpl < cpl) {
3040 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3041 }
3042 }
3043}
3044
3045/* protected mode iret */
3046static inline void helper_ret_protected(int shift, int is_iret, int addend)
3047{
3048 uint32_t new_cs, new_eflags, new_ss;
3049 uint32_t new_es, new_ds, new_fs, new_gs;
3050 uint32_t e1, e2, ss_e1, ss_e2;
3051 int cpl, dpl, rpl, eflags_mask, iopl;
3052 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3053
3054#ifdef VBOX /** @todo Why do we do this? */
3055 ss_e1 = ss_e2 = e1 = e2 = 0;
3056#endif
3057
3058#ifdef TARGET_X86_64
3059 if (shift == 2)
3060 sp_mask = -1;
3061 else
3062#endif
3063 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3064 sp = ESP;
3065 ssp = env->segs[R_SS].base;
3066 new_eflags = 0; /* avoid warning */
3067#ifdef TARGET_X86_64
3068 if (shift == 2) {
3069 POPQ(sp, new_eip);
3070 POPQ(sp, new_cs);
3071 new_cs &= 0xffff;
3072 if (is_iret) {
3073 POPQ(sp, new_eflags);
3074 }
3075 } else
3076#endif
3077 if (shift == 1) {
3078 /* 32 bits */
3079 POPL(ssp, sp, sp_mask, new_eip);
3080 POPL(ssp, sp, sp_mask, new_cs);
3081 new_cs &= 0xffff;
3082 if (is_iret) {
3083 POPL(ssp, sp, sp_mask, new_eflags);
3084#if defined(VBOX) && defined(DEBUG)
3085 printf("iret: new CS %04X\n", new_cs);
3086 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3087 printf("iret: new EFLAGS %08X\n", new_eflags);
3088 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3089#endif
3090 if (new_eflags & VM_MASK)
3091 goto return_to_vm86;
3092 }
3093#ifdef VBOX
3094 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3095 {
3096#ifdef DEBUG
3097 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3098#endif
3099 new_cs = new_cs & 0xfffc;
3100 }
3101#endif
3102 } else {
3103 /* 16 bits */
3104 POPW(ssp, sp, sp_mask, new_eip);
3105 POPW(ssp, sp, sp_mask, new_cs);
3106 if (is_iret)
3107 POPW(ssp, sp, sp_mask, new_eflags);
3108 }
3109 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3110 new_cs, new_eip, shift, addend);
3111 LOG_PCALL_STATE(env);
3112 if ((new_cs & 0xfffc) == 0)
3113 {
3114#if defined(VBOX) && defined(DEBUG)
3115 printf("(new_cs & 0xfffc) == 0\n");
3116#endif
3117 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3118 }
3119 if (load_segment(&e1, &e2, new_cs) != 0)
3120 {
3121#if defined(VBOX) && defined(DEBUG)
3122 printf("load_segment failed\n");
3123#endif
3124 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3125 }
3126 if (!(e2 & DESC_S_MASK) ||
3127 !(e2 & DESC_CS_MASK))
3128 {
3129#if defined(VBOX) && defined(DEBUG)
3130 printf("e2 mask %08x\n", e2);
3131#endif
3132 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3133 }
3134 cpl = env->hflags & HF_CPL_MASK;
3135 rpl = new_cs & 3;
3136 if (rpl < cpl)
3137 {
3138#if defined(VBOX) && defined(DEBUG)
3139 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3140#endif
3141 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3142 }
3143 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3144 if (e2 & DESC_C_MASK) {
3145 if (dpl > rpl)
3146 {
3147#if defined(VBOX) && defined(DEBUG)
3148 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3149#endif
3150 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3151 }
3152 } else {
3153 if (dpl != rpl)
3154 {
3155#if defined(VBOX) && defined(DEBUG)
3156 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3157#endif
3158 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3159 }
3160 }
3161 if (!(e2 & DESC_P_MASK))
3162 {
3163#if defined(VBOX) && defined(DEBUG)
3164 printf("DESC_P_MASK e2=%08x\n", e2);
3165#endif
3166 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3167 }
3168
3169 sp += addend;
3170 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3171 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3172 /* return to same privilege level */
3173 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3174 get_seg_base(e1, e2),
3175 get_seg_limit(e1, e2),
3176 e2);
3177 } else {
3178 /* return to different privilege level */
3179#ifdef TARGET_X86_64
3180 if (shift == 2) {
3181 POPQ(sp, new_esp);
3182 POPQ(sp, new_ss);
3183 new_ss &= 0xffff;
3184 } else
3185#endif
3186 if (shift == 1) {
3187 /* 32 bits */
3188 POPL(ssp, sp, sp_mask, new_esp);
3189 POPL(ssp, sp, sp_mask, new_ss);
3190 new_ss &= 0xffff;
3191 } else {
3192 /* 16 bits */
3193 POPW(ssp, sp, sp_mask, new_esp);
3194 POPW(ssp, sp, sp_mask, new_ss);
3195 }
3196 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3197 new_ss, new_esp);
3198 if ((new_ss & 0xfffc) == 0) {
3199#ifdef TARGET_X86_64
3200 /* NULL ss is allowed in long mode if cpl != 3 */
3201 /* XXX: test CS64 ? */
3202 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3203 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3204 0, 0xffffffff,
3205 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3206 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3207 DESC_W_MASK | DESC_A_MASK);
3208 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3209 } else
3210#endif
3211 {
3212 raise_exception_err(EXCP0D_GPF, 0);
3213 }
3214 } else {
3215 if ((new_ss & 3) != rpl)
3216 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3217 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3218 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3219 if (!(ss_e2 & DESC_S_MASK) ||
3220 (ss_e2 & DESC_CS_MASK) ||
3221 !(ss_e2 & DESC_W_MASK))
3222 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3223 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3224 if (dpl != rpl)
3225 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3226 if (!(ss_e2 & DESC_P_MASK))
3227 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3228 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3229 get_seg_base(ss_e1, ss_e2),
3230 get_seg_limit(ss_e1, ss_e2),
3231 ss_e2);
3232 }
3233
3234 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3235 get_seg_base(e1, e2),
3236 get_seg_limit(e1, e2),
3237 e2);
3238 cpu_x86_set_cpl(env, rpl);
3239 sp = new_esp;
3240#ifdef TARGET_X86_64
3241 if (env->hflags & HF_CS64_MASK)
3242 sp_mask = -1;
3243 else
3244#endif
3245 sp_mask = get_sp_mask(ss_e2);
3246
3247 /* validate data segments */
3248 validate_seg(R_ES, rpl);
3249 validate_seg(R_DS, rpl);
3250 validate_seg(R_FS, rpl);
3251 validate_seg(R_GS, rpl);
3252
3253 sp += addend;
3254 }
3255 SET_ESP(sp, sp_mask);
3256 env->eip = new_eip;
3257 if (is_iret) {
3258 /* NOTE: 'cpl' is the _old_ CPL */
3259 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3260 if (cpl == 0)
3261#ifdef VBOX
3262 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3263#else
3264 eflags_mask |= IOPL_MASK;
3265#endif
3266 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3267 if (cpl <= iopl)
3268 eflags_mask |= IF_MASK;
3269 if (shift == 0)
3270 eflags_mask &= 0xffff;
3271 load_eflags(new_eflags, eflags_mask);
3272 }
3273 return;
3274
3275 return_to_vm86:
3276 POPL(ssp, sp, sp_mask, new_esp);
3277 POPL(ssp, sp, sp_mask, new_ss);
3278 POPL(ssp, sp, sp_mask, new_es);
3279 POPL(ssp, sp, sp_mask, new_ds);
3280 POPL(ssp, sp, sp_mask, new_fs);
3281 POPL(ssp, sp, sp_mask, new_gs);
3282
3283 /* modify processor state */
3284 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3285 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3286 load_seg_vm(R_CS, new_cs & 0xffff);
3287 cpu_x86_set_cpl(env, 3);
3288 load_seg_vm(R_SS, new_ss & 0xffff);
3289 load_seg_vm(R_ES, new_es & 0xffff);
3290 load_seg_vm(R_DS, new_ds & 0xffff);
3291 load_seg_vm(R_FS, new_fs & 0xffff);
3292 load_seg_vm(R_GS, new_gs & 0xffff);
3293
3294 env->eip = new_eip & 0xffff;
3295 ESP = new_esp;
3296}
3297
3298void helper_iret_protected(int shift, int next_eip)
3299{
3300 int tss_selector, type;
3301 uint32_t e1, e2;
3302
3303#ifdef VBOX
3304 e1 = e2 = 0; /** @todo Why do we do this? */
3305 remR3TrapClear(env->pVM);
3306#endif
3307
3308 /* specific case for TSS */
3309 if (env->eflags & NT_MASK) {
3310#ifdef TARGET_X86_64
3311 if (env->hflags & HF_LMA_MASK)
3312 raise_exception_err(EXCP0D_GPF, 0);
3313#endif
3314 tss_selector = lduw_kernel(env->tr.base + 0);
3315 if (tss_selector & 4)
3316 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3317 if (load_segment(&e1, &e2, tss_selector) != 0)
3318 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3319 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3320 /* NOTE: we check both segment and busy TSS */
3321 if (type != 3)
3322 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3323 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3324 } else {
3325 helper_ret_protected(shift, 1, 0);
3326 }
3327 env->hflags2 &= ~HF2_NMI_MASK;
3328#ifdef CONFIG_KQEMU
3329 if (kqemu_is_ok(env)) {
3330 CC_OP = CC_OP_EFLAGS;
3331 env->exception_index = -1;
3332 cpu_loop_exit();
3333 }
3334#endif
3335}
3336
3337void helper_lret_protected(int shift, int addend)
3338{
3339 helper_ret_protected(shift, 0, addend);
3340#ifdef CONFIG_KQEMU
3341 if (kqemu_is_ok(env)) {
3342 env->exception_index = -1;
3343 cpu_loop_exit();
3344 }
3345#endif
3346}
3347
3348void helper_sysenter(void)
3349{
3350 if (env->sysenter_cs == 0) {
3351 raise_exception_err(EXCP0D_GPF, 0);
3352 }
3353 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3354 cpu_x86_set_cpl(env, 0);
3355
3356#ifdef TARGET_X86_64
3357 if (env->hflags & HF_LMA_MASK) {
3358 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3359 0, 0xffffffff,
3360 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3361 DESC_S_MASK |
3362 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3363 } else
3364#endif
3365 {
3366 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3367 0, 0xffffffff,
3368 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3369 DESC_S_MASK |
3370 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3371 }
3372 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3373 0, 0xffffffff,
3374 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3375 DESC_S_MASK |
3376 DESC_W_MASK | DESC_A_MASK);
3377 ESP = env->sysenter_esp;
3378 EIP = env->sysenter_eip;
3379}
3380
3381void helper_sysexit(int dflag)
3382{
3383 int cpl;
3384
3385 cpl = env->hflags & HF_CPL_MASK;
3386 if (env->sysenter_cs == 0 || cpl != 0) {
3387 raise_exception_err(EXCP0D_GPF, 0);
3388 }
3389 cpu_x86_set_cpl(env, 3);
3390#ifdef TARGET_X86_64
3391 if (dflag == 2) {
3392 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3393 0, 0xffffffff,
3394 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3395 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3396 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3397 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3398 0, 0xffffffff,
3399 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3400 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3401 DESC_W_MASK | DESC_A_MASK);
3402 } else
3403#endif
3404 {
3405 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3406 0, 0xffffffff,
3407 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3408 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3409 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3410 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3411 0, 0xffffffff,
3412 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3413 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3414 DESC_W_MASK | DESC_A_MASK);
3415 }
3416 ESP = ECX;
3417 EIP = EDX;
3418#ifdef CONFIG_KQEMU
3419 if (kqemu_is_ok(env)) {
3420 env->exception_index = -1;
3421 cpu_loop_exit();
3422 }
3423#endif
3424}
3425
3426#if defined(CONFIG_USER_ONLY)
3427target_ulong helper_read_crN(int reg)
3428{
3429 return 0;
3430}
3431
3432void helper_write_crN(int reg, target_ulong t0)
3433{
3434}
3435
3436void helper_movl_drN_T0(int reg, target_ulong t0)
3437{
3438}
3439#else
3440target_ulong helper_read_crN(int reg)
3441{
3442 target_ulong val;
3443
3444 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3445 switch(reg) {
3446 default:
3447 val = env->cr[reg];
3448 break;
3449 case 8:
3450 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3451 val = cpu_get_apic_tpr(env);
3452 } else {
3453 val = env->v_tpr;
3454 }
3455 break;
3456 }
3457 return val;
3458}
3459
3460void helper_write_crN(int reg, target_ulong t0)
3461{
3462 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3463 switch(reg) {
3464 case 0:
3465 cpu_x86_update_cr0(env, t0);
3466 break;
3467 case 3:
3468 cpu_x86_update_cr3(env, t0);
3469 break;
3470 case 4:
3471 cpu_x86_update_cr4(env, t0);
3472 break;
3473 case 8:
3474 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3475 cpu_set_apic_tpr(env, t0);
3476 }
3477 env->v_tpr = t0 & 0x0f;
3478 break;
3479 default:
3480 env->cr[reg] = t0;
3481 break;
3482 }
3483}
3484
3485void helper_movl_drN_T0(int reg, target_ulong t0)
3486{
3487 int i;
3488
3489 if (reg < 4) {
3490 hw_breakpoint_remove(env, reg);
3491 env->dr[reg] = t0;
3492 hw_breakpoint_insert(env, reg);
3493 } else if (reg == 7) {
3494 for (i = 0; i < 4; i++)
3495 hw_breakpoint_remove(env, i);
3496 env->dr[7] = t0;
3497 for (i = 0; i < 4; i++)
3498 hw_breakpoint_insert(env, i);
3499 } else
3500 env->dr[reg] = t0;
3501}
3502#endif
3503
3504void helper_lmsw(target_ulong t0)
3505{
3506 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3507 if already set to one. */
3508 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3509 helper_write_crN(0, t0);
3510}
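/* Example: with CR0 = 0x8000001f, LMSW of 0x0001 yields 0x80000011:
   MP/EM/TS are cleared, but PE stays set because bit 0 of the old CR0 is
   preserved by the ~0xe mask. */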
3511
3512void helper_clts(void)
3513{
3514 env->cr[0] &= ~CR0_TS_MASK;
3515 env->hflags &= ~HF_TS_MASK;
3516}
3517
3518void helper_invlpg(target_ulong addr)
3519{
3520 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3521 tlb_flush_page(env, addr);
3522}
3523
3524void helper_rdtsc(void)
3525{
3526 uint64_t val;
3527
3528 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3529 raise_exception(EXCP0D_GPF);
3530 }
3531 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3532
3533 val = cpu_get_tsc(env) + env->tsc_offset;
3534 EAX = (uint32_t)(val);
3535 EDX = (uint32_t)(val >> 32);
3536}
3537
3538#ifdef VBOX
3539void helper_rdtscp(void)
3540{
3541 uint64_t val;
3542 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3543 raise_exception(EXCP0D_GPF);
3544 }
3545
3546 val = cpu_get_tsc(env);
3547 EAX = (uint32_t)(val);
3548 EDX = (uint32_t)(val >> 32);
3549 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3550 ECX = (uint32_t)(val);
3551 else
3552 ECX = 0;
3553}
3554#endif /* VBOX */
3555
3556void helper_rdpmc(void)
3557{
3558#ifdef VBOX
3559 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3560 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3561 raise_exception(EXCP0D_GPF);
3562 }
3563 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3564 EAX = 0;
3565 EDX = 0;
3566#else /* !VBOX */
3567 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3568 raise_exception(EXCP0D_GPF);
3569 }
3570 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3571
3572 /* currently unimplemented */
3573 raise_exception_err(EXCP06_ILLOP, 0);
3574#endif /* !VBOX */
3575}
3576
3577#if defined(CONFIG_USER_ONLY)
3578void helper_wrmsr(void)
3579{
3580}
3581
3582void helper_rdmsr(void)
3583{
3584}
3585#else
3586void helper_wrmsr(void)
3587{
3588 uint64_t val;
3589
3590 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3591
3592 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3593
3594 switch((uint32_t)ECX) {
3595 case MSR_IA32_SYSENTER_CS:
3596 env->sysenter_cs = val & 0xffff;
3597 break;
3598 case MSR_IA32_SYSENTER_ESP:
3599 env->sysenter_esp = val;
3600 break;
3601 case MSR_IA32_SYSENTER_EIP:
3602 env->sysenter_eip = val;
3603 break;
3604 case MSR_IA32_APICBASE:
3605# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3606 cpu_set_apic_base(env, val);
3607# endif
3608 break;
3609 case MSR_EFER:
3610 {
3611 uint64_t update_mask;
3612 update_mask = 0;
3613 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3614 update_mask |= MSR_EFER_SCE;
3615 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3616 update_mask |= MSR_EFER_LME;
3617 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3618 update_mask |= MSR_EFER_FFXSR;
3619 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3620 update_mask |= MSR_EFER_NXE;
3621 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3622 update_mask |= MSR_EFER_SVME;
3623 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3624 update_mask |= MSR_EFER_FFXSR;
3625 cpu_load_efer(env, (env->efer & ~update_mask) |
3626 (val & update_mask));
3627 }
3628 break;
3629 case MSR_STAR:
3630 env->star = val;
3631 break;
3632 case MSR_PAT:
3633 env->pat = val;
3634 break;
3635 case MSR_VM_HSAVE_PA:
3636 env->vm_hsave = val;
3637 break;
3638#ifdef TARGET_X86_64
3639 case MSR_LSTAR:
3640 env->lstar = val;
3641 break;
3642 case MSR_CSTAR:
3643 env->cstar = val;
3644 break;
3645 case MSR_FMASK:
3646 env->fmask = val;
3647 break;
3648 case MSR_FSBASE:
3649 env->segs[R_FS].base = val;
3650 break;
3651 case MSR_GSBASE:
3652 env->segs[R_GS].base = val;
3653 break;
3654 case MSR_KERNELGSBASE:
3655 env->kernelgsbase = val;
3656 break;
3657#endif
3658# ifndef VBOX
3659 case MSR_MTRRphysBase(0):
3660 case MSR_MTRRphysBase(1):
3661 case MSR_MTRRphysBase(2):
3662 case MSR_MTRRphysBase(3):
3663 case MSR_MTRRphysBase(4):
3664 case MSR_MTRRphysBase(5):
3665 case MSR_MTRRphysBase(6):
3666 case MSR_MTRRphysBase(7):
3667 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3668 break;
3669 case MSR_MTRRphysMask(0):
3670 case MSR_MTRRphysMask(1):
3671 case MSR_MTRRphysMask(2):
3672 case MSR_MTRRphysMask(3):
3673 case MSR_MTRRphysMask(4):
3674 case MSR_MTRRphysMask(5):
3675 case MSR_MTRRphysMask(6):
3676 case MSR_MTRRphysMask(7):
3677 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3678 break;
3679 case MSR_MTRRfix64K_00000:
3680 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3681 break;
3682 case MSR_MTRRfix16K_80000:
3683 case MSR_MTRRfix16K_A0000:
3684 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3685 break;
3686 case MSR_MTRRfix4K_C0000:
3687 case MSR_MTRRfix4K_C8000:
3688 case MSR_MTRRfix4K_D0000:
3689 case MSR_MTRRfix4K_D8000:
3690 case MSR_MTRRfix4K_E0000:
3691 case MSR_MTRRfix4K_E8000:
3692 case MSR_MTRRfix4K_F0000:
3693 case MSR_MTRRfix4K_F8000:
3694 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3695 break;
3696 case MSR_MTRRdefType:
3697 env->mtrr_deftype = val;
3698 break;
3699 case MSR_MCG_STATUS:
3700 env->mcg_status = val;
3701 break;
3702 case MSR_MCG_CTL:
3703 if ((env->mcg_cap & MCG_CTL_P)
3704 && (val == 0 || val == ~(uint64_t)0))
3705 env->mcg_ctl = val;
3706 break;
3707# endif /* !VBOX */
3708 default:
3709# ifndef VBOX
3710 if ((uint32_t)ECX >= MSR_MC0_CTL
3711 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3712 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3713 if ((offset & 0x3) != 0
3714 || (val == 0 || val == ~(uint64_t)0))
3715 env->mce_banks[offset] = val;
3716 break;
3717 }
3718 /* XXX: exception ? */
3719# endif
3720 break;
3721 }
3722
3723# ifdef VBOX
3724 /* call CPUM. */
3725 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3726 {
3727 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3728 }
3729# endif
3730}
3731
3732void helper_rdmsr(void)
3733{
3734 uint64_t val;
3735
3736 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3737
3738 switch((uint32_t)ECX) {
3739 case MSR_IA32_SYSENTER_CS:
3740 val = env->sysenter_cs;
3741 break;
3742 case MSR_IA32_SYSENTER_ESP:
3743 val = env->sysenter_esp;
3744 break;
3745 case MSR_IA32_SYSENTER_EIP:
3746 val = env->sysenter_eip;
3747 break;
3748 case MSR_IA32_APICBASE:
3749 val = cpu_get_apic_base(env);
3750 break;
3751 case MSR_EFER:
3752 val = env->efer;
3753 break;
3754 case MSR_STAR:
3755 val = env->star;
3756 break;
3757 case MSR_PAT:
3758 val = env->pat;
3759 break;
3760 case MSR_VM_HSAVE_PA:
3761 val = env->vm_hsave;
3762 break;
3763# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3764 case MSR_IA32_PERF_STATUS:
3765 /* tsc_increment_by_tick */
3766 val = 1000ULL;
3767 /* CPU multiplier */
3768 val |= (((uint64_t)4ULL) << 40);
3769 break;
3770# endif /* !VBOX */
3771#ifdef TARGET_X86_64
3772 case MSR_LSTAR:
3773 val = env->lstar;
3774 break;
3775 case MSR_CSTAR:
3776 val = env->cstar;
3777 break;
3778 case MSR_FMASK:
3779 val = env->fmask;
3780 break;
3781 case MSR_FSBASE:
3782 val = env->segs[R_FS].base;
3783 break;
3784 case MSR_GSBASE:
3785 val = env->segs[R_GS].base;
3786 break;
3787 case MSR_KERNELGSBASE:
3788 val = env->kernelgsbase;
3789 break;
3790#endif
3791#ifdef CONFIG_KQEMU
3792 case MSR_QPI_COMMBASE:
3793 if (env->kqemu_enabled) {
3794 val = kqemu_comm_base;
3795 } else {
3796 val = 0;
3797 }
3798 break;
3799#endif
3800# ifndef VBOX
3801 case MSR_MTRRphysBase(0):
3802 case MSR_MTRRphysBase(1):
3803 case MSR_MTRRphysBase(2):
3804 case MSR_MTRRphysBase(3):
3805 case MSR_MTRRphysBase(4):
3806 case MSR_MTRRphysBase(5):
3807 case MSR_MTRRphysBase(6):
3808 case MSR_MTRRphysBase(7):
3809 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3810 break;
3811 case MSR_MTRRphysMask(0):
3812 case MSR_MTRRphysMask(1):
3813 case MSR_MTRRphysMask(2):
3814 case MSR_MTRRphysMask(3):
3815 case MSR_MTRRphysMask(4):
3816 case MSR_MTRRphysMask(5):
3817 case MSR_MTRRphysMask(6):
3818 case MSR_MTRRphysMask(7):
3819 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3820 break;
3821 case MSR_MTRRfix64K_00000:
3822 val = env->mtrr_fixed[0];
3823 break;
3824 case MSR_MTRRfix16K_80000:
3825 case MSR_MTRRfix16K_A0000:
3826 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3827 break;
3828 case MSR_MTRRfix4K_C0000:
3829 case MSR_MTRRfix4K_C8000:
3830 case MSR_MTRRfix4K_D0000:
3831 case MSR_MTRRfix4K_D8000:
3832 case MSR_MTRRfix4K_E0000:
3833 case MSR_MTRRfix4K_E8000:
3834 case MSR_MTRRfix4K_F0000:
3835 case MSR_MTRRfix4K_F8000:
3836 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3837 break;
3838 case MSR_MTRRdefType:
3839 val = env->mtrr_deftype;
3840 break;
3841 case MSR_MTRRcap:
3842 if (env->cpuid_features & CPUID_MTRR)
3843 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3844 else
3845 /* XXX: exception ? */
3846 val = 0;
3847 break;
3848 case MSR_MCG_CAP:
3849 val = env->mcg_cap;
3850 break;
3851 case MSR_MCG_CTL:
3852 if (env->mcg_cap & MCG_CTL_P)
3853 val = env->mcg_ctl;
3854 else
3855 val = 0;
3856 break;
3857 case MSR_MCG_STATUS:
3858 val = env->mcg_status;
3859 break;
3860# endif /* !VBOX */
3861 default:
3862# ifndef VBOX
3863 if ((uint32_t)ECX >= MSR_MC0_CTL
3864 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3865 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3866 val = env->mce_banks[offset];
3867 break;
3868 }
3869 /* XXX: exception ? */
3870 val = 0;
3871# else /* VBOX */
3872 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3873 {
3874 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3875 val = 0;
3876 }
3877# endif /* VBOX */
3878 break;
3879 }
3880 EAX = (uint32_t)(val);
3881 EDX = (uint32_t)(val >> 32);
3882
3883# ifdef VBOX_STRICT
3884 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3885 val = 0;
3886 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3887# endif
3888}
3889#endif
3890
3891target_ulong helper_lsl(target_ulong selector1)
3892{
3893 unsigned int limit;
3894 uint32_t e1, e2, eflags, selector;
3895 int rpl, dpl, cpl, type;
3896
3897 selector = selector1 & 0xffff;
3898 eflags = helper_cc_compute_all(CC_OP);
3899 if ((selector & 0xfffc) == 0)
3900 goto fail;
3901 if (load_segment(&e1, &e2, selector) != 0)
3902 goto fail;
3903 rpl = selector & 3;
3904 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3905 cpl = env->hflags & HF_CPL_MASK;
3906 if (e2 & DESC_S_MASK) {
3907 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3908 /* conforming */
3909 } else {
3910 if (dpl < cpl || dpl < rpl)
3911 goto fail;
3912 }
3913 } else {
3914 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3915 switch(type) {
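/* System descriptor types for which LSL is defined: available/busy TSS (1, 3, 9, 11) and LDT (2). */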
3916 case 1:
3917 case 2:
3918 case 3:
3919 case 9:
3920 case 11:
3921 break;
3922 default:
3923 goto fail;
3924 }
3925 if (dpl < cpl || dpl < rpl) {
3926 fail:
3927 CC_SRC = eflags & ~CC_Z;
3928 return 0;
3929 }
3930 }
3931 limit = get_seg_limit(e1, e2);
3932 CC_SRC = eflags | CC_Z;
3933 return limit;
3934}
3935
3936target_ulong helper_lar(target_ulong selector1)
3937{
3938 uint32_t e1, e2, eflags, selector;
3939 int rpl, dpl, cpl, type;
3940
3941 selector = selector1 & 0xffff;
3942 eflags = helper_cc_compute_all(CC_OP);
3943 if ((selector & 0xfffc) == 0)
3944 goto fail;
3945 if (load_segment(&e1, &e2, selector) != 0)
3946 goto fail;
3947 rpl = selector & 3;
3948 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3949 cpl = env->hflags & HF_CPL_MASK;
3950 if (e2 & DESC_S_MASK) {
3951 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3952 /* conforming */
3953 } else {
3954 if (dpl < cpl || dpl < rpl)
3955 goto fail;
3956 }
3957 } else {
3958 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3959 switch(type) {
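/* Descriptor types accepted by LAR: TSS (1, 3, 9, 11), LDT (2), call gates (4, 12) and task gate (5). */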
3960 case 1:
3961 case 2:
3962 case 3:
3963 case 4:
3964 case 5:
3965 case 9:
3966 case 11:
3967 case 12:
3968 break;
3969 default:
3970 goto fail;
3971 }
3972 if (dpl < cpl || dpl < rpl) {
3973 fail:
3974 CC_SRC = eflags & ~CC_Z;
3975 return 0;
3976 }
3977 }
3978 CC_SRC = eflags | CC_Z;
3979 return e2 & 0x00f0ff00;
3980}
3981
3982void helper_verr(target_ulong selector1)
3983{
3984 uint32_t e1, e2, eflags, selector;
3985 int rpl, dpl, cpl;
3986
3987 selector = selector1 & 0xffff;
3988 eflags = helper_cc_compute_all(CC_OP);
3989 if ((selector & 0xfffc) == 0)
3990 goto fail;
3991 if (load_segment(&e1, &e2, selector) != 0)
3992 goto fail;
3993 if (!(e2 & DESC_S_MASK))
3994 goto fail;
3995 rpl = selector & 3;
3996 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3997 cpl = env->hflags & HF_CPL_MASK;
3998 if (e2 & DESC_CS_MASK) {
3999 if (!(e2 & DESC_R_MASK))
4000 goto fail;
4001 if (!(e2 & DESC_C_MASK)) {
4002 if (dpl < cpl || dpl < rpl)
4003 goto fail;
4004 }
4005 } else {
4006 if (dpl < cpl || dpl < rpl) {
4007 fail:
4008 CC_SRC = eflags & ~CC_Z;
4009 return;
4010 }
4011 }
4012 CC_SRC = eflags | CC_Z;
4013}
4014
4015void helper_verw(target_ulong selector1)
4016{
4017 uint32_t e1, e2, eflags, selector;
4018 int rpl, dpl, cpl;
4019
4020 selector = selector1 & 0xffff;
4021 eflags = helper_cc_compute_all(CC_OP);
4022 if ((selector & 0xfffc) == 0)
4023 goto fail;
4024 if (load_segment(&e1, &e2, selector) != 0)
4025 goto fail;
4026 if (!(e2 & DESC_S_MASK))
4027 goto fail;
4028 rpl = selector & 3;
4029 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4030 cpl = env->hflags & HF_CPL_MASK;
4031 if (e2 & DESC_CS_MASK) {
4032 goto fail;
4033 } else {
4034 if (dpl < cpl || dpl < rpl)
4035 goto fail;
4036 if (!(e2 & DESC_W_MASK)) {
4037 fail:
4038 CC_SRC = eflags & ~CC_Z;
4039 return;
4040 }
4041 }
4042 CC_SRC = eflags | CC_Z;
4043}
4044
4045/* x87 FPU helpers */
4046
4047static void fpu_set_exception(int mask)
4048{
4049 env->fpus |= mask;
4050 if (env->fpus & (~env->fpuc & FPUC_EM))
4051 env->fpus |= FPUS_SE | FPUS_B;
4052}
4053
4054static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4055{
4056 if (b == 0.0)
4057 fpu_set_exception(FPUS_ZE);
4058 return a / b;
4059}
4060
4061static void fpu_raise_exception(void)
4062{
4063 if (env->cr[0] & CR0_NE_MASK) {
4064 raise_exception(EXCP10_COPR);
4065 }
4066#if !defined(CONFIG_USER_ONLY)
4067 else {
4068 cpu_set_ferr(env);
4069 }
4070#endif
4071}
4072
4073void helper_flds_FT0(uint32_t val)
4074{
4075 union {
4076 float32 f;
4077 uint32_t i;
4078 } u;
4079 u.i = val;
4080 FT0 = float32_to_floatx(u.f, &env->fp_status);
4081}
4082
4083void helper_fldl_FT0(uint64_t val)
4084{
4085 union {
4086 float64 f;
4087 uint64_t i;
4088 } u;
4089 u.i = val;
4090 FT0 = float64_to_floatx(u.f, &env->fp_status);
4091}
4092
4093void helper_fildl_FT0(int32_t val)
4094{
4095 FT0 = int32_to_floatx(val, &env->fp_status);
4096}
4097
4098void helper_flds_ST0(uint32_t val)
4099{
4100 int new_fpstt;
4101 union {
4102 float32 f;
4103 uint32_t i;
4104 } u;
4105 new_fpstt = (env->fpstt - 1) & 7;
4106 u.i = val;
4107 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4108 env->fpstt = new_fpstt;
4109 env->fptags[new_fpstt] = 0; /* validate stack entry */
4110}
4111
4112void helper_fldl_ST0(uint64_t val)
4113{
4114 int new_fpstt;
4115 union {
4116 float64 f;
4117 uint64_t i;
4118 } u;
4119 new_fpstt = (env->fpstt - 1) & 7;
4120 u.i = val;
4121 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4122 env->fpstt = new_fpstt;
4123 env->fptags[new_fpstt] = 0; /* validate stack entry */
4124}
4125
4126void helper_fildl_ST0(int32_t val)
4127{
4128 int new_fpstt;
4129 new_fpstt = (env->fpstt - 1) & 7;
4130 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4131 env->fpstt = new_fpstt;
4132 env->fptags[new_fpstt] = 0; /* validate stack entry */
4133}
4134
4135void helper_fildll_ST0(int64_t val)
4136{
4137 int new_fpstt;
4138 new_fpstt = (env->fpstt - 1) & 7;
4139 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4140 env->fpstt = new_fpstt;
4141 env->fptags[new_fpstt] = 0; /* validate stack entry */
4142}
4143
4144#ifndef VBOX
4145uint32_t helper_fsts_ST0(void)
4146#else
4147RTCCUINTREG helper_fsts_ST0(void)
4148#endif
4149{
4150 union {
4151 float32 f;
4152 uint32_t i;
4153 } u;
4154 u.f = floatx_to_float32(ST0, &env->fp_status);
4155 return u.i;
4156}
4157
4158uint64_t helper_fstl_ST0(void)
4159{
4160 union {
4161 float64 f;
4162 uint64_t i;
4163 } u;
4164 u.f = floatx_to_float64(ST0, &env->fp_status);
4165 return u.i;
4166}
4167
4168#ifndef VBOX
4169int32_t helper_fist_ST0(void)
4170#else
4171RTCCINTREG helper_fist_ST0(void)
4172#endif
4173{
4174 int32_t val;
4175 val = floatx_to_int32(ST0, &env->fp_status);
4176 if (val != (int16_t)val)
4177 val = -32768;
4178 return val;
4179}
4180
4181#ifndef VBOX
4182int32_t helper_fistl_ST0(void)
4183#else
4184RTCCINTREG helper_fistl_ST0(void)
4185#endif
4186{
4187 int32_t val;
4188 val = floatx_to_int32(ST0, &env->fp_status);
4189 return val;
4190}
4191
4192int64_t helper_fistll_ST0(void)
4193{
4194 int64_t val;
4195 val = floatx_to_int64(ST0, &env->fp_status);
4196 return val;
4197}
4198
4199#ifndef VBOX
4200int32_t helper_fistt_ST0(void)
4201#else
4202RTCCINTREG helper_fistt_ST0(void)
4203#endif
4204{
4205 int32_t val;
4206 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4207 if (val != (int16_t)val)
4208 val = -32768;
4209 return val;
4210}
4211
4212#ifndef VBOX
4213int32_t helper_fisttl_ST0(void)
4214#else
4215RTCCINTREG helper_fisttl_ST0(void)
4216#endif
4217{
4218 int32_t val;
4219 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4220 return val;
4221}
4222
4223int64_t helper_fisttll_ST0(void)
4224{
4225 int64_t val;
4226 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4227 return val;
4228}
4229
4230void helper_fldt_ST0(target_ulong ptr)
4231{
4232 int new_fpstt;
4233 new_fpstt = (env->fpstt - 1) & 7;
4234 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4235 env->fpstt = new_fpstt;
4236 env->fptags[new_fpstt] = 0; /* validate stack entry */
4237}
4238
4239void helper_fstt_ST0(target_ulong ptr)
4240{
4241 helper_fstt(ST0, ptr);
4242}
4243
4244void helper_fpush(void)
4245{
4246 fpush();
4247}
4248
4249void helper_fpop(void)
4250{
4251 fpop();
4252}
4253
4254void helper_fdecstp(void)
4255{
4256 env->fpstt = (env->fpstt - 1) & 7;
4257 env->fpus &= (~0x4700);
4258}
4259
4260void helper_fincstp(void)
4261{
4262 env->fpstt = (env->fpstt + 1) & 7;
4263 env->fpus &= (~0x4700);
4264}
4265
4266/* FPU move */
4267
4268void helper_ffree_STN(int st_index)
4269{
4270 env->fptags[(env->fpstt + st_index) & 7] = 1;
4271}
4272
4273void helper_fmov_ST0_FT0(void)
4274{
4275 ST0 = FT0;
4276}
4277
4278void helper_fmov_FT0_STN(int st_index)
4279{
4280 FT0 = ST(st_index);
4281}
4282
4283void helper_fmov_ST0_STN(int st_index)
4284{
4285 ST0 = ST(st_index);
4286}
4287
4288void helper_fmov_STN_ST0(int st_index)
4289{
4290 ST(st_index) = ST0;
4291}
4292
4293void helper_fxchg_ST0_STN(int st_index)
4294{
4295 CPU86_LDouble tmp;
4296 tmp = ST(st_index);
4297 ST(st_index) = ST0;
4298 ST0 = tmp;
4299}
4300
4301/* FPU operations */
4302
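/* Maps the floatx_compare() result (-1 less, 0 equal, 1 greater, 2 unordered), offset by one,
   onto the FPU condition codes: less -> C0, equal -> C3, greater -> none, unordered -> C3|C2|C0. */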
4303static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4304
4305void helper_fcom_ST0_FT0(void)
4306{
4307 int ret;
4308
4309 ret = floatx_compare(ST0, FT0, &env->fp_status);
4310 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4311}
4312
4313void helper_fucom_ST0_FT0(void)
4314{
4315 int ret;
4316
4317 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4318 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4319}
4320
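/* Same mapping as fcom_ccval above, but onto the EFLAGS bits set by FCOMI/FUCOMI: CF, ZF, PF. */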
4321static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4322
4323void helper_fcomi_ST0_FT0(void)
4324{
4325 int eflags;
4326 int ret;
4327
4328 ret = floatx_compare(ST0, FT0, &env->fp_status);
4329 eflags = helper_cc_compute_all(CC_OP);
4330 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4331 CC_SRC = eflags;
4332}
4333
4334void helper_fucomi_ST0_FT0(void)
4335{
4336 int eflags;
4337 int ret;
4338
4339 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4340 eflags = helper_cc_compute_all(CC_OP);
4341 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4342 CC_SRC = eflags;
4343}
4344
4345void helper_fadd_ST0_FT0(void)
4346{
4347 ST0 += FT0;
4348}
4349
4350void helper_fmul_ST0_FT0(void)
4351{
4352 ST0 *= FT0;
4353}
4354
4355void helper_fsub_ST0_FT0(void)
4356{
4357 ST0 -= FT0;
4358}
4359
4360void helper_fsubr_ST0_FT0(void)
4361{
4362 ST0 = FT0 - ST0;
4363}
4364
4365void helper_fdiv_ST0_FT0(void)
4366{
4367 ST0 = helper_fdiv(ST0, FT0);
4368}
4369
4370void helper_fdivr_ST0_FT0(void)
4371{
4372 ST0 = helper_fdiv(FT0, ST0);
4373}
4374
4375/* fp operations between STN and ST0 */
4376
4377void helper_fadd_STN_ST0(int st_index)
4378{
4379 ST(st_index) += ST0;
4380}
4381
4382void helper_fmul_STN_ST0(int st_index)
4383{
4384 ST(st_index) *= ST0;
4385}
4386
4387void helper_fsub_STN_ST0(int st_index)
4388{
4389 ST(st_index) -= ST0;
4390}
4391
4392void helper_fsubr_STN_ST0(int st_index)
4393{
4394 CPU86_LDouble *p;
4395 p = &ST(st_index);
4396 *p = ST0 - *p;
4397}
4398
4399void helper_fdiv_STN_ST0(int st_index)
4400{
4401 CPU86_LDouble *p;
4402 p = &ST(st_index);
4403 *p = helper_fdiv(*p, ST0);
4404}
4405
4406void helper_fdivr_STN_ST0(int st_index)
4407{
4408 CPU86_LDouble *p;
4409 p = &ST(st_index);
4410 *p = helper_fdiv(ST0, *p);
4411}
4412
4413/* misc FPU operations */
4414void helper_fchs_ST0(void)
4415{
4416 ST0 = floatx_chs(ST0);
4417}
4418
4419void helper_fabs_ST0(void)
4420{
4421 ST0 = floatx_abs(ST0);
4422}
4423
4424void helper_fld1_ST0(void)
4425{
4426 ST0 = f15rk[1];
4427}
4428
4429void helper_fldl2t_ST0(void)
4430{
4431 ST0 = f15rk[6];
4432}
4433
4434void helper_fldl2e_ST0(void)
4435{
4436 ST0 = f15rk[5];
4437}
4438
4439void helper_fldpi_ST0(void)
4440{
4441 ST0 = f15rk[2];
4442}
4443
4444void helper_fldlg2_ST0(void)
4445{
4446 ST0 = f15rk[3];
4447}
4448
4449void helper_fldln2_ST0(void)
4450{
4451 ST0 = f15rk[4];
4452}
4453
4454void helper_fldz_ST0(void)
4455{
4456 ST0 = f15rk[0];
4457}
4458
4459void helper_fldz_FT0(void)
4460{
4461 FT0 = f15rk[0];
4462}
4463
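/* FNSTSW merges the live top-of-stack pointer back into bits 11-13 of the status word. */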
4464#ifndef VBOX
4465uint32_t helper_fnstsw(void)
4466#else
4467RTCCUINTREG helper_fnstsw(void)
4468#endif
4469{
4470 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4471}
4472
4473#ifndef VBOX
4474uint32_t helper_fnstcw(void)
4475#else
4476RTCCUINTREG helper_fnstcw(void)
4477#endif
4478{
4479 return env->fpuc;
4480}
4481
4482static void update_fp_status(void)
4483{
4484 int rnd_type;
4485
4486 /* set rounding mode */
4487 switch(env->fpuc & RC_MASK) {
4488 default:
4489 case RC_NEAR:
4490 rnd_type = float_round_nearest_even;
4491 break;
4492 case RC_DOWN:
4493 rnd_type = float_round_down;
4494 break;
4495 case RC_UP:
4496 rnd_type = float_round_up;
4497 break;
4498 case RC_CHOP:
4499 rnd_type = float_round_to_zero;
4500 break;
4501 }
4502 set_float_rounding_mode(rnd_type, &env->fp_status);
4503#ifdef FLOATX80
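/* Precision control (FPUC bits 8-9): 0 selects 32-bit, 2 selects 64-bit, 3 (and the reserved value 1) 80-bit rounding precision. */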
4504 switch((env->fpuc >> 8) & 3) {
4505 case 0:
4506 rnd_type = 32;
4507 break;
4508 case 2:
4509 rnd_type = 64;
4510 break;
4511 case 3:
4512 default:
4513 rnd_type = 80;
4514 break;
4515 }
4516 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4517#endif
4518}
4519
4520void helper_fldcw(uint32_t val)
4521{
4522 env->fpuc = val;
4523 update_fp_status();
4524}
4525
4526void helper_fclex(void)
4527{
4528 env->fpus &= 0x7f00;
4529}
4530
4531void helper_fwait(void)
4532{
4533 if (env->fpus & FPUS_SE)
4534 fpu_raise_exception();
4535}
4536
4537void helper_fninit(void)
4538{
4539 env->fpus = 0;
4540 env->fpstt = 0;
4541 env->fpuc = 0x37f;
4542 env->fptags[0] = 1;
4543 env->fptags[1] = 1;
4544 env->fptags[2] = 1;
4545 env->fptags[3] = 1;
4546 env->fptags[4] = 1;
4547 env->fptags[5] = 1;
4548 env->fptags[6] = 1;
4549 env->fptags[7] = 1;
4550}
4551
4552/* BCD ops */
4553
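/* FBLD/FBST use the 80-bit packed BCD format: 9 bytes of two BCD digits each (18 digits, low digit in the low nibble) plus a sign byte with the sign in bit 7. */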
4554void helper_fbld_ST0(target_ulong ptr)
4555{
4556 CPU86_LDouble tmp;
4557 uint64_t val;
4558 unsigned int v;
4559 int i;
4560
4561 val = 0;
4562 for(i = 8; i >= 0; i--) {
4563 v = ldub(ptr + i);
4564 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4565 }
4566 tmp = val;
4567 if (ldub(ptr + 9) & 0x80)
4568 tmp = -tmp;
4569 fpush();
4570 ST0 = tmp;
4571}
4572
4573void helper_fbst_ST0(target_ulong ptr)
4574{
4575 int v;
4576 target_ulong mem_ref, mem_end;
4577 int64_t val;
4578
4579 val = floatx_to_int64(ST0, &env->fp_status);
4580 mem_ref = ptr;
4581 mem_end = mem_ref + 9;
4582 if (val < 0) {
4583 stb(mem_end, 0x80);
4584 val = -val;
4585 } else {
4586 stb(mem_end, 0x00);
4587 }
4588 while (mem_ref < mem_end) {
4589 if (val == 0)
4590 break;
4591 v = val % 100;
4592 val = val / 100;
4593 v = ((v / 10) << 4) | (v % 10);
4594 stb(mem_ref++, v);
4595 }
4596 while (mem_ref < mem_end) {
4597 stb(mem_ref++, 0);
4598 }
4599}
4600
4601void helper_f2xm1(void)
4602{
4603 ST0 = pow(2.0,ST0) - 1.0;
4604}
4605
4606void helper_fyl2x(void)
4607{
4608 CPU86_LDouble fptemp;
4609
4610 fptemp = ST0;
4611 if (fptemp>0.0){
4612 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4613 ST1 *= fptemp;
4614 fpop();
4615 } else {
4616 env->fpus &= (~0x4700);
4617 env->fpus |= 0x400;
4618 }
4619}
4620
4621void helper_fptan(void)
4622{
4623 CPU86_LDouble fptemp;
4624
4625 fptemp = ST0;
4626 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4627 env->fpus |= 0x400;
4628 } else {
4629 ST0 = tan(fptemp);
4630 fpush();
4631 ST0 = 1.0;
4632 env->fpus &= (~0x400); /* C2 <-- 0 */
4633 /* the above code is for |arg| < 2**52 only */
4634 }
4635}
4636
4637void helper_fpatan(void)
4638{
4639 CPU86_LDouble fptemp, fpsrcop;
4640
4641 fpsrcop = ST1;
4642 fptemp = ST0;
4643 ST1 = atan2(fpsrcop,fptemp);
4644 fpop();
4645}
4646
4647void helper_fxtract(void)
4648{
4649 CPU86_LDoubleU temp;
4650 unsigned int expdif;
4651
4652 temp.d = ST0;
4653 expdif = EXPD(temp) - EXPBIAS;
4654 /*DP exponent bias*/
4655 ST0 = expdif;
4656 fpush();
4657 BIASEXPONENT(temp);
4658 ST0 = temp.d;
4659}
4660
4661void helper_fprem1(void)
4662{
4663 CPU86_LDouble dblq, fpsrcop, fptemp;
4664 CPU86_LDoubleU fpsrcop1, fptemp1;
4665 int expdif;
4666 signed long long int q;
4667
4668#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4669 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4670#else
4671 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4672#endif
4673 ST0 = 0.0 / 0.0; /* NaN */
4674 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4675 return;
4676 }
4677
4678 fpsrcop = ST0;
4679 fptemp = ST1;
4680 fpsrcop1.d = fpsrcop;
4681 fptemp1.d = fptemp;
4682 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4683
4684 if (expdif < 0) {
4685 /* optimisation? taken from the AMD docs */
4686 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4687 /* ST0 is unchanged */
4688 return;
4689 }
4690
4691 if (expdif < 53) {
4692 dblq = fpsrcop / fptemp;
4693 /* round dblq towards nearest integer */
4694 dblq = rint(dblq);
4695 ST0 = fpsrcop - fptemp * dblq;
4696
4697 /* convert dblq to q by truncating towards zero */
4698 if (dblq < 0.0)
4699 q = (signed long long int)(-dblq);
4700 else
4701 q = (signed long long int)dblq;
4702
4703 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4704 /* (C0,C3,C1) <-- (q2,q1,q0) */
4705 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4706 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4707 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4708 } else {
4709 env->fpus |= 0x400; /* C2 <-- 1 */
4710 fptemp = pow(2.0, expdif - 50);
4711 fpsrcop = (ST0 / ST1) / fptemp;
4712 /* fpsrcop = integer obtained by chopping */
4713 fpsrcop = (fpsrcop < 0.0) ?
4714 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4715 ST0 -= (ST1 * fpsrcop * fptemp);
4716 }
4717}
4718
4719void helper_fprem(void)
4720{
4721 CPU86_LDouble dblq, fpsrcop, fptemp;
4722 CPU86_LDoubleU fpsrcop1, fptemp1;
4723 int expdif;
4724 signed long long int q;
4725
4726#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4727 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4728#else
4729 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4730#endif
4731 ST0 = 0.0 / 0.0; /* NaN */
4732 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4733 return;
4734 }
4735
4736 fpsrcop = (CPU86_LDouble)ST0;
4737 fptemp = (CPU86_LDouble)ST1;
4738 fpsrcop1.d = fpsrcop;
4739 fptemp1.d = fptemp;
4740 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4741
4742 if (expdif < 0) {
4743 /* optimisation? taken from the AMD docs */
4744 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4745 /* ST0 is unchanged */
4746 return;
4747 }
4748
4749 if ( expdif < 53 ) {
4750 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4751 /* round dblq towards zero */
4752 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4753 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4754
4755 /* convert dblq to q by truncating towards zero */
4756 if (dblq < 0.0)
4757 q = (signed long long int)(-dblq);
4758 else
4759 q = (signed long long int)dblq;
4760
4761 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4762 /* (C0,C3,C1) <-- (q2,q1,q0) */
4763 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4764 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4765 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4766 } else {
4767 int N = 32 + (expdif % 32); /* as per AMD docs */
4768 env->fpus |= 0x400; /* C2 <-- 1 */
4769 fptemp = pow(2.0, (double)(expdif - N));
4770 fpsrcop = (ST0 / ST1) / fptemp;
4771 /* fpsrcop = integer obtained by chopping */
4772 fpsrcop = (fpsrcop < 0.0) ?
4773 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4774 ST0 -= (ST1 * fpsrcop * fptemp);
4775 }
4776}
4777
4778void helper_fyl2xp1(void)
4779{
4780 CPU86_LDouble fptemp;
4781
4782 fptemp = ST0;
4783 if ((fptemp+1.0)>0.0) {
4784 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4785 ST1 *= fptemp;
4786 fpop();
4787 } else {
4788 env->fpus &= (~0x4700);
4789 env->fpus |= 0x400;
4790 }
4791}
4792
4793void helper_fsqrt(void)
4794{
4795 CPU86_LDouble fptemp;
4796
4797 fptemp = ST0;
4798 if (fptemp<0.0) {
4799 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4800 env->fpus |= 0x400;
4801 }
4802 ST0 = sqrt(fptemp);
4803}
4804
4805void helper_fsincos(void)
4806{
4807 CPU86_LDouble fptemp;
4808
4809 fptemp = ST0;
4810 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4811 env->fpus |= 0x400;
4812 } else {
4813 ST0 = sin(fptemp);
4814 fpush();
4815 ST0 = cos(fptemp);
4816 env->fpus &= (~0x400); /* C2 <-- 0 */
4817 /* the above code is for |arg| < 2**63 only */
4818 }
4819}
4820
4821void helper_frndint(void)
4822{
4823 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4824}
4825
4826void helper_fscale(void)
4827{
4828 ST0 = ldexp (ST0, (int)(ST1));
4829}
4830
4831void helper_fsin(void)
4832{
4833 CPU86_LDouble fptemp;
4834
4835 fptemp = ST0;
4836 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4837 env->fpus |= 0x400;
4838 } else {
4839 ST0 = sin(fptemp);
4840 env->fpus &= (~0x400); /* C2 <-- 0 */
4841 /* the above code is for |arg| < 2**53 only */
4842 }
4843}
4844
4845void helper_fcos(void)
4846{
4847 CPU86_LDouble fptemp;
4848
4849 fptemp = ST0;
4850 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4851 env->fpus |= 0x400;
4852 } else {
4853 ST0 = cos(fptemp);
4854 env->fpus &= (~0x400); /* C2 <-- 0 */
4855 /* the above code is for |arg| < 2**63 only */
4856 }
4857}
4858
4859void helper_fxam_ST0(void)
4860{
4861 CPU86_LDoubleU temp;
4862 int expdif;
4863
4864 temp.d = ST0;
4865
4866 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4867 if (SIGND(temp))
4868 env->fpus |= 0x200; /* C1 <-- 1 */
4869
4870 /* XXX: test fptags too */
4871 expdif = EXPD(temp);
4872 if (expdif == MAXEXPD) {
4873#ifdef USE_X86LDOUBLE
4874 if (MANTD(temp) == 0x8000000000000000ULL)
4875#else
4876 if (MANTD(temp) == 0)
4877#endif
4878 env->fpus |= 0x500 /*Infinity*/;
4879 else
4880 env->fpus |= 0x100 /*NaN*/;
4881 } else if (expdif == 0) {
4882 if (MANTD(temp) == 0)
4883 env->fpus |= 0x4000 /*Zero*/;
4884 else
4885 env->fpus |= 0x4400 /*Denormal*/;
4886 } else {
4887 env->fpus |= 0x400;
4888 }
4889}
4890
4891void helper_fstenv(target_ulong ptr, int data32)
4892{
4893 int fpus, fptag, exp, i;
4894 uint64_t mant;
4895 CPU86_LDoubleU tmp;
4896
4897 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4898 fptag = 0;
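/* Build the full 16-bit tag word: two bits per register - 00 valid, 01 zero, 10 special (NaN/infinity/denormal), 11 empty. */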
4899 for (i=7; i>=0; i--) {
4900 fptag <<= 2;
4901 if (env->fptags[i]) {
4902 fptag |= 3;
4903 } else {
4904 tmp.d = env->fpregs[i].d;
4905 exp = EXPD(tmp);
4906 mant = MANTD(tmp);
4907 if (exp == 0 && mant == 0) {
4908 /* zero */
4909 fptag |= 1;
4910 } else if (exp == 0 || exp == MAXEXPD
4911#ifdef USE_X86LDOUBLE
4912 || (mant & (1LL << 63)) == 0
4913#endif
4914 ) {
4915 /* NaNs, infinity, denormal */
4916 fptag |= 2;
4917 }
4918 }
4919 }
4920 if (data32) {
4921 /* 32 bit */
4922 stl(ptr, env->fpuc);
4923 stl(ptr + 4, fpus);
4924 stl(ptr + 8, fptag);
4925 stl(ptr + 12, 0); /* fpip */
4926 stl(ptr + 16, 0); /* fpcs */
4927 stl(ptr + 20, 0); /* fpoo */
4928 stl(ptr + 24, 0); /* fpos */
4929 } else {
4930 /* 16 bit */
4931 stw(ptr, env->fpuc);
4932 stw(ptr + 2, fpus);
4933 stw(ptr + 4, fptag);
4934 stw(ptr + 6, 0);
4935 stw(ptr + 8, 0);
4936 stw(ptr + 10, 0);
4937 stw(ptr + 12, 0);
4938 }
4939}
4940
4941void helper_fldenv(target_ulong ptr, int data32)
4942{
4943 int i, fpus, fptag;
4944
4945 if (data32) {
4946 env->fpuc = lduw(ptr);
4947 fpus = lduw(ptr + 4);
4948 fptag = lduw(ptr + 8);
4949 }
4950 else {
4951 env->fpuc = lduw(ptr);
4952 fpus = lduw(ptr + 2);
4953 fptag = lduw(ptr + 4);
4954 }
4955 env->fpstt = (fpus >> 11) & 7;
4956 env->fpus = fpus & ~0x3800;
4957 for(i = 0;i < 8; i++) {
4958 env->fptags[i] = ((fptag & 3) == 3);
4959 fptag >>= 2;
4960 }
4961}
4962
4963void helper_fsave(target_ulong ptr, int data32)
4964{
4965 CPU86_LDouble tmp;
4966 int i;
4967
4968 helper_fstenv(ptr, data32);
4969
4970 ptr += (14 << data32);
4971 for(i = 0;i < 8; i++) {
4972 tmp = ST(i);
4973 helper_fstt(tmp, ptr);
4974 ptr += 10;
4975 }
4976
4977 /* fninit */
4978 env->fpus = 0;
4979 env->fpstt = 0;
4980 env->fpuc = 0x37f;
4981 env->fptags[0] = 1;
4982 env->fptags[1] = 1;
4983 env->fptags[2] = 1;
4984 env->fptags[3] = 1;
4985 env->fptags[4] = 1;
4986 env->fptags[5] = 1;
4987 env->fptags[6] = 1;
4988 env->fptags[7] = 1;
4989}
4990
4991void helper_frstor(target_ulong ptr, int data32)
4992{
4993 CPU86_LDouble tmp;
4994 int i;
4995
4996 helper_fldenv(ptr, data32);
4997 ptr += (14 << data32);
4998
4999 for(i = 0;i < 8; i++) {
5000 tmp = helper_fldt(ptr);
5001 ST(i) = tmp;
5002 ptr += 10;
5003 }
5004}
5005
5006void helper_fxsave(target_ulong ptr, int data64)
5007{
5008 int fpus, fptag, i, nb_xmm_regs;
5009 CPU86_LDouble tmp;
5010 target_ulong addr;
5011
5012 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5013 fptag = 0;
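/* FXSAVE uses an abridged tag word with one bit per register (1 = non-empty); env->fptags stores the inverse, hence the XOR when storing below. */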
5014 for(i = 0; i < 8; i++) {
5015 fptag |= (env->fptags[i] << i);
5016 }
5017 stw(ptr, env->fpuc);
5018 stw(ptr + 2, fpus);
5019 stw(ptr + 4, fptag ^ 0xff);
5020#ifdef TARGET_X86_64
5021 if (data64) {
5022 stq(ptr + 0x08, 0); /* rip */
5023 stq(ptr + 0x10, 0); /* rdp */
5024 } else
5025#endif
5026 {
5027 stl(ptr + 0x08, 0); /* eip */
5028 stl(ptr + 0x0c, 0); /* sel */
5029 stl(ptr + 0x10, 0); /* dp */
5030 stl(ptr + 0x14, 0); /* sel */
5031 }
5032
5033 addr = ptr + 0x20;
5034 for(i = 0;i < 8; i++) {
5035 tmp = ST(i);
5036 helper_fstt(tmp, addr);
5037 addr += 16;
5038 }
5039
5040 if (env->cr[4] & CR4_OSFXSR_MASK) {
5041 /* XXX: finish it */
5042 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5043 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5044 if (env->hflags & HF_CS64_MASK)
5045 nb_xmm_regs = 16;
5046 else
5047 nb_xmm_regs = 8;
5048 addr = ptr + 0xa0;
5049 /* Fast FXSAVE leaves out the XMM registers */
5050 if (!(env->efer & MSR_EFER_FFXSR)
5051 || (env->hflags & HF_CPL_MASK)
5052 || !(env->hflags & HF_LMA_MASK)) {
5053 for(i = 0; i < nb_xmm_regs; i++) {
5054 stq(addr, env->xmm_regs[i].XMM_Q(0));
5055 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5056 addr += 16;
5057 }
5058 }
5059 }
5060}
5061
5062void helper_fxrstor(target_ulong ptr, int data64)
5063{
5064 int i, fpus, fptag, nb_xmm_regs;
5065 CPU86_LDouble tmp;
5066 target_ulong addr;
5067
5068 env->fpuc = lduw(ptr);
5069 fpus = lduw(ptr + 2);
5070 fptag = lduw(ptr + 4);
5071 env->fpstt = (fpus >> 11) & 7;
5072 env->fpus = fpus & ~0x3800;
5073 fptag ^= 0xff;
5074 for(i = 0;i < 8; i++) {
5075 env->fptags[i] = ((fptag >> i) & 1);
5076 }
5077
5078 addr = ptr + 0x20;
5079 for(i = 0;i < 8; i++) {
5080 tmp = helper_fldt(addr);
5081 ST(i) = tmp;
5082 addr += 16;
5083 }
5084
5085 if (env->cr[4] & CR4_OSFXSR_MASK) {
5086 /* XXX: finish it */
5087 env->mxcsr = ldl(ptr + 0x18);
5088 //ldl(ptr + 0x1c);
5089 if (env->hflags & HF_CS64_MASK)
5090 nb_xmm_regs = 16;
5091 else
5092 nb_xmm_regs = 8;
5093 addr = ptr + 0xa0;
5094 /* Fast FXRESTORE leaves out the XMM registers */
5095 if (!(env->efer & MSR_EFER_FFXSR)
5096 || (env->hflags & HF_CPL_MASK)
5097 || !(env->hflags & HF_LMA_MASK)) {
5098 for(i = 0; i < nb_xmm_regs; i++) {
5099#if !defined(VBOX) || __GNUC__ < 4
5100 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5101 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5102#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5103# if 1
5104 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5105 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5106 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5107 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5108# else
5109 /* this works fine on Mac OS X, gcc 4.0.1 */
5110 uint64_t u64 = ldq(addr);
5111 env->xmm_regs[i].XMM_Q(0) = u64;
5112 u64 = ldq(addr + 8);
5113 env->xmm_regs[i].XMM_Q(1) = u64;
5114# endif
5115#endif
5116 addr += 16;
5117 }
5118 }
5119 }
5120}
5121
5122#ifndef USE_X86LDOUBLE
5123
5124void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5125{
5126 CPU86_LDoubleU temp;
5127 int e;
5128
5129 temp.d = f;
5130 /* mantissa */
5131 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5132 /* exponent + sign */
5133 e = EXPD(temp) - EXPBIAS + 16383;
5134 e |= SIGND(temp) >> 16;
5135 *pexp = e;
5136}
5137
5138CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5139{
5140 CPU86_LDoubleU temp;
5141 int e;
5142 uint64_t ll;
5143
5144 /* XXX: handle overflow ? */
5145 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5146 e |= (upper >> 4) & 0x800; /* sign */
5147 ll = (mant >> 11) & ((1LL << 52) - 1);
5148#ifdef __arm__
5149 temp.l.upper = (e << 20) | (ll >> 32);
5150 temp.l.lower = ll;
5151#else
5152 temp.ll = ll | ((uint64_t)e << 52);
5153#endif
5154 return temp.d;
5155}
5156
5157#else
5158
5159void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5160{
5161 CPU86_LDoubleU temp;
5162
5163 temp.d = f;
5164 *pmant = temp.l.lower;
5165 *pexp = temp.l.upper;
5166}
5167
5168CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5169{
5170 CPU86_LDoubleU temp;
5171
5172 temp.l.upper = upper;
5173 temp.l.lower = mant;
5174 return temp.d;
5175}
5176#endif
5177
5178#ifdef TARGET_X86_64
5179
5180//#define DEBUG_MULDIV
5181
5182static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5183{
5184 *plow += a;
5185 /* carry test */
5186 if (*plow < a)
5187 (*phigh)++;
5188 *phigh += b;
5189}
5190
5191static void neg128(uint64_t *plow, uint64_t *phigh)
5192{
5193 *plow = ~ *plow;
5194 *phigh = ~ *phigh;
5195 add128(plow, phigh, 1, 0);
5196}
5197
5198/* return TRUE if overflow */
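/* 128-bit by 64-bit unsigned division using plain shift-and-subtract; on return *plow holds the quotient and *phigh the remainder. */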
5199static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5200{
5201 uint64_t q, r, a1, a0;
5202 int i, qb, ab;
5203
5204 a0 = *plow;
5205 a1 = *phigh;
5206 if (a1 == 0) {
5207 q = a0 / b;
5208 r = a0 % b;
5209 *plow = q;
5210 *phigh = r;
5211 } else {
5212 if (a1 >= b)
5213 return 1;
5214 /* XXX: use a better algorithm */
5215 for(i = 0; i < 64; i++) {
5216 ab = a1 >> 63;
5217 a1 = (a1 << 1) | (a0 >> 63);
5218 if (ab || a1 >= b) {
5219 a1 -= b;
5220 qb = 1;
5221 } else {
5222 qb = 0;
5223 }
5224 a0 = (a0 << 1) | qb;
5225 }
5226#if defined(DEBUG_MULDIV)
5227 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5228 *phigh, *plow, b, a0, a1);
5229#endif
5230 *plow = a0;
5231 *phigh = a1;
5232 }
5233 return 0;
5234}
5235
5236/* return TRUE if overflow */
5237static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5238{
5239 int sa, sb;
5240 sa = ((int64_t)*phigh < 0);
5241 if (sa)
5242 neg128(plow, phigh);
5243 sb = (b < 0);
5244 if (sb)
5245 b = -b;
5246 if (div64(plow, phigh, b) != 0)
5247 return 1;
5248 if (sa ^ sb) {
5249 if (*plow > (1ULL << 63))
5250 return 1;
5251 *plow = - *plow;
5252 } else {
5253 if (*plow >= (1ULL << 63))
5254 return 1;
5255 }
5256 if (sa)
5257 *phigh = - *phigh;
5258 return 0;
5259}
5260
5261void helper_mulq_EAX_T0(target_ulong t0)
5262{
5263 uint64_t r0, r1;
5264
5265 mulu64(&r0, &r1, EAX, t0);
5266 EAX = r0;
5267 EDX = r1;
5268 CC_DST = r0;
5269 CC_SRC = r1;
5270}
5271
5272void helper_imulq_EAX_T0(target_ulong t0)
5273{
5274 uint64_t r0, r1;
5275
5276 muls64(&r0, &r1, EAX, t0);
5277 EAX = r0;
5278 EDX = r1;
5279 CC_DST = r0;
5280 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5281}
5282
5283target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5284{
5285 uint64_t r0, r1;
5286
5287 muls64(&r0, &r1, t0, t1);
5288 CC_DST = r0;
5289 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5290 return r0;
5291}
5292
5293void helper_divq_EAX(target_ulong t0)
5294{
5295 uint64_t r0, r1;
5296 if (t0 == 0) {
5297 raise_exception(EXCP00_DIVZ);
5298 }
5299 r0 = EAX;
5300 r1 = EDX;
5301 if (div64(&r0, &r1, t0))
5302 raise_exception(EXCP00_DIVZ);
5303 EAX = r0;
5304 EDX = r1;
5305}
5306
5307void helper_idivq_EAX(target_ulong t0)
5308{
5309 uint64_t r0, r1;
5310 if (t0 == 0) {
5311 raise_exception(EXCP00_DIVZ);
5312 }
5313 r0 = EAX;
5314 r1 = EDX;
5315 if (idiv64(&r0, &r1, t0))
5316 raise_exception(EXCP00_DIVZ);
5317 EAX = r0;
5318 EDX = r1;
5319}
5320#endif
5321
5322static void do_hlt(void)
5323{
5324 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5325 env->halted = 1;
5326 env->exception_index = EXCP_HLT;
5327 cpu_loop_exit();
5328}
5329
5330void helper_hlt(int next_eip_addend)
5331{
5332 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5333 EIP += next_eip_addend;
5334
5335 do_hlt();
5336}
5337
5338void helper_monitor(target_ulong ptr)
5339{
5340#ifdef VBOX
5341 if ((uint32_t)ECX > 1)
5342 raise_exception(EXCP0D_GPF);
5343#else /* !VBOX */
5344 if ((uint32_t)ECX != 0)
5345 raise_exception(EXCP0D_GPF);
5346#endif /* !VBOX */
5347 /* XXX: store address ? */
5348 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5349}
5350
5351void helper_mwait(int next_eip_addend)
5352{
5353 if ((uint32_t)ECX != 0)
5354 raise_exception(EXCP0D_GPF);
5355#ifdef VBOX
5356 helper_hlt(next_eip_addend);
5357#else /* !VBOX */
5358 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5359 EIP += next_eip_addend;
5360
5361 /* XXX: not complete but not completely erroneous */
5362 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5363 /* more than one CPU: do not sleep because another CPU may
5364 wake this one */
5365 } else {
5366 do_hlt();
5367 }
5368#endif /* !VBOX */
5369}
5370
5371void helper_debug(void)
5372{
5373 env->exception_index = EXCP_DEBUG;
5374 cpu_loop_exit();
5375}
5376
5377void helper_reset_rf(void)
5378{
5379 env->eflags &= ~RF_MASK;
5380}
5381
5382void helper_raise_interrupt(int intno, int next_eip_addend)
5383{
5384 raise_interrupt(intno, 1, 0, next_eip_addend);
5385}
5386
5387void helper_raise_exception(int exception_index)
5388{
5389 raise_exception(exception_index);
5390}
5391
5392void helper_cli(void)
5393{
5394 env->eflags &= ~IF_MASK;
5395}
5396
5397void helper_sti(void)
5398{
5399 env->eflags |= IF_MASK;
5400}
5401
5402#ifdef VBOX
5403void helper_cli_vme(void)
5404{
5405 env->eflags &= ~VIF_MASK;
5406}
5407
5408void helper_sti_vme(void)
5409{
5410 /* First check, then change eflags according to the AMD manual */
5411 if (env->eflags & VIP_MASK) {
5412 raise_exception(EXCP0D_GPF);
5413 }
5414 env->eflags |= VIF_MASK;
5415}
5416#endif /* VBOX */
5417
5418#if 0
5419/* vm86plus instructions */
5420void helper_cli_vm(void)
5421{
5422 env->eflags &= ~VIF_MASK;
5423}
5424
5425void helper_sti_vm(void)
5426{
5427 env->eflags |= VIF_MASK;
5428 if (env->eflags & VIP_MASK) {
5429 raise_exception(EXCP0D_GPF);
5430 }
5431}
5432#endif
5433
5434void helper_set_inhibit_irq(void)
5435{
5436 env->hflags |= HF_INHIBIT_IRQ_MASK;
5437}
5438
5439void helper_reset_inhibit_irq(void)
5440{
5441 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5442}
5443
5444void helper_boundw(target_ulong a0, int v)
5445{
5446 int low, high;
5447 low = ldsw(a0);
5448 high = ldsw(a0 + 2);
5449 v = (int16_t)v;
5450 if (v < low || v > high) {
5451 raise_exception(EXCP05_BOUND);
5452 }
5453}
5454
5455void helper_boundl(target_ulong a0, int v)
5456{
5457 int low, high;
5458 low = ldl(a0);
5459 high = ldl(a0 + 4);
5460 if (v < low || v > high) {
5461 raise_exception(EXCP05_BOUND);
5462 }
5463}
5464
5465static float approx_rsqrt(float a)
5466{
5467 return 1.0 / sqrt(a);
5468}
5469
5470static float approx_rcp(float a)
5471{
5472 return 1.0 / a;
5473}
5474
5475#if !defined(CONFIG_USER_ONLY)
5476
5477#define MMUSUFFIX _mmu
5478
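/* Instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte accesses. */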
5479#define SHIFT 0
5480#include "softmmu_template.h"
5481
5482#define SHIFT 1
5483#include "softmmu_template.h"
5484
5485#define SHIFT 2
5486#include "softmmu_template.h"
5487
5488#define SHIFT 3
5489#include "softmmu_template.h"
5490
5491#endif
5492
5493#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5494/* This code assumes that real physical addresses always fit into a host CPU register,
5495 which is wrong in general, but true for our current use cases. */
5496RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5497{
5498 return remR3PhysReadS8(addr);
5499}
5500RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5501{
5502 return remR3PhysReadU8(addr);
5503}
5504void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5505{
5506 remR3PhysWriteU8(addr, val);
5507}
5508RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5509{
5510 return remR3PhysReadS16(addr);
5511}
5512RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5513{
5514 return remR3PhysReadU16(addr);
5515}
5516void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5517{
5518 remR3PhysWriteU16(addr, val);
5519}
5520RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5521{
5522 return remR3PhysReadS32(addr);
5523}
5524RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5525{
5526 return remR3PhysReadU32(addr);
5527}
5528void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5529{
5530 remR3PhysWriteU32(addr, val);
5531}
5532uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5533{
5534 return remR3PhysReadU64(addr);
5535}
5536void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5537{
5538 remR3PhysWriteU64(addr, val);
5539}
5540#endif /* VBOX */
5541
5542#if !defined(CONFIG_USER_ONLY)
5543/* try to fill the TLB and return an exception if error. If retaddr is
5544 NULL, it means that the function was called in C code (i.e. not
5545 from generated code or from helper.c) */
5546/* XXX: fix it to restore all registers */
5547void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5548{
5549 TranslationBlock *tb;
5550 int ret;
5551 unsigned long pc;
5552 CPUX86State *saved_env;
5553
5554 /* XXX: hack to restore env in all cases, even if not called from
5555 generated code */
5556 saved_env = env;
5557 env = cpu_single_env;
5558
5559 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5560 if (ret) {
5561 if (retaddr) {
5562 /* now we have a real cpu fault */
5563 pc = (unsigned long)retaddr;
5564 tb = tb_find_pc(pc);
5565 if (tb) {
5566 /* the PC is inside the translated code. It means that we have
5567 a virtual CPU fault */
5568 cpu_restore_state(tb, env, pc, NULL);
5569 }
5570 }
5571 raise_exception_err(env->exception_index, env->error_code);
5572 }
5573 env = saved_env;
5574}
5575#endif
5576
5577#ifdef VBOX
5578
5579/**
5580 * Correctly computes the eflags.
5581 * @returns eflags.
5582 * @param env1 CPU environment.
5583 */
5584uint32_t raw_compute_eflags(CPUX86State *env1)
5585{
5586 CPUX86State *savedenv = env;
5587 uint32_t efl;
5588 env = env1;
5589 efl = compute_eflags();
5590 env = savedenv;
5591 return efl;
5592}
5593
5594/**
5595 * Reads a byte from a virtual address in the guest memory area.
5596 * XXX: is it working for any addresses? swapped out pages?
5597 * @returns read data byte.
5598 * @param env1 CPU environment.
5599 * @param pvAddr GC Virtual address.
5600 */
5601uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5602{
5603 CPUX86State *savedenv = env;
5604 uint8_t u8;
5605 env = env1;
5606 u8 = ldub_kernel(addr);
5607 env = savedenv;
5608 return u8;
5609}
5610
5611/**
5612 * Reads a word from a virtual address in the guest memory area.
5613 * XXX: is it working for any addresses? swapped out pages?
5614 * @returns read data word.
5615 * @param env1 CPU environment.
5616 * @param pvAddr GC Virtual address.
5617 */
5618uint16_t read_word(CPUX86State *env1, target_ulong addr)
5619{
5620 CPUX86State *savedenv = env;
5621 uint16_t u16;
5622 env = env1;
5623 u16 = lduw_kernel(addr);
5624 env = savedenv;
5625 return u16;
5626}
5627
5628/**
5629 * Reads a dword from a virtual address in the guest memory area.
5630 * XXX: is it working for any addresses? swapped out pages?
5631 * @returns read data dword.
5632 * @param env1 CPU environment.
5633 * @param pvAddr GC Virtual address.
5634 */
5635uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5636{
5637 CPUX86State *savedenv = env;
5638 uint32_t u32;
5639 env = env1;
5640 u32 = ldl_kernel(addr);
5641 env = savedenv;
5642 return u32;
5643}
5644
5645/**
5646 * Writes a byte to a virtual address in the guest memory area.
5647 * XXX: is it working for any addresses? swapped out pages?
5648 *
5649 * @param env1 CPU environment.
5650 * @param pvAddr GC Virtual address.
5651 * @param val byte value.
5652 */
5653void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5654{
5655 CPUX86State *savedenv = env;
5656 env = env1;
5657 stb(addr, val);
5658 env = savedenv;
5659}
5660
5661void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5662{
5663 CPUX86State *savedenv = env;
5664 env = env1;
5665 stw(addr, val);
5666 env = savedenv;
5667}
5668
5669void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5670{
5671 CPUX86State *savedenv = env;
5672 env = env1;
5673 stl(addr, val);
5674 env = savedenv;
5675}
5676
5677/**
5678 * Correctly loads a selector into a segment register, updating the internal
5679 * qemu data/caches.
5680 * @param env1 CPU environment.
5681 * @param seg_reg Segment register.
5682 * @param selector Selector to load.
5683 */
5684void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5685{
5686 CPUX86State *savedenv = env;
5687#ifdef FORCE_SEGMENT_SYNC
5688 jmp_buf old_buf;
5689#endif
5690
5691 env = env1;
5692
5693 if ( env->eflags & X86_EFL_VM
5694 || !(env->cr[0] & X86_CR0_PE))
5695 {
5696 load_seg_vm(seg_reg, selector);
5697
5698 env = savedenv;
5699
5700 /* Successful sync. */
5701 env1->segs[seg_reg].newselector = 0;
5702 }
5703 else
5704 {
5705 /* For some reason it works even without save/restore of the jump buffer, and since this code is
5706 time critical - let's not do that. */
5707#ifdef FORCE_SEGMENT_SYNC
5708 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5709#endif
5710 if (setjmp(env1->jmp_env) == 0)
5711 {
5712 if (seg_reg == R_CS)
5713 {
5714 uint32_t e1, e2;
5715 e1 = e2 = 0;
5716 load_segment(&e1, &e2, selector);
5717 cpu_x86_load_seg_cache(env, R_CS, selector,
5718 get_seg_base(e1, e2),
5719 get_seg_limit(e1, e2),
5720 e2);
5721 }
5722 else
5723 helper_load_seg(seg_reg, selector);
5724 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5725 loading 0 selectors, which in turn led to subtle problems like #3588. */
5726
5727 env = savedenv;
5728
5729 /* Successful sync. */
5730 env1->segs[seg_reg].newselector = 0;
5731 }
5732 else
5733 {
5734 env = savedenv;
5735
5736 /* Postpone sync until the guest uses the selector. */
5737 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5738 env1->segs[seg_reg].newselector = selector;
5739 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5740 env1->exception_index = -1;
5741 env1->error_code = 0;
5742 env1->old_exception = -1;
5743 }
5744#ifdef FORCE_SEGMENT_SYNC
5745 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5746#endif
5747 }
5748
5749}
5750
5751DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5752{
5753 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5754}
5755
5756
5757int emulate_single_instr(CPUX86State *env1)
5758{
5759 TranslationBlock *tb;
5760 TranslationBlock *current;
5761 int flags;
5762 uint8_t *tc_ptr;
5763 target_ulong old_eip;
5764
5765 /* ensures env is loaded! */
5766 CPUX86State *savedenv = env;
5767 env = env1;
5768
5769 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5770
5771 current = env->current_tb;
5772 env->current_tb = NULL;
5773 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5774
5775 /*
5776 * Translate only one instruction.
5777 */
5778 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5779 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5780 env->segs[R_CS].base, flags, 0);
5781
5782 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5783
5784
5785 /* tb_link_phys: */
5786 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5787 tb->jmp_next[0] = NULL;
5788 tb->jmp_next[1] = NULL;
5789 Assert(tb->jmp_next[0] == NULL);
5790 Assert(tb->jmp_next[1] == NULL);
5791 if (tb->tb_next_offset[0] != 0xffff)
5792 tb_reset_jump(tb, 0);
5793 if (tb->tb_next_offset[1] != 0xffff)
5794 tb_reset_jump(tb, 1);
5795
5796 /*
5797 * Execute it using emulation
5798 */
5799 old_eip = env->eip;
5800 env->current_tb = tb;
5801
5802 /*
5803 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5804 * Perhaps not a very safe hack.
5805 */
5806 while(old_eip == env->eip)
5807 {
5808 tc_ptr = tb->tc_ptr;
5809
5810#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5811 int fake_ret;
5812 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5813#else
5814 tcg_qemu_tb_exec(tc_ptr);
5815#endif
5816 /*
5817 * Exit once we detect an external interrupt and interrupts are enabled
5818 */
5819 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5820 ( (env->eflags & IF_MASK) &&
5821 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5822 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5823 {
5824 break;
5825 }
5826 }
5827 env->current_tb = current;
5828
5829 tb_phys_invalidate(tb, -1);
5830 tb_free(tb);
5831/*
5832 Assert(tb->tb_next_offset[0] == 0xffff);
5833 Assert(tb->tb_next_offset[1] == 0xffff);
5834 Assert(tb->tb_next[0] == 0xffff);
5835 Assert(tb->tb_next[1] == 0xffff);
5836 Assert(tb->jmp_next[0] == NULL);
5837 Assert(tb->jmp_next[1] == NULL);
5838 Assert(tb->jmp_first == NULL); */
5839
5840 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5841
5842 /*
5843 * Execute the next instruction when we encounter instruction fusing.
5844 */
5845 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5846 {
5847 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5848 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5849 emulate_single_instr(env);
5850 }
5851
5852 env = savedenv;
5853 return 0;
5854}
5855
5856/**
5857 * Correctly loads a new ldtr selector.
5858 *
5859 * @param env1 CPU environment.
5860 * @param selector Selector to load.
5861 */
5862void sync_ldtr(CPUX86State *env1, int selector)
5863{
5864 CPUX86State *saved_env = env;
5865 if (setjmp(env1->jmp_env) == 0)
5866 {
5867 env = env1;
5868 helper_lldt(selector);
5869 env = saved_env;
5870 }
5871 else
5872 {
5873 env = saved_env;
5874#ifdef VBOX_STRICT
5875 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5876#endif
5877 }
5878}
5879
5880int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5881 uint32_t *esp_ptr, int dpl)
5882{
5883 int type, index, shift;
5884
5885 CPUX86State *savedenv = env;
5886 env = env1;
5887
5888 if (!(env->tr.flags & DESC_P_MASK))
5889 cpu_abort(env, "invalid tss");
5890 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5891 if ((type & 7) != 1)
5892 cpu_abort(env, "invalid tss type %d", type);
5893 shift = type >> 3;
5894 index = (dpl * 4 + 2) << shift;
5895 if (index + (4 << shift) - 1 > env->tr.limit)
5896 {
5897 env = savedenv;
5898 return 0;
5899 }
5900 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5901
5902 if (shift == 0) {
5903 *esp_ptr = lduw_kernel(env->tr.base + index);
5904 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5905 } else {
5906 *esp_ptr = ldl_kernel(env->tr.base + index);
5907 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5908 }
5909
5910 env = savedenv;
5911 return 1;
5912}
5913
5914//*****************************************************************************
5915// Needs to be at the bottom of the file (overriding macros)
5916
5917static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5918{
5919 return *(CPU86_LDouble *)ptr;
5920}
5921
5922static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5923{
5924 *(CPU86_LDouble *)ptr = f;
5925}
5926
5927#undef stw
5928#undef stl
5929#undef stq
5930#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5931#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5932#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
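/* From here on stw/stl/stq (and further down lduw/ldl/ldq) access host memory directly instead of going through the guest address space; the raw FPU state helpers below rely on this. */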
5933
5934//*****************************************************************************
5935void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5936{
5937 int fpus, fptag, i, nb_xmm_regs;
5938 CPU86_LDouble tmp;
5939 uint8_t *addr;
5940 int data64 = !!(env->hflags & HF_LMA_MASK);
5941
5942 if (env->cpuid_features & CPUID_FXSR)
5943 {
5944 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5945 fptag = 0;
5946 for(i = 0; i < 8; i++) {
5947 fptag |= (env->fptags[i] << i);
5948 }
5949 stw(ptr, env->fpuc);
5950 stw(ptr + 2, fpus);
5951 stw(ptr + 4, fptag ^ 0xff);
5952
5953 addr = ptr + 0x20;
5954 for(i = 0;i < 8; i++) {
5955 tmp = ST(i);
5956 helper_fstt_raw(tmp, addr);
5957 addr += 16;
5958 }
5959
5960 if (env->cr[4] & CR4_OSFXSR_MASK) {
5961 /* XXX: finish it */
5962 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5963 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5964 nb_xmm_regs = 8 << data64;
5965 addr = ptr + 0xa0;
5966 for(i = 0; i < nb_xmm_regs; i++) {
5967#if __GNUC__ < 4
5968 stq(addr, env->xmm_regs[i].XMM_Q(0));
5969 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5970#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5971 stl(addr, env->xmm_regs[i].XMM_L(0));
5972 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5973 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5974 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5975#endif
5976 addr += 16;
5977 }
5978 }
5979 }
5980 else
5981 {
5982 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5983 int fptag;
5984
5985 fp->FCW = env->fpuc;
5986 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5987 fptag = 0;
5988 for (i=7; i>=0; i--) {
5989 fptag <<= 2;
5990 if (env->fptags[i]) {
5991 fptag |= 3;
5992 } else {
5993 /* the FPU automatically computes it */
5994 }
5995 }
5996 fp->FTW = fptag;
5997
5998 for(i = 0;i < 8; i++) {
5999 tmp = ST(i);
6000 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6001 }
6002 }
6003}
6004
6005//*****************************************************************************
6006#undef lduw
6007#undef ldl
6008#undef ldq
6009#define lduw(a) *(uint16_t *)(a)
6010#define ldl(a) *(uint32_t *)(a)
6011#define ldq(a) *(uint64_t *)(a)
6012//*****************************************************************************
6013void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6014{
6015 int i, fpus, fptag, nb_xmm_regs;
6016 CPU86_LDouble tmp;
6017 uint8_t *addr;
6018 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6019
6020 if (env->cpuid_features & CPUID_FXSR)
6021 {
6022 env->fpuc = lduw(ptr);
6023 fpus = lduw(ptr + 2);
6024 fptag = lduw(ptr + 4);
6025 env->fpstt = (fpus >> 11) & 7;
6026 env->fpus = fpus & ~0x3800;
6027 fptag ^= 0xff;
6028 for(i = 0;i < 8; i++) {
6029 env->fptags[i] = ((fptag >> i) & 1);
6030 }
6031
6032 addr = ptr + 0x20;
6033 for(i = 0;i < 8; i++) {
6034 tmp = helper_fldt_raw(addr);
6035 ST(i) = tmp;
6036 addr += 16;
6037 }
6038
6039 if (env->cr[4] & CR4_OSFXSR_MASK) {
6040 /* XXX: finish it, endianness */
6041 env->mxcsr = ldl(ptr + 0x18);
6042 //ldl(ptr + 0x1c);
6043 nb_xmm_regs = 8 << data64;
6044 addr = ptr + 0xa0;
6045 for(i = 0; i < nb_xmm_regs; i++) {
6046#if HC_ARCH_BITS == 32
6047 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6048 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6049 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6050 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6051 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6052#else
6053 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6054 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6055#endif
6056 addr += 16;
6057 }
6058 }
6059 }
6060 else
6061 {
6062 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6063 int fptag, j;
6064
6065 env->fpuc = fp->FCW;
6066 env->fpstt = (fp->FSW >> 11) & 7;
6067 env->fpus = fp->FSW & ~0x3800;
6068 fptag = fp->FTW;
6069 for(i = 0;i < 8; i++) {
6070 env->fptags[i] = ((fptag & 3) == 3);
6071 fptag >>= 2;
6072 }
6073 j = env->fpstt;
6074 for(i = 0;i < 8; i++) {
6075 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6076 ST(i) = tmp;
6077 }
6078 }
6079}
6080//*****************************************************************************
6081//*****************************************************************************
6082
6083#endif /* VBOX */
6084
6085/* Secure Virtual Machine helpers */
6086
6087#if defined(CONFIG_USER_ONLY)
6088
6089void helper_vmrun(int aflag, int next_eip_addend)
6090{
6091}
6092void helper_vmmcall(void)
6093{
6094}
6095void helper_vmload(int aflag)
6096{
6097}
6098void helper_vmsave(int aflag)
6099{
6100}
6101void helper_stgi(void)
6102{
6103}
6104void helper_clgi(void)
6105{
6106}
6107void helper_skinit(void)
6108{
6109}
6110void helper_invlpga(int aflag)
6111{
6112}
6113void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6114{
6115}
6116void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6117{
6118}
6119
6120void helper_svm_check_io(uint32_t port, uint32_t param,
6121 uint32_t next_eip_addend)
6122{
6123}
6124#else
6125
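/* The VMCB stores segment attributes in the packed SVM format; these helpers convert between that layout and the descriptor-style flags kept in the qemu segment cache. */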
6126static inline void svm_save_seg(target_phys_addr_t addr,
6127 const SegmentCache *sc)
6128{
6129 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6130 sc->selector);
6131 stq_phys(addr + offsetof(struct vmcb_seg, base),
6132 sc->base);
6133 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6134 sc->limit);
6135 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6136 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6137}
6138
6139static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6140{
6141 unsigned int flags;
6142
6143 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6144 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6145 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6146 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6147 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6148}
6149
6150static inline void svm_load_seg_cache(target_phys_addr_t addr,
6151 CPUState *env, int seg_reg)
6152{
6153 SegmentCache sc1, *sc = &sc1;
6154 svm_load_seg(addr, sc);
6155 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6156 sc->base, sc->limit, sc->flags);
6157}
6158
6159void helper_vmrun(int aflag, int next_eip_addend)
6160{
6161 target_ulong addr;
6162 uint32_t event_inj;
6163 uint32_t int_ctl;
6164
6165 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6166
6167 if (aflag == 2)
6168 addr = EAX;
6169 else
6170 addr = (uint32_t)EAX;
6171
6172 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6173
6174 env->vm_vmcb = addr;
6175
6176 /* save the current CPU state in the hsave page */
6177 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6178 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6179
6180 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6181 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6182
6183 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6184 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6185 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6186 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6187 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6188 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6189
6190 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6191 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6192
6193 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6194 &env->segs[R_ES]);
6195 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6196 &env->segs[R_CS]);
6197 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6198 &env->segs[R_SS]);
6199 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6200 &env->segs[R_DS]);
6201
6202 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6203 EIP + next_eip_addend);
6204 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6205 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6206
6207 /* load the interception bitmaps so we do not need to access the
6208 vmcb in svm mode */
6209 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6210 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6211 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6212 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6213 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6214 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6215
6216 /* enable intercepts */
6217 env->hflags |= HF_SVMI_MASK;
6218
6219 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6220
6221 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6222 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6223
6224 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6225 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6226
6227 /* clear exit_info_2 so we behave like the real hardware */
6228 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6229
6230 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6231 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6232 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6233 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6234 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6235 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6236 if (int_ctl & V_INTR_MASKING_MASK) {
6237 env->v_tpr = int_ctl & V_TPR_MASK;
6238 env->hflags2 |= HF2_VINTR_MASK;
6239 if (env->eflags & IF_MASK)
6240 env->hflags2 |= HF2_HIF_MASK;
6241 }
6242
6243 cpu_load_efer(env,
6244 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6245 env->eflags = 0;
6246 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6247 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6248 CC_OP = CC_OP_EFLAGS;
6249
6250 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6251 env, R_ES);
6252 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6253 env, R_CS);
6254 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6255 env, R_SS);
6256 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6257 env, R_DS);
6258
6259 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6260 env->eip = EIP;
6261 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6262 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6263 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6264 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6265 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6266
6267 /* FIXME: guest state consistency checks */
6268
6269 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6270 case TLB_CONTROL_DO_NOTHING:
6271 break;
6272 case TLB_CONTROL_FLUSH_ALL_ASID:
6273 /* FIXME: this is not 100% correct but should work for now */
6274 tlb_flush(env, 1);
6275 break;
6276 }
6277
6278 env->hflags2 |= HF2_GIF_MASK;
6279
6280 if (int_ctl & V_IRQ_MASK) {
6281 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6282 }
6283
6284 /* maybe we need to inject an event */
6285 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6286 if (event_inj & SVM_EVTINJ_VALID) {
6287 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6288 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6289 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6290
6291 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6292 /* FIXME: need to implement valid_err */
6293 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6294 case SVM_EVTINJ_TYPE_INTR:
6295 env->exception_index = vector;
6296 env->error_code = event_inj_err;
6297 env->exception_is_int = 0;
6298 env->exception_next_eip = -1;
6299 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6300 /* XXX: is it always correct? */
6301 do_interrupt(vector, 0, 0, 0, 1);
6302 break;
6303 case SVM_EVTINJ_TYPE_NMI:
6304 env->exception_index = EXCP02_NMI;
6305 env->error_code = event_inj_err;
6306 env->exception_is_int = 0;
6307 env->exception_next_eip = EIP;
6308 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6309 cpu_loop_exit();
6310 break;
6311 case SVM_EVTINJ_TYPE_EXEPT:
6312 env->exception_index = vector;
6313 env->error_code = event_inj_err;
6314 env->exception_is_int = 0;
6315 env->exception_next_eip = -1;
6316 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6317 cpu_loop_exit();
6318 break;
6319 case SVM_EVTINJ_TYPE_SOFT:
6320 env->exception_index = vector;
6321 env->error_code = event_inj_err;
6322 env->exception_is_int = 1;
6323 env->exception_next_eip = EIP;
6324 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6325 cpu_loop_exit();
6326 break;
6327 }
6328 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6329 }
6330}
6331
6332void helper_vmmcall(void)
6333{
6334 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6335 raise_exception(EXCP06_ILLOP);
6336}
6337
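/* VMLOAD: load the additional guest state (FS, GS, TR, LDTR and the
   SYSCALL/SYSENTER MSRs) from the VMCB whose physical address is in rAX. */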
6338void helper_vmload(int aflag)
6339{
6340 target_ulong addr;
6341 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6342
6343 if (aflag == 2)
6344 addr = EAX;
6345 else
6346 addr = (uint32_t)EAX;
6347
6348 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6349 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6350 env->segs[R_FS].base);
6351
6352 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6353 env, R_FS);
6354 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6355 env, R_GS);
6356 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6357 &env->tr);
6358 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6359 &env->ldt);
6360
6361#ifdef TARGET_X86_64
6362 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6363 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6364 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6365 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6366#endif
6367 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6368 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6369 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6370 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6371}
6372
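/* VMSAVE: store the same additional state back to the VMCB addressed by rAX. */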
6373void helper_vmsave(int aflag)
6374{
6375 target_ulong addr;
6376 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6377
6378 if (aflag == 2)
6379 addr = EAX;
6380 else
6381 addr = (uint32_t)EAX;
6382
6383 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6384 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6385 env->segs[R_FS].base);
6386
6387 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6388 &env->segs[R_FS]);
6389 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6390 &env->segs[R_GS]);
6391 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6392 &env->tr);
6393 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6394 &env->ldt);
6395
6396#ifdef TARGET_X86_64
6397 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6398 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6399 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6400 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6401#endif
6402 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6403 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6404 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6405 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6406}
6407
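/* STGI and CLGI set/clear the global interrupt flag (GIF). */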
6408void helper_stgi(void)
6409{
6410 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6411 env->hflags2 |= HF2_GIF_MASK;
6412}
6413
6414void helper_clgi(void)
6415{
6416 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6417 env->hflags2 &= ~HF2_GIF_MASK;
6418}
6419
6420void helper_skinit(void)
6421{
6422 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6423 /* XXX: not implemented */
6424 raise_exception(EXCP06_ILLOP);
6425}
6426
6427void helper_invlpga(int aflag)
6428{
6429 target_ulong addr;
6430 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6431
6432 if (aflag == 2)
6433 addr = EAX;
6434 else
6435 addr = (uint32_t)EAX;
6436
6437 /* XXX: could use the ASID to decide whether the flush is
6438 actually needed */
6439 tlb_flush_page(env, addr);
6440}
6441
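/* Check whether 'type' is intercepted by the current VMCB settings and, if
   so, trigger a #VMEXIT with exit code 'type' and exit_info_1 'param'.
   Does nothing unless the CPU is running in SVM guest mode. */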
6442void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6443{
6444 if (likely(!(env->hflags & HF_SVMI_MASK)))
6445 return;
6446#ifndef VBOX
6447 switch(type) {
6448 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6449 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6450 helper_vmexit(type, param);
6451 }
6452 break;
6453 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6454 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6455 helper_vmexit(type, param);
6456 }
6457 break;
6458 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6459 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6460 helper_vmexit(type, param);
6461 }
6462 break;
6463 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6464 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6465 helper_vmexit(type, param);
6466 }
6467 break;
6468 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6469 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6470 helper_vmexit(type, param);
6471 }
6472 break;
6473 case SVM_EXIT_MSR:
6474 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6475 /* FIXME: this should be read in at vmrun (faster this way?) */
6476 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6477 uint32_t t0, t1;
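            /* The MSR permission map uses 2 bits per MSR (read, then write)
               and is split into 2K regions covering the MSR ranges
               0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff;
               compute the byte offset t1 and bit offset t0 for ECX.
               E.g. EFER (0xc0000080) maps to byte 0x820, bit 0 (read)
               and bit 1 (write). */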
6478 switch((uint32_t)ECX) {
6479 case 0 ... 0x1fff:
6480 t0 = (ECX * 2) % 8;
6481 t1 = (ECX * 2) / 8;
6482 break;
6483 case 0xc0000000 ... 0xc0001fff:
6484 t0 = (8192 + ECX - 0xc0000000) * 2;
6485 t1 = (t0 / 8);
6486 t0 %= 8;
6487 break;
6488 case 0xc0010000 ... 0xc0011fff:
6489 t0 = (16384 + ECX - 0xc0010000) * 2;
6490 t1 = (t0 / 8);
6491 t0 %= 8;
6492 break;
6493 default:
6494 helper_vmexit(type, param);
6495 t0 = 0;
6496 t1 = 0;
6497 break;
6498 }
6499 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6500 helper_vmexit(type, param);
6501 }
6502 break;
6503 default:
6504 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6505 helper_vmexit(type, param);
6506 }
6507 break;
6508 }
6509#else /* VBOX */
6510 AssertMsgFailed(("We shouldn't be here, HWACCM handles this differently!"));
6511#endif /* VBOX */
6512}
6513
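/* Check the I/O permission map for an access to 'port' and, if the access
   is intercepted, record the next EIP in exit_info_2 and raise an
   SVM_EXIT_IOIO #VMEXIT. */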
6514void helper_svm_check_io(uint32_t port, uint32_t param,
6515 uint32_t next_eip_addend)
6516{
6517 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6518 /* FIXME: this should be read in at vmrun (faster this way?) */
6519 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6520 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6521 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6522 /* next EIP */
6523 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6524 env->eip + next_eip_addend);
6525 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6526 }
6527 }
6528}
6529
6530/* Note: currently only 32 bits of exit_code are used */
6531void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6532{
6533 uint32_t int_ctl;
6534
6535 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6536 exit_code, exit_info_1,
6537 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6538 EIP);
6539
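    /* propagate the interrupt shadow into the VMCB and clear it locally */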
6540 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6541 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6542 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6543 } else {
6544 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6545 }
6546
6547 /* Save the VM state in the vmcb */
6548 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6549 &env->segs[R_ES]);
6550 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6551 &env->segs[R_CS]);
6552 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6553 &env->segs[R_SS]);
6554 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6555 &env->segs[R_DS]);
6556
6557 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6558 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6559
6560 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6561 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6562
6563 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6564 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6565 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6566 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6567 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6568
6569 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6570 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6571 int_ctl |= env->v_tpr & V_TPR_MASK;
6572 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6573 int_ctl |= V_IRQ_MASK;
6574 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6575
6576 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6577 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6578 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6579 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6580 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6581 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6582 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6583
6584 /* Reload the host state from vm_hsave */
6585 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6586 env->hflags &= ~HF_SVMI_MASK;
6587 env->intercept = 0;
6588 env->intercept_exceptions = 0;
6589 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6590 env->tsc_offset = 0;
6591
6592 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6593 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6594
6595 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6596 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6597
6598 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6599 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6600 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6601 /* we need to set the efer after the crs so the hidden flags get
6602 set properly */
6603 cpu_load_efer(env,
6604 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6605 env->eflags = 0;
6606 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6607 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6608 CC_OP = CC_OP_EFLAGS;
6609
6610 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6611 env, R_ES);
6612 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6613 env, R_CS);
6614 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6615 env, R_SS);
6616 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6617 env, R_DS);
6618
6619 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6620 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6621 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6622
6623 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6624 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6625
6626 /* other setups */
6627 cpu_x86_set_cpl(env, 0);
6628 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6629 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6630
6631 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6632 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6633 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6634 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6635
6636 env->hflags2 &= ~HF2_GIF_MASK;
6637 /* FIXME: Resets the current ASID register to zero (host ASID). */
6638
6639 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6640
6641 /* Clears the TSC_OFFSET inside the processor. */
6642
6643 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6644 from the page table indicated by the host's CR3. If the PDPEs contain
6645 illegal state, the processor causes a shutdown. */
6646
6647 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6648 env->cr[0] |= CR0_PE_MASK;
6649 env->eflags &= ~VM_MASK;
6650
6651 /* Disables all breakpoints in the host DR7 register. */
6652
6653 /* Checks the reloaded host state for consistency. */
6654
6655 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6656 host's code segment or non-canonical (in the case of long mode), a
6657 #GP fault is delivered inside the host. */
6658
6659 /* remove any pending exception */
6660 env->exception_index = -1;
6661 env->error_code = 0;
6662 env->old_exception = -1;
6663
6664 cpu_loop_exit();
6665}
6666
6667#endif
6668
6669/* MMX/SSE */
6670/* XXX: optimize by storing fptt and fptags in the static cpu state */
6671void helper_enter_mmx(void)
6672{
6673 env->fpstt = 0;
6674 *(uint32_t *)(env->fptags) = 0;
6675 *(uint32_t *)(env->fptags + 4) = 0;
6676}
6677
6678void helper_emms(void)
6679{
6680 /* set to empty state */
6681 *(uint32_t *)(env->fptags) = 0x01010101;
6682 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6683}
6684
6685/* XXX: suppress */
6686void helper_movq(void *d, void *s)
6687{
6688 *(uint64_t *)d = *(uint64_t *)s;
6689}
6690
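/* ops_sse.h is instantiated twice: SHIFT 0 produces the 64-bit MMX variants,
   SHIFT 1 the 128-bit SSE variants. helper_template.h is instantiated once
   per operand size (byte, word, long and, on 64-bit targets, quad). */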
6691#define SHIFT 0
6692#include "ops_sse.h"
6693
6694#define SHIFT 1
6695#include "ops_sse.h"
6696
6697#define SHIFT 0
6698#include "helper_template.h"
6699#undef SHIFT
6700
6701#define SHIFT 1
6702#include "helper_template.h"
6703#undef SHIFT
6704
6705#define SHIFT 2
6706#include "helper_template.h"
6707#undef SHIFT
6708
6709#ifdef TARGET_X86_64
6710
6711#define SHIFT 3
6712#include "helper_template.h"
6713#undef SHIFT
6714
6715#endif
6716
6717/* bit operations */
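/* The generated code is expected to call these helpers only with a non-zero
   operand (BSF/BSR leave the destination undefined when the source is zero),
   so the loops below always terminate. */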
6718target_ulong helper_bsf(target_ulong t0)
6719{
6720 int count;
6721 target_ulong res;
6722
6723 res = t0;
6724 count = 0;
6725 while ((res & 1) == 0) {
6726 count++;
6727 res >>= 1;
6728 }
6729 return count;
6730}
6731
6732target_ulong helper_bsr(target_ulong t0)
6733{
6734 int count;
6735 target_ulong res, mask;
6736
6737 res = t0;
6738 count = TARGET_LONG_BITS - 1;
6739 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6740 while ((res & mask) == 0) {
6741 count--;
6742 res <<= 1;
6743 }
6744 return count;
6745}
6746
6747
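/* Lazy EFLAGS evaluation: reconstruct all flags, or just CF, from the
   operands recorded in CC_SRC/CC_DST by the last flag-setting operation.
   CC_OP_EFLAGS means the flags are already materialized in CC_SRC. */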
6748static int compute_all_eflags(void)
6749{
6750 return CC_SRC;
6751}
6752
6753static int compute_c_eflags(void)
6754{
6755 return CC_SRC & CC_C;
6756}
6757
6758uint32_t helper_cc_compute_all(int op)
6759{
6760 switch (op) {
6761 default: /* should never happen */ return 0;
6762
6763 case CC_OP_EFLAGS: return compute_all_eflags();
6764
6765 case CC_OP_MULB: return compute_all_mulb();
6766 case CC_OP_MULW: return compute_all_mulw();
6767 case CC_OP_MULL: return compute_all_mull();
6768
6769 case CC_OP_ADDB: return compute_all_addb();
6770 case CC_OP_ADDW: return compute_all_addw();
6771 case CC_OP_ADDL: return compute_all_addl();
6772
6773 case CC_OP_ADCB: return compute_all_adcb();
6774 case CC_OP_ADCW: return compute_all_adcw();
6775 case CC_OP_ADCL: return compute_all_adcl();
6776
6777 case CC_OP_SUBB: return compute_all_subb();
6778 case CC_OP_SUBW: return compute_all_subw();
6779 case CC_OP_SUBL: return compute_all_subl();
6780
6781 case CC_OP_SBBB: return compute_all_sbbb();
6782 case CC_OP_SBBW: return compute_all_sbbw();
6783 case CC_OP_SBBL: return compute_all_sbbl();
6784
6785 case CC_OP_LOGICB: return compute_all_logicb();
6786 case CC_OP_LOGICW: return compute_all_logicw();
6787 case CC_OP_LOGICL: return compute_all_logicl();
6788
6789 case CC_OP_INCB: return compute_all_incb();
6790 case CC_OP_INCW: return compute_all_incw();
6791 case CC_OP_INCL: return compute_all_incl();
6792
6793 case CC_OP_DECB: return compute_all_decb();
6794 case CC_OP_DECW: return compute_all_decw();
6795 case CC_OP_DECL: return compute_all_decl();
6796
6797 case CC_OP_SHLB: return compute_all_shlb();
6798 case CC_OP_SHLW: return compute_all_shlw();
6799 case CC_OP_SHLL: return compute_all_shll();
6800
6801 case CC_OP_SARB: return compute_all_sarb();
6802 case CC_OP_SARW: return compute_all_sarw();
6803 case CC_OP_SARL: return compute_all_sarl();
6804
6805#ifdef TARGET_X86_64
6806 case CC_OP_MULQ: return compute_all_mulq();
6807
6808 case CC_OP_ADDQ: return compute_all_addq();
6809
6810 case CC_OP_ADCQ: return compute_all_adcq();
6811
6812 case CC_OP_SUBQ: return compute_all_subq();
6813
6814 case CC_OP_SBBQ: return compute_all_sbbq();
6815
6816 case CC_OP_LOGICQ: return compute_all_logicq();
6817
6818 case CC_OP_INCQ: return compute_all_incq();
6819
6820 case CC_OP_DECQ: return compute_all_decq();
6821
6822 case CC_OP_SHLQ: return compute_all_shlq();
6823
6824 case CC_OP_SARQ: return compute_all_sarq();
6825#endif
6826 }
6827}
6828
6829uint32_t helper_cc_compute_c(int op)
6830{
6831 switch (op) {
6832 default: /* should never happen */ return 0;
6833
6834 case CC_OP_EFLAGS: return compute_c_eflags();
6835
6836 case CC_OP_MULB: return compute_c_mull();
6837 case CC_OP_MULW: return compute_c_mull();
6838 case CC_OP_MULL: return compute_c_mull();
6839
6840 case CC_OP_ADDB: return compute_c_addb();
6841 case CC_OP_ADDW: return compute_c_addw();
6842 case CC_OP_ADDL: return compute_c_addl();
6843
6844 case CC_OP_ADCB: return compute_c_adcb();
6845 case CC_OP_ADCW: return compute_c_adcw();
6846 case CC_OP_ADCL: return compute_c_adcl();
6847
6848 case CC_OP_SUBB: return compute_c_subb();
6849 case CC_OP_SUBW: return compute_c_subw();
6850 case CC_OP_SUBL: return compute_c_subl();
6851
6852 case CC_OP_SBBB: return compute_c_sbbb();
6853 case CC_OP_SBBW: return compute_c_sbbw();
6854 case CC_OP_SBBL: return compute_c_sbbl();
6855
6856 case CC_OP_LOGICB: return compute_c_logicb();
6857 case CC_OP_LOGICW: return compute_c_logicw();
6858 case CC_OP_LOGICL: return compute_c_logicl();
6859
6860 case CC_OP_INCB: return compute_c_incl();
6861 case CC_OP_INCW: return compute_c_incl();
6862 case CC_OP_INCL: return compute_c_incl();
6863
6864 case CC_OP_DECB: return compute_c_incl();
6865 case CC_OP_DECW: return compute_c_incl();
6866 case CC_OP_DECL: return compute_c_incl();
6867
6868 case CC_OP_SHLB: return compute_c_shlb();
6869 case CC_OP_SHLW: return compute_c_shlw();
6870 case CC_OP_SHLL: return compute_c_shll();
6871
6872 case CC_OP_SARB: return compute_c_sarl();
6873 case CC_OP_SARW: return compute_c_sarl();
6874 case CC_OP_SARL: return compute_c_sarl();
6875
6876#ifdef TARGET_X86_64
6877 case CC_OP_MULQ: return compute_c_mull();
6878
6879 case CC_OP_ADDQ: return compute_c_addq();
6880
6881 case CC_OP_ADCQ: return compute_c_adcq();
6882
6883 case CC_OP_SUBQ: return compute_c_subq();
6884
6885 case CC_OP_SBBQ: return compute_c_sbbq();
6886
6887 case CC_OP_LOGICQ: return compute_c_logicq();
6888
6889 case CC_OP_INCQ: return compute_c_incl();
6890
6891 case CC_OP_DECQ: return compute_c_incl();
6892
6893 case CC_OP_SHLQ: return compute_c_shlq();
6894
6895 case CC_OP_SARQ: return compute_c_sarl();
6896#endif
6897 }
6898}