VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@39691

Last change on this file since 39691 was 37702, checked in by vboxsync, 13 years ago

REM/VMM: Don't flush the TLB if you don't hold the EM/REM lock; some other EMT may be executing code in the recompiler and could be really surprised by a TLB flush.

File size: 194.1 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
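/* Parity flag (PF) lookup table: entry i is CC_P when the byte value i
   contains an even number of set bits (PF reflects only the low 8 bits
   of a result). */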
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
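/* Constants used by the x87 load-constant instructions: 0.0 (FLDZ),
   1.0 (FLD1), pi (FLDPI), log10(2) (FLDLG2), ln(2) (FLDLN2),
   log2(e) (FLDL2E) and log2(10) (FLDL2T). */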
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bits mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205#endif /* VBOX */
206
207/* fetch the descriptor (e1 = low dword, e2 = high dword) for 'selector' from the GDT or LDT; return non-zero on error */
208static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
209 int selector)
210{
211 SegmentCache *dt;
212 int index;
213 target_ulong ptr;
214
215#ifdef VBOX
216 /* Trying to load a selector with CPL=1? */
217 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
218 {
219 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
220 selector = selector & 0xfffc;
221 }
222#endif /* VBOX */
223
224 if (selector & 0x4)
225 dt = &env->ldt;
226 else
227 dt = &env->gdt;
228 index = selector & ~7;
229 if ((index + 7) > dt->limit)
230 return -1;
231 ptr = dt->base + index;
232 *e1_ptr = ldl_kernel(ptr);
233 *e2_ptr = ldl_kernel(ptr + 4);
234 return 0;
235}
236
237static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
238{
239 unsigned int limit;
240 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
241 if (e2 & DESC_G_MASK)
242 limit = (limit << 12) | 0xfff;
243 return limit;
244}
245
246static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
247{
248 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
249}
250
251static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
252{
253 sc->base = get_seg_base(e1, e2);
254 sc->limit = get_seg_limit(e1, e2);
255 sc->flags = e2;
256}
257
258/* init the segment cache in vm86 mode. */
259static inline void load_seg_vm(int seg, int selector)
260{
261 selector &= 0xffff;
262#ifdef VBOX
263 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
264 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
265 flags |= (3 << DESC_DPL_SHIFT);
266
267 cpu_x86_load_seg_cache(env, seg, selector,
268 (selector << 4), 0xffff, flags);
269#else /* VBOX */
270 cpu_x86_load_seg_cache(env, seg, selector,
271 (selector << 4), 0xffff, 0);
272#endif /* VBOX */
273}
274
275static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
276 uint32_t *esp_ptr, int dpl)
277{
278#ifndef VBOX
279 int type, index, shift;
280#else
281 unsigned int type, index, shift;
282#endif
283
284#if 0
285 {
286 int i;
287 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
288 for(i=0;i<env->tr.limit;i++) {
289 printf("%02x ", env->tr.base[i]);
290 if ((i & 7) == 7) printf("\n");
291 }
292 printf("\n");
293 }
294#endif
295
296 if (!(env->tr.flags & DESC_P_MASK))
297 cpu_abort(env, "invalid tss");
298 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
299 if ((type & 7) != 1)
300 cpu_abort(env, "invalid tss type");
301 shift = type >> 3;
302 index = (dpl * 4 + 2) << shift;
303 if (index + (4 << shift) - 1 > env->tr.limit)
304 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
305 if (shift == 0) {
306 *esp_ptr = lduw_kernel(env->tr.base + index);
307 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
308 } else {
309 *esp_ptr = ldl_kernel(env->tr.base + index);
310 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
311 }
312}
313
314/* XXX: merge with load_seg() */
315static void tss_load_seg(int seg_reg, int selector)
316{
317 uint32_t e1, e2;
318 int rpl, dpl, cpl;
319
320#ifdef VBOX
321 e1 = e2 = 0; /* gcc warning? */
322 cpl = env->hflags & HF_CPL_MASK;
323 /* Trying to load a selector with CPL=1? */
324 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
325 {
326 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
327 selector = selector & 0xfffc;
328 }
329#endif /* VBOX */
330
331 if ((selector & 0xfffc) != 0) {
332 if (load_segment(&e1, &e2, selector) != 0)
333 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
334 if (!(e2 & DESC_S_MASK))
335 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
336 rpl = selector & 3;
337 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
338 cpl = env->hflags & HF_CPL_MASK;
339 if (seg_reg == R_CS) {
340 if (!(e2 & DESC_CS_MASK))
341 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
342 /* XXX: is it correct ? */
343 if (dpl != rpl)
344 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
345 if ((e2 & DESC_C_MASK) && dpl > rpl)
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 } else if (seg_reg == R_SS) {
348 /* SS must be writable data */
349 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 if (dpl != cpl || dpl != rpl)
352 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
353 } else {
354 /* not readable code */
355 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 /* if data or non-conforming code, check the access rights */
358 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
359 if (dpl < cpl || dpl < rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 }
362 }
363 if (!(e2 & DESC_P_MASK))
364 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
365 cpu_x86_load_seg_cache(env, seg_reg, selector,
366 get_seg_base(e1, e2),
367 get_seg_limit(e1, e2),
368 e2);
369 } else {
370 if (seg_reg == R_SS || seg_reg == R_CS)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372#ifdef VBOX
373# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
374 cpu_x86_load_seg_cache(env, seg_reg, selector,
375 0, 0, 0);
376# endif
377#endif /* VBOX */
378 }
379}
380
381#define SWITCH_TSS_JMP 0
382#define SWITCH_TSS_IRET 1
383#define SWITCH_TSS_CALL 2
384
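/* Layout of the 32-bit TSS as used below: CR3 at 0x1c, EIP at 0x20,
   EFLAGS at 0x24, EAX..EDI at 0x28..0x44, the six segment selectors at
   0x48..0x5c, the LDT selector at 0x60 and the T bit / I/O map base at
   0x64/0x66. The 16-bit (286) TSS uses the 0x0e..0x2a offsets instead. */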
385/* XXX: restore CPU state in registers (PowerPC case) */
386static void switch_tss(int tss_selector,
387 uint32_t e1, uint32_t e2, int source,
388 uint32_t next_eip)
389{
390 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
391 target_ulong tss_base;
392 uint32_t new_regs[8], new_segs[6];
393 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
394 uint32_t old_eflags, eflags_mask;
395 SegmentCache *dt;
396#ifndef VBOX
397 int index;
398#else
399 unsigned int index;
400#endif
401 target_ulong ptr;
402
403 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
404 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
405
406 /* if it is a task gate, read the TSS descriptor it references and load that instead */
407 if (type == 5) {
408 if (!(e2 & DESC_P_MASK))
409 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
410 tss_selector = e1 >> 16;
411 if (tss_selector & 4)
412 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
413 if (load_segment(&e1, &e2, tss_selector) != 0)
414 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
415 if (e2 & DESC_S_MASK)
416 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418 if ((type & 7) != 1)
419 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
420 }
421
422 if (!(e2 & DESC_P_MASK))
423 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
424
425 if (type & 8)
426 tss_limit_max = 103;
427 else
428 tss_limit_max = 43;
429 tss_limit = get_seg_limit(e1, e2);
430 tss_base = get_seg_base(e1, e2);
431 if ((tss_selector & 4) != 0 ||
432 tss_limit < tss_limit_max)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
435 if (old_type & 8)
436 old_tss_limit_max = 103;
437 else
438 old_tss_limit_max = 43;
439
440 /* read all the registers from the new TSS */
441 if (type & 8) {
442 /* 32 bit */
443 new_cr3 = ldl_kernel(tss_base + 0x1c);
444 new_eip = ldl_kernel(tss_base + 0x20);
445 new_eflags = ldl_kernel(tss_base + 0x24);
446 for(i = 0; i < 8; i++)
447 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
448 for(i = 0; i < 6; i++)
449 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
450 new_ldt = lduw_kernel(tss_base + 0x60);
451 new_trap = ldl_kernel(tss_base + 0x64);
452 } else {
453 /* 16 bit */
454 new_cr3 = 0;
455 new_eip = lduw_kernel(tss_base + 0x0e);
456 new_eflags = lduw_kernel(tss_base + 0x10);
457 for(i = 0; i < 8; i++)
458 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
459 for(i = 0; i < 4; i++)
460 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
461 new_ldt = lduw_kernel(tss_base + 0x2a);
462 new_segs[R_FS] = 0;
463 new_segs[R_GS] = 0;
464 new_trap = 0;
465 }
466
467 /* NOTE: we must avoid memory exceptions during the task switch,
468 so we make dummy accesses beforehand */
469 /* XXX: it can still fail in some cases, so a bigger hack is
470 necessary to validate the TLB after having done the accesses */
471
472 v1 = ldub_kernel(env->tr.base);
473 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
474 stb_kernel(env->tr.base, v1);
475 stb_kernel(env->tr.base + old_tss_limit_max, v2);
476
477 /* clear the old task's busy bit (the task switch is restartable) */
478 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
479 target_ulong ptr;
480 uint32_t e2;
481 ptr = env->gdt.base + (env->tr.selector & ~7);
482 e2 = ldl_kernel(ptr + 4);
483 e2 &= ~DESC_TSS_BUSY_MASK;
484 stl_kernel(ptr + 4, e2);
485 }
486 old_eflags = compute_eflags();
487 if (source == SWITCH_TSS_IRET)
488 old_eflags &= ~NT_MASK;
489
490 /* save the current state in the old TSS */
491 if (type & 8) {
492 /* 32 bit */
493 stl_kernel(env->tr.base + 0x20, next_eip);
494 stl_kernel(env->tr.base + 0x24, old_eflags);
495 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
496 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
497 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
498 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
499 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
500 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
501 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
502 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
503 for(i = 0; i < 6; i++)
504 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
505#ifdef VBOX
506 /* Must store the ldt as it gets reloaded and might have been changed. */
507 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
508#endif
509#if defined(VBOX) && defined(DEBUG)
510 printf("TSS 32 bits switch\n");
511 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
512#endif
513 } else {
514 /* 16 bit */
515 stw_kernel(env->tr.base + 0x0e, next_eip);
516 stw_kernel(env->tr.base + 0x10, old_eflags);
517 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
518 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
519 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
520 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
521 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
522 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
523 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
524 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
525 for(i = 0; i < 4; i++)
526 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
527#ifdef VBOX
528 /* Must store the ldt as it gets reloaded and might have been changed. */
529 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
530#endif
531 }
532
533 /* now if an exception occurs, it will occur in the next task
534 context */
535
536 if (source == SWITCH_TSS_CALL) {
537 stw_kernel(tss_base, env->tr.selector);
538 new_eflags |= NT_MASK;
539 }
540
541 /* set busy bit */
542 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
543 target_ulong ptr;
544 uint32_t e2;
545 ptr = env->gdt.base + (tss_selector & ~7);
546 e2 = ldl_kernel(ptr + 4);
547 e2 |= DESC_TSS_BUSY_MASK;
548 stl_kernel(ptr + 4, e2);
549 }
550
551 /* set the new CPU state */
552 /* from this point, any exception that occurs is raised in the context of the new task and is therefore problematic */
553 env->cr[0] |= CR0_TS_MASK;
554 env->hflags |= HF_TS_MASK;
555 env->tr.selector = tss_selector;
556 env->tr.base = tss_base;
557 env->tr.limit = tss_limit;
558 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
559
560 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
561 cpu_x86_update_cr3(env, new_cr3);
562 }
563
564 /* first load the registers that cannot fault, then reload the ones
565 that may raise exceptions */
566 env->eip = new_eip;
567 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
568 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
569 if (!(type & 8))
570 eflags_mask &= 0xffff;
571 load_eflags(new_eflags, eflags_mask);
572 /* XXX: what to do in 16 bit case ? */
573 EAX = new_regs[0];
574 ECX = new_regs[1];
575 EDX = new_regs[2];
576 EBX = new_regs[3];
577 ESP = new_regs[4];
578 EBP = new_regs[5];
579 ESI = new_regs[6];
580 EDI = new_regs[7];
581 if (new_eflags & VM_MASK) {
582 for(i = 0; i < 6; i++)
583 load_seg_vm(i, new_segs[i]);
584 /* in vm86, CPL is always 3 */
585 cpu_x86_set_cpl(env, 3);
586 } else {
587 /* CPL is set to the RPL of CS */
588 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
589 /* load just the selectors first, as loading the descriptors may trigger exceptions */
590 for(i = 0; i < 6; i++)
591 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
592 }
593
594 env->ldt.selector = new_ldt & ~4;
595 env->ldt.base = 0;
596 env->ldt.limit = 0;
597 env->ldt.flags = 0;
598
599 /* load the LDT */
600 if (new_ldt & 4)
601 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
602
603 if ((new_ldt & 0xfffc) != 0) {
604 dt = &env->gdt;
605 index = new_ldt & ~7;
606 if ((index + 7) > dt->limit)
607 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
608 ptr = dt->base + index;
609 e1 = ldl_kernel(ptr);
610 e2 = ldl_kernel(ptr + 4);
611 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
612 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
613 if (!(e2 & DESC_P_MASK))
614 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
615 load_seg_cache_raw_dt(&env->ldt, e1, e2);
616 }
617
618 /* load the segments */
619 if (!(new_eflags & VM_MASK)) {
620 tss_load_seg(R_CS, new_segs[R_CS]);
621 tss_load_seg(R_SS, new_segs[R_SS]);
622 tss_load_seg(R_ES, new_segs[R_ES]);
623 tss_load_seg(R_DS, new_segs[R_DS]);
624 tss_load_seg(R_FS, new_segs[R_FS]);
625 tss_load_seg(R_GS, new_segs[R_GS]);
626 }
627
628 /* check that EIP is in the CS segment limits */
629 if (new_eip > env->segs[R_CS].limit) {
630 /* XXX: different exception if CALL ? */
631 raise_exception_err(EXCP0D_GPF, 0);
632 }
633
634#ifndef CONFIG_USER_ONLY
635 /* reset local breakpoints */
636 if (env->dr[7] & 0x55) {
637 for (i = 0; i < 4; i++) {
638 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
639 hw_breakpoint_remove(env, i);
640 }
641 env->dr[7] &= ~0x55;
642 }
643#endif
644}
645
646/* check if Port I/O is allowed in TSS */
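/* The 16-bit I/O map base stored at TSS offset 0x66 gives the offset of the
   I/O permission bitmap inside the TSS; an access to 'size' consecutive ports
   starting at 'addr' is allowed only if every corresponding bit is clear. */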
647static inline void check_io(int addr, int size)
648{
649#ifndef VBOX
650 int io_offset, val, mask;
651#else
652 int val, mask;
653 unsigned int io_offset;
654#endif /* VBOX */
655
656 /* TSS must be a valid 32 bit one */
657 if (!(env->tr.flags & DESC_P_MASK) ||
658 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
659 env->tr.limit < 103)
660 goto fail;
661 io_offset = lduw_kernel(env->tr.base + 0x66);
662 io_offset += (addr >> 3);
663 /* Note: the check needs two bytes */
664 if ((io_offset + 1) > env->tr.limit)
665 goto fail;
666 val = lduw_kernel(env->tr.base + io_offset);
667 val >>= (addr & 7);
668 mask = (1 << size) - 1;
669 /* all bits must be zero to allow the I/O */
670 if ((val & mask) != 0) {
671 fail:
672 raise_exception_err(EXCP0D_GPF, 0);
673 }
674}
675
676#ifdef VBOX
677
678/* Keep in sync with gen_check_external_event() */
679void helper_check_external_event()
680{
681 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
682 | CPU_INTERRUPT_EXTERNAL_EXIT
683 | CPU_INTERRUPT_EXTERNAL_TIMER
684 | CPU_INTERRUPT_EXTERNAL_DMA))
685 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
686 && (env->eflags & IF_MASK)
687 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
688 {
689 helper_external_event();
690 }
691
692}
693
694void helper_sync_seg(uint32_t reg)
695{
696 if (env->segs[reg].newselector)
697 sync_seg(env, reg, env->segs[reg].newselector);
698}
699
700#endif /* VBOX */
701
702void helper_check_iob(uint32_t t0)
703{
704 check_io(t0, 1);
705}
706
707void helper_check_iow(uint32_t t0)
708{
709 check_io(t0, 2);
710}
711
712void helper_check_iol(uint32_t t0)
713{
714 check_io(t0, 4);
715}
716
717void helper_outb(uint32_t port, uint32_t data)
718{
719#ifndef VBOX
720 cpu_outb(port, data & 0xff);
721#else
722 cpu_outb(env, port, data & 0xff);
723#endif
724}
725
726target_ulong helper_inb(uint32_t port)
727{
728#ifndef VBOX
729 return cpu_inb(port);
730#else
731 return cpu_inb(env, port);
732#endif
733}
734
735void helper_outw(uint32_t port, uint32_t data)
736{
737#ifndef VBOX
738 cpu_outw(port, data & 0xffff);
739#else
740 cpu_outw(env, port, data & 0xffff);
741#endif
742}
743
744target_ulong helper_inw(uint32_t port)
745{
746#ifndef VBOX
747 return cpu_inw(port);
748#else
749 return cpu_inw(env, port);
750#endif
751}
752
753void helper_outl(uint32_t port, uint32_t data)
754{
755#ifndef VBOX
756 cpu_outl(port, data);
757#else
758 cpu_outl(env, port, data);
759#endif
760}
761
762target_ulong helper_inl(uint32_t port)
763{
764#ifndef VBOX
765 return cpu_inl(port);
766#else
767 return cpu_inl(env, port);
768#endif
769}
770
771static inline unsigned int get_sp_mask(unsigned int e2)
772{
773 if (e2 & DESC_B_MASK)
774 return 0xffffffff;
775 else
776 return 0xffff;
777}
778
779static int exeption_has_error_code(int intno)
780{
781 switch(intno) {
782 case 8:
783 case 10:
784 case 11:
785 case 12:
786 case 13:
787 case 14:
788 case 17:
789 return 1;
790 }
791 return 0;
792}
793
794#ifdef TARGET_X86_64
795#define SET_ESP(val, sp_mask)\
796do {\
797 if ((sp_mask) == 0xffff)\
798 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
799 else if ((sp_mask) == 0xffffffffLL)\
800 ESP = (uint32_t)(val);\
801 else\
802 ESP = (val);\
803} while (0)
804#else
805#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
806#endif
807
808/* on 64-bit machines the segment base + stack offset sum can overflow 32 bits, so
809 * this segment addition macro is used to trim the value to 32 bits whenever needed */
810#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
811
812/* XXX: add a is_user flag to have proper security support */
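/* In the stack macros below, 'ssp' is the SS segment base, 'sp' the stack
   offset and 'sp_mask' (see get_sp_mask()) selects 16-bit or 32-bit
   wrap-around of that offset. */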
813#define PUSHW(ssp, sp, sp_mask, val)\
814{\
815 sp -= 2;\
816 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
817}
818
819#define PUSHL(ssp, sp, sp_mask, val)\
820{\
821 sp -= 4;\
822 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
823}
824
825#define POPW(ssp, sp, sp_mask, val)\
826{\
827 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
828 sp += 2;\
829}
830
831#define POPL(ssp, sp, sp_mask, val)\
832{\
833 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
834 sp += 4;\
835}
836
837/* protected mode interrupt */
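/* Roughly: read the gate descriptor from the IDT, check its type, DPL and
   present bit, load the target code segment, switch to the inner stack taken
   from the TSS if the privilege level changes, push (old SS:ESP,) EFLAGS, CS,
   EIP and an optional error code, then jump to the handler. Interrupt gates
   additionally clear IF. */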
838static void do_interrupt_protected(int intno, int is_int, int error_code,
839 unsigned int next_eip, int is_hw)
840{
841 SegmentCache *dt;
842 target_ulong ptr, ssp;
843 int type, dpl, selector, ss_dpl, cpl;
844 int has_error_code, new_stack, shift;
845 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
846 uint32_t old_eip, sp_mask;
847
848#ifdef VBOX
849 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
850 cpu_loop_exit();
851#endif
852
853 has_error_code = 0;
854 if (!is_int && !is_hw)
855 has_error_code = exeption_has_error_code(intno);
856 if (is_int)
857 old_eip = next_eip;
858 else
859 old_eip = env->eip;
860
861 dt = &env->idt;
862#ifndef VBOX
863 if (intno * 8 + 7 > dt->limit)
864#else
865 if ((unsigned)intno * 8 + 7 > dt->limit)
866#endif
867 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
868 ptr = dt->base + intno * 8;
869 e1 = ldl_kernel(ptr);
870 e2 = ldl_kernel(ptr + 4);
871 /* check gate type */
872 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
873 switch(type) {
874 case 5: /* task gate */
875 /* must do that check here to return the correct error code */
876 if (!(e2 & DESC_P_MASK))
877 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
878 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
879 if (has_error_code) {
880 int type;
881 uint32_t mask;
882 /* push the error code */
883 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
884 shift = type >> 3;
885 if (env->segs[R_SS].flags & DESC_B_MASK)
886 mask = 0xffffffff;
887 else
888 mask = 0xffff;
889 esp = (ESP - (2 << shift)) & mask;
890 ssp = env->segs[R_SS].base + esp;
891 if (shift)
892 stl_kernel(ssp, error_code);
893 else
894 stw_kernel(ssp, error_code);
895 SET_ESP(esp, mask);
896 }
897 return;
898 case 6: /* 286 interrupt gate */
899 case 7: /* 286 trap gate */
900 case 14: /* 386 interrupt gate */
901 case 15: /* 386 trap gate */
902 break;
903 default:
904 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
905 break;
906 }
907 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
908 cpl = env->hflags & HF_CPL_MASK;
909 /* check privilege if software int */
910 if (is_int && dpl < cpl)
911 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
912 /* check valid bit */
913 if (!(e2 & DESC_P_MASK))
914 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
915 selector = e1 >> 16;
916 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
917 if ((selector & 0xfffc) == 0)
918 raise_exception_err(EXCP0D_GPF, 0);
919
920 if (load_segment(&e1, &e2, selector) != 0)
921 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
922 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
923 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
924 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
925 if (dpl > cpl)
926 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
927 if (!(e2 & DESC_P_MASK))
928 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
929 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
930 /* to inner privilege */
931 get_ss_esp_from_tss(&ss, &esp, dpl);
932 if ((ss & 0xfffc) == 0)
933 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
934 if ((ss & 3) != dpl)
935 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
936 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
937 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
938 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
939 if (ss_dpl != dpl)
940 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
941 if (!(ss_e2 & DESC_S_MASK) ||
942 (ss_e2 & DESC_CS_MASK) ||
943 !(ss_e2 & DESC_W_MASK))
944 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
945 if (!(ss_e2 & DESC_P_MASK))
946#ifdef VBOX /* See page 3-477 of 253666.pdf */
947 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
948#else
949 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
950#endif
951 new_stack = 1;
952 sp_mask = get_sp_mask(ss_e2);
953 ssp = get_seg_base(ss_e1, ss_e2);
954#if defined(VBOX) && defined(DEBUG)
955 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
956#endif
957 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
958 /* to same privilege */
959 if (env->eflags & VM_MASK)
960 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
961 new_stack = 0;
962 sp_mask = get_sp_mask(env->segs[R_SS].flags);
963 ssp = env->segs[R_SS].base;
964 esp = ESP;
965 dpl = cpl;
966 } else {
967 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
968 new_stack = 0; /* avoid warning */
969 sp_mask = 0; /* avoid warning */
970 ssp = 0; /* avoid warning */
971 esp = 0; /* avoid warning */
972 }
973
974 shift = type >> 3;
975
976#if 0
977 /* XXX: check that enough room is available */
978 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
979 if (env->eflags & VM_MASK)
980 push_size += 8;
981 push_size <<= shift;
982#endif
983 if (shift == 1) {
984 if (new_stack) {
985 if (env->eflags & VM_MASK) {
986 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
987 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
988 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
989 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
990 }
991 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
992 PUSHL(ssp, esp, sp_mask, ESP);
993 }
994 PUSHL(ssp, esp, sp_mask, compute_eflags());
995 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
996 PUSHL(ssp, esp, sp_mask, old_eip);
997 if (has_error_code) {
998 PUSHL(ssp, esp, sp_mask, error_code);
999 }
1000 } else {
1001 if (new_stack) {
1002 if (env->eflags & VM_MASK) {
1003 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1004 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1005 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1006 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1007 }
1008 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1009 PUSHW(ssp, esp, sp_mask, ESP);
1010 }
1011 PUSHW(ssp, esp, sp_mask, compute_eflags());
1012 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1013 PUSHW(ssp, esp, sp_mask, old_eip);
1014 if (has_error_code) {
1015 PUSHW(ssp, esp, sp_mask, error_code);
1016 }
1017 }
1018
1019 if (new_stack) {
1020 if (env->eflags & VM_MASK) {
1021 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1022 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1023 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1024 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1025 }
1026 ss = (ss & ~3) | dpl;
1027 cpu_x86_load_seg_cache(env, R_SS, ss,
1028 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1029 }
1030 SET_ESP(esp, sp_mask);
1031
1032 selector = (selector & ~3) | dpl;
1033 cpu_x86_load_seg_cache(env, R_CS, selector,
1034 get_seg_base(e1, e2),
1035 get_seg_limit(e1, e2),
1036 e2);
1037 cpu_x86_set_cpl(env, dpl);
1038 env->eip = offset;
1039
1040 /* interrupt gates clear the IF flag */
1041 if ((type & 1) == 0) {
1042 env->eflags &= ~IF_MASK;
1043 }
1044#ifndef VBOX
1045 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1046#else
1047 /*
1048 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1049 * gets confused by seemingly changed EFLAGS. See #3491 and
1050 * public bug #2341.
1051 */
1052 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1053#endif
1054}
1055
1056#ifdef VBOX
1057
1058/* check if VME interrupt redirection is enabled in TSS */
1059DECLINLINE(bool) is_vme_irq_redirected(int intno)
1060{
1061 unsigned int io_offset, intredir_offset;
1062 unsigned char val, mask;
1063
1064 /* TSS must be a valid 32 bit one */
1065 if (!(env->tr.flags & DESC_P_MASK) ||
1066 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1067 env->tr.limit < 103)
1068 goto fail;
1069 io_offset = lduw_kernel(env->tr.base + 0x66);
1070 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1071 if (io_offset < 0x68 + 0x20)
1072 io_offset = 0x68 + 0x20;
1073 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1074 intredir_offset = io_offset - 0x20;
1075
1076 intredir_offset += (intno >> 3);
1077 if ((intredir_offset) > env->tr.limit)
1078 goto fail;
1079
1080 val = ldub_kernel(env->tr.base + intredir_offset);
1081 mask = 1 << (unsigned char)(intno & 7);
1082
1083 /* bit set means no redirection. */
1084 if ((val & mask) != 0) {
1085 return false;
1086 }
1087 return true;
1088
1089fail:
1090 raise_exception_err(EXCP0D_GPF, 0);
1091 return true;
1092}
1093
1094/* V86 mode software interrupt with CR4.VME=1 */
1095static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1096{
1097 target_ulong ptr, ssp;
1098 int selector;
1099 uint32_t offset, esp;
1100 uint32_t old_cs, old_eflags;
1101 uint32_t iopl;
1102
1103 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1104
1105 if (!is_vme_irq_redirected(intno))
1106 {
1107 if (iopl == 3)
1108 {
1109 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1110 return;
1111 }
1112 else
1113 raise_exception_err(EXCP0D_GPF, 0);
1114 }
1115
1116 /* virtual mode idt is at linear address 0 */
1117 ptr = 0 + intno * 4;
1118 offset = lduw_kernel(ptr);
1119 selector = lduw_kernel(ptr + 2);
1120 esp = ESP;
1121 ssp = env->segs[R_SS].base;
1122 old_cs = env->segs[R_CS].selector;
1123
1124 old_eflags = compute_eflags();
1125 if (iopl < 3)
1126 {
1127 /* copy VIF into IF and set IOPL to 3 */
1128 if (env->eflags & VIF_MASK)
1129 old_eflags |= IF_MASK;
1130 else
1131 old_eflags &= ~IF_MASK;
1132
1133 old_eflags |= (3 << IOPL_SHIFT);
1134 }
1135
1136 /* XXX: use SS segment size ? */
1137 PUSHW(ssp, esp, 0xffff, old_eflags);
1138 PUSHW(ssp, esp, 0xffff, old_cs);
1139 PUSHW(ssp, esp, 0xffff, next_eip);
1140
1141 /* update processor state */
1142 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1143 env->eip = offset;
1144 env->segs[R_CS].selector = selector;
1145 env->segs[R_CS].base = (selector << 4);
1146 env->eflags &= ~(TF_MASK | RF_MASK);
1147
1148 if (iopl < 3)
1149 env->eflags &= ~VIF_MASK;
1150 else
1151 env->eflags &= ~IF_MASK;
1152}
1153
1154#endif /* VBOX */
1155
1156#ifdef TARGET_X86_64
1157
1158#define PUSHQ(sp, val)\
1159{\
1160 sp -= 8;\
1161 stq_kernel(sp, (val));\
1162}
1163
1164#define POPQ(sp, val)\
1165{\
1166 val = ldq_kernel(sp);\
1167 sp += 8;\
1168}
1169
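/* In the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and IST1..IST7
   at offsets 0x24..0x54, hence index = 8 * level + 4 below (callers pass
   dpl 0..2 or ist + 3). */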
1170static inline target_ulong get_rsp_from_tss(int level)
1171{
1172 int index;
1173
1174#if 0
1175 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1176 env->tr.base, env->tr.limit);
1177#endif
1178
1179 if (!(env->tr.flags & DESC_P_MASK))
1180 cpu_abort(env, "invalid tss");
1181 index = 8 * level + 4;
1182 if ((index + 7) > env->tr.limit)
1183 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1184 return ldq_kernel(env->tr.base + index);
1185}
1186
1187/* 64 bit interrupt */
1188static void do_interrupt64(int intno, int is_int, int error_code,
1189 target_ulong next_eip, int is_hw)
1190{
1191 SegmentCache *dt;
1192 target_ulong ptr;
1193 int type, dpl, selector, cpl, ist;
1194 int has_error_code, new_stack;
1195 uint32_t e1, e2, e3, ss;
1196 target_ulong old_eip, esp, offset;
1197
1198#ifdef VBOX
1199 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1200 cpu_loop_exit();
1201#endif
1202
1203 has_error_code = 0;
1204 if (!is_int && !is_hw)
1205 has_error_code = exeption_has_error_code(intno);
1206 if (is_int)
1207 old_eip = next_eip;
1208 else
1209 old_eip = env->eip;
1210
1211 dt = &env->idt;
1212 if (intno * 16 + 15 > dt->limit)
1213 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1214 ptr = dt->base + intno * 16;
1215 e1 = ldl_kernel(ptr);
1216 e2 = ldl_kernel(ptr + 4);
1217 e3 = ldl_kernel(ptr + 8);
1218 /* check gate type */
1219 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1220 switch(type) {
1221 case 14: /* 386 interrupt gate */
1222 case 15: /* 386 trap gate */
1223 break;
1224 default:
1225 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1226 break;
1227 }
1228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1229 cpl = env->hflags & HF_CPL_MASK;
1230 /* check privilege if software int */
1231 if (is_int && dpl < cpl)
1232 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1233 /* check valid bit */
1234 if (!(e2 & DESC_P_MASK))
1235 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1236 selector = e1 >> 16;
1237 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1238 ist = e2 & 7;
1239 if ((selector & 0xfffc) == 0)
1240 raise_exception_err(EXCP0D_GPF, 0);
1241
1242 if (load_segment(&e1, &e2, selector) != 0)
1243 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1244 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1246 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1247 if (dpl > cpl)
1248 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1249 if (!(e2 & DESC_P_MASK))
1250 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1251 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1252 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1253 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1254 /* to inner privilege */
1255 if (ist != 0)
1256 esp = get_rsp_from_tss(ist + 3);
1257 else
1258 esp = get_rsp_from_tss(dpl);
1259 esp &= ~0xfLL; /* align stack */
1260 ss = 0;
1261 new_stack = 1;
1262 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1263 /* to same privilege */
1264 if (env->eflags & VM_MASK)
1265 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1266 new_stack = 0;
1267 if (ist != 0)
1268 esp = get_rsp_from_tss(ist + 3);
1269 else
1270 esp = ESP;
1271 esp &= ~0xfLL; /* align stack */
1272 dpl = cpl;
1273 } else {
1274 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1275 new_stack = 0; /* avoid warning */
1276 esp = 0; /* avoid warning */
1277 }
1278
1279 PUSHQ(esp, env->segs[R_SS].selector);
1280 PUSHQ(esp, ESP);
1281 PUSHQ(esp, compute_eflags());
1282 PUSHQ(esp, env->segs[R_CS].selector);
1283 PUSHQ(esp, old_eip);
1284 if (has_error_code) {
1285 PUSHQ(esp, error_code);
1286 }
1287
1288 if (new_stack) {
1289 ss = 0 | dpl;
1290 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1291 }
1292 ESP = esp;
1293
1294 selector = (selector & ~3) | dpl;
1295 cpu_x86_load_seg_cache(env, R_CS, selector,
1296 get_seg_base(e1, e2),
1297 get_seg_limit(e1, e2),
1298 e2);
1299 cpu_x86_set_cpl(env, dpl);
1300 env->eip = offset;
1301
1302 /* interrupt gates clear the IF flag */
1303 if ((type & 1) == 0) {
1304 env->eflags &= ~IF_MASK;
1305 }
1306#ifndef VBOX
1307 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1308#else /* VBOX */
1309 /*
1310 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1311 * gets confused by seemingly changed EFLAGS. See #3491 and
1312 * public bug #2341.
1313 */
1314 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1315#endif /* VBOX */
1316}
1317#endif
1318
1319#ifdef TARGET_X86_64
1320#if defined(CONFIG_USER_ONLY)
1321void helper_syscall(int next_eip_addend)
1322{
1323 env->exception_index = EXCP_SYSCALL;
1324 env->exception_next_eip = env->eip + next_eip_addend;
1325 cpu_loop_exit();
1326}
1327#else
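/* SYSCALL: CS is loaded from STAR[47:32] and SS from STAR[47:32] + 8. In
   long mode the return RIP is saved in RCX and RFLAGS in R11, RFLAGS is
   masked with SFMASK (env->fmask) and execution continues at LSTAR (or
   CSTAR for compatibility-mode callers); in legacy mode the return EIP is
   saved in ECX and execution continues at STAR[31:0]. */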
1328void helper_syscall(int next_eip_addend)
1329{
1330 int selector;
1331
1332 if (!(env->efer & MSR_EFER_SCE)) {
1333 raise_exception_err(EXCP06_ILLOP, 0);
1334 }
1335 selector = (env->star >> 32) & 0xffff;
1336 if (env->hflags & HF_LMA_MASK) {
1337 int code64;
1338
1339 ECX = env->eip + next_eip_addend;
1340 env->regs[11] = compute_eflags();
1341
1342 code64 = env->hflags & HF_CS64_MASK;
1343
1344 cpu_x86_set_cpl(env, 0);
1345 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1346 0, 0xffffffff,
1347 DESC_G_MASK | DESC_P_MASK |
1348 DESC_S_MASK |
1349 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1350 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1351 0, 0xffffffff,
1352 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1353 DESC_S_MASK |
1354 DESC_W_MASK | DESC_A_MASK);
1355 env->eflags &= ~env->fmask;
1356 load_eflags(env->eflags, 0);
1357 if (code64)
1358 env->eip = env->lstar;
1359 else
1360 env->eip = env->cstar;
1361 } else {
1362 ECX = (uint32_t)(env->eip + next_eip_addend);
1363
1364 cpu_x86_set_cpl(env, 0);
1365 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1366 0, 0xffffffff,
1367 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1368 DESC_S_MASK |
1369 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1370 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1371 0, 0xffffffff,
1372 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1373 DESC_S_MASK |
1374 DESC_W_MASK | DESC_A_MASK);
1375 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1376 env->eip = (uint32_t)env->star;
1377 }
1378}
1379#endif
1380#endif
1381
1382#ifdef TARGET_X86_64
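/* SYSRET returns to CPL 3: CS is taken from STAR[63:48] (+16 and with the
   L bit set for a 64-bit return), SS from STAR[63:48] + 8, RIP from RCX and,
   in long mode, RFLAGS from R11. */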
1383void helper_sysret(int dflag)
1384{
1385 int cpl, selector;
1386
1387 if (!(env->efer & MSR_EFER_SCE)) {
1388 raise_exception_err(EXCP06_ILLOP, 0);
1389 }
1390 cpl = env->hflags & HF_CPL_MASK;
1391 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1392 raise_exception_err(EXCP0D_GPF, 0);
1393 }
1394 selector = (env->star >> 48) & 0xffff;
1395 if (env->hflags & HF_LMA_MASK) {
1396 if (dflag == 2) {
1397 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1398 0, 0xffffffff,
1399 DESC_G_MASK | DESC_P_MASK |
1400 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1401 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1402 DESC_L_MASK);
1403 env->eip = ECX;
1404 } else {
1405 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1406 0, 0xffffffff,
1407 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1408 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1409 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1410 env->eip = (uint32_t)ECX;
1411 }
1412 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1413 0, 0xffffffff,
1414 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1415 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1416 DESC_W_MASK | DESC_A_MASK);
1417 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1418 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1419 cpu_x86_set_cpl(env, 3);
1420 } else {
1421 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1422 0, 0xffffffff,
1423 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1424 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1425 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1426 env->eip = (uint32_t)ECX;
1427 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1428 0, 0xffffffff,
1429 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1430 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1431 DESC_W_MASK | DESC_A_MASK);
1432 env->eflags |= IF_MASK;
1433 cpu_x86_set_cpl(env, 3);
1434 }
1435}
1436#endif
1437
1438#ifdef VBOX
1439
1440/**
1441 * Checks and processes external VMM events.
1442 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1443 */
1444void helper_external_event(void)
1445{
1446# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1447 uintptr_t uSP;
1448# ifdef RT_ARCH_AMD64
1449 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1450# else
1451 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1452# endif
1453 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1454# endif
1455 /* Keep in sync with flags checked by gen_check_external_event() */
1456 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1457 {
1458 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1459 ~CPU_INTERRUPT_EXTERNAL_HARD);
1460 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1461 }
1462 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1463 {
1464 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1465 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1466 cpu_exit(env);
1467 }
1468 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1469 {
1470 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1471 ~CPU_INTERRUPT_EXTERNAL_DMA);
1472 remR3DmaRun(env);
1473 }
1474 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1475 {
1476 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1477 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1478 remR3TimersRun(env);
1479 }
1480 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1481 {
1482 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1483 ~CPU_INTERRUPT_EXTERNAL_HARD);
1484 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1485 }
1486}
1487
1488/* helper for recording call instruction addresses for later scanning */
1489void helper_record_call()
1490{
1491 if ( !(env->state & CPU_RAW_RING0)
1492 && (env->cr[0] & CR0_PG_MASK)
1493 && !(env->eflags & X86_EFL_IF))
1494 remR3RecordCall(env);
1495}
1496
1497#endif /* VBOX */
1498
1499/* real mode interrupt */
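/* Each real-mode IVT entry is 4 bytes, a 16-bit offset followed by a 16-bit
   segment; FLAGS, CS and IP are pushed on the stack, CS:IP is loaded from
   the vector and IF/TF/AC/RF are cleared. */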
1500static void do_interrupt_real(int intno, int is_int, int error_code,
1501 unsigned int next_eip)
1502{
1503 SegmentCache *dt;
1504 target_ulong ptr, ssp;
1505 int selector;
1506 uint32_t offset, esp;
1507 uint32_t old_cs, old_eip;
1508
1509 /* real mode (simpler !) */
1510 dt = &env->idt;
1511#ifndef VBOX
1512 if (intno * 4 + 3 > dt->limit)
1513#else
1514 if ((unsigned)intno * 4 + 3 > dt->limit)
1515#endif
1516 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1517 ptr = dt->base + intno * 4;
1518 offset = lduw_kernel(ptr);
1519 selector = lduw_kernel(ptr + 2);
1520 esp = ESP;
1521 ssp = env->segs[R_SS].base;
1522 if (is_int)
1523 old_eip = next_eip;
1524 else
1525 old_eip = env->eip;
1526 old_cs = env->segs[R_CS].selector;
1527 /* XXX: use SS segment size ? */
1528 PUSHW(ssp, esp, 0xffff, compute_eflags());
1529 PUSHW(ssp, esp, 0xffff, old_cs);
1530 PUSHW(ssp, esp, 0xffff, old_eip);
1531
1532 /* update processor state */
1533 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1534 env->eip = offset;
1535 env->segs[R_CS].selector = selector;
1536 env->segs[R_CS].base = (selector << 4);
1537 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1538}
1539
1540/* fake user mode interrupt */
1541void do_interrupt_user(int intno, int is_int, int error_code,
1542 target_ulong next_eip)
1543{
1544 SegmentCache *dt;
1545 target_ulong ptr;
1546 int dpl, cpl, shift;
1547 uint32_t e2;
1548
1549 dt = &env->idt;
1550 if (env->hflags & HF_LMA_MASK) {
1551 shift = 4;
1552 } else {
1553 shift = 3;
1554 }
1555 ptr = dt->base + (intno << shift);
1556 e2 = ldl_kernel(ptr + 4);
1557
1558 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1559 cpl = env->hflags & HF_CPL_MASK;
1560 /* check privilege if software int */
1561 if (is_int && dpl < cpl)
1562 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1563
1564 /* Since we emulate only user space, we cannot do more than
1565 exiting the emulation with the suitable exception and error
1566 code */
1567 if (is_int)
1568 EIP = next_eip;
1569}
1570
1571#if !defined(CONFIG_USER_ONLY)
1572static void handle_even_inj(int intno, int is_int, int error_code,
1573 int is_hw, int rm)
1574{
1575 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1576 if (!(event_inj & SVM_EVTINJ_VALID)) {
1577 int type;
1578 if (is_int)
1579 type = SVM_EVTINJ_TYPE_SOFT;
1580 else
1581 type = SVM_EVTINJ_TYPE_EXEPT;
1582 event_inj = intno | type | SVM_EVTINJ_VALID;
1583 if (!rm && exeption_has_error_code(intno)) {
1584 event_inj |= SVM_EVTINJ_VALID_ERR;
1585 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1586 }
1587 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1588 }
1589}
1590#endif
1591
1592/*
1593 * Begin execution of an interrupt. is_int is TRUE if coming from
1594 * the int instruction. next_eip is the EIP value AFTER the interrupt
1595 * instruction. It is only relevant if is_int is TRUE.
1596 */
1597void do_interrupt(int intno, int is_int, int error_code,
1598 target_ulong next_eip, int is_hw)
1599{
1600 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1601 if ((env->cr[0] & CR0_PE_MASK)) {
1602 static int count;
1603 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1604 count, intno, error_code, is_int,
1605 env->hflags & HF_CPL_MASK,
1606 env->segs[R_CS].selector, EIP,
1607 (int)env->segs[R_CS].base + EIP,
1608 env->segs[R_SS].selector, ESP);
1609 if (intno == 0x0e) {
1610 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1611 } else {
1612 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1613 }
1614 qemu_log("\n");
1615 log_cpu_state(env, X86_DUMP_CCOP);
1616#if 0
1617 {
1618 int i;
1619 uint8_t *ptr;
1620 qemu_log(" code=");
1621 ptr = env->segs[R_CS].base + env->eip;
1622 for(i = 0; i < 16; i++) {
1623 qemu_log(" %02x", ldub(ptr + i));
1624 }
1625 qemu_log("\n");
1626 }
1627#endif
1628 count++;
1629 }
1630 }
1631#ifdef VBOX
1632 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1633 if (is_int) {
1634 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1635 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1636 } else {
1637 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1638 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1639 }
1640 }
1641#endif
1642 if (env->cr[0] & CR0_PE_MASK) {
1643#if !defined(CONFIG_USER_ONLY)
1644 if (env->hflags & HF_SVMI_MASK)
1645 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1646#endif
1647#ifdef TARGET_X86_64
1648 if (env->hflags & HF_LMA_MASK) {
1649 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1650 } else
1651#endif
1652 {
1653#ifdef VBOX
1654 /* int xx *, v86 code and VME enabled? */
1655 if ( (env->eflags & VM_MASK)
1656 && (env->cr[4] & CR4_VME_MASK)
1657 && is_int
1658 && !is_hw
1659 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1660 )
1661 do_soft_interrupt_vme(intno, error_code, next_eip);
1662 else
1663#endif /* VBOX */
1664 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1665 }
1666 } else {
1667#if !defined(CONFIG_USER_ONLY)
1668 if (env->hflags & HF_SVMI_MASK)
1669 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1670#endif
1671 do_interrupt_real(intno, is_int, error_code, next_eip);
1672 }
1673
1674#if !defined(CONFIG_USER_ONLY)
1675 if (env->hflags & HF_SVMI_MASK) {
1676 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1677 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1678 }
1679#endif
1680}
1681
1682/* This should come from sysemu.h - if we could include it here... */
1683void qemu_system_reset_request(void);
1684
1685/*
1686 * Check nested exceptions and change to double or triple fault if
1687 * needed. It should only be called if this is not an interrupt.
1688 * Returns the new exception number.
1689 */
1690static int check_exception(int intno, int *error_code)
1691{
1692 int first_contributory = env->old_exception == 0 ||
1693 (env->old_exception >= 10 &&
1694 env->old_exception <= 13);
1695 int second_contributory = intno == 0 ||
1696 (intno >= 10 && intno <= 13);
1697
1698 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1699 env->old_exception, intno);
1700
1701#if !defined(CONFIG_USER_ONLY)
1702 if (env->old_exception == EXCP08_DBLE) {
1703 if (env->hflags & HF_SVMI_MASK)
1704 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1705
1706 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1707
1708# ifndef VBOX
1709 qemu_system_reset_request();
1710# else
1711 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1712# endif
1713 return EXCP_HLT;
1714 }
1715#endif
1716
1717 if ((first_contributory && second_contributory)
1718 || (env->old_exception == EXCP0E_PAGE &&
1719 (second_contributory || (intno == EXCP0E_PAGE)))) {
1720 intno = EXCP08_DBLE;
1721 *error_code = 0;
1722 }
1723
1724 if (second_contributory || (intno == EXCP0E_PAGE) ||
1725 (intno == EXCP08_DBLE))
1726 env->old_exception = intno;
1727
1728 return intno;
1729}
1730
1731/*
1732 * Signal an interrupt. It is executed in the main CPU loop.
1733 * is_int is TRUE if coming from the int instruction. next_eip is the
1734 * EIP value AFTER the interrupt instruction. It is only relevant if
1735 * is_int is TRUE.
1736 */
1737static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1738 int next_eip_addend)
1739{
1740#if defined(VBOX) && defined(DEBUG)
1741 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1742#endif
1743 if (!is_int) {
1744 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1745 intno = check_exception(intno, &error_code);
1746 } else {
1747 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1748 }
1749
1750 env->exception_index = intno;
1751 env->error_code = error_code;
1752 env->exception_is_int = is_int;
1753 env->exception_next_eip = env->eip + next_eip_addend;
1754 cpu_loop_exit();
1755}
1756
1757/* shortcuts to generate exceptions */
1758
1759void raise_exception_err(int exception_index, int error_code)
1760{
1761 raise_interrupt(exception_index, 0, error_code, 0);
1762}
1763
1764void raise_exception(int exception_index)
1765{
1766 raise_interrupt(exception_index, 0, 0, 0);
1767}
1768
1769void raise_exception_env(int exception_index, CPUState *nenv)
1770{
1771 env = nenv;
1772 raise_exception(exception_index);
1773}
1774/* SMM support */
1775
1776#if defined(CONFIG_USER_ONLY)
1777
1778void do_smm_enter(void)
1779{
1780}
1781
1782void helper_rsm(void)
1783{
1784}
1785
1786#else
1787
1788#ifdef TARGET_X86_64
1789#define SMM_REVISION_ID 0x00020064
1790#else
1791#define SMM_REVISION_ID 0x00020000
1792#endif
1793
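/* On SMI the CPU starts executing at SMBASE + 0x8000 and the register state
   is saved in the top of the SMRAM segment; the 0x7exx/0x7fxx offsets below
   are relative to sm_state = SMBASE + 0x8000. */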
1794void do_smm_enter(void)
1795{
1796 target_ulong sm_state;
1797 SegmentCache *dt;
1798 int i, offset;
1799
1800 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1801 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1802
1803 env->hflags |= HF_SMM_MASK;
1804 cpu_smm_update(env);
1805
1806 sm_state = env->smbase + 0x8000;
1807
1808#ifdef TARGET_X86_64
1809 for(i = 0; i < 6; i++) {
1810 dt = &env->segs[i];
1811 offset = 0x7e00 + i * 16;
1812 stw_phys(sm_state + offset, dt->selector);
1813 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1814 stl_phys(sm_state + offset + 4, dt->limit);
1815 stq_phys(sm_state + offset + 8, dt->base);
1816 }
1817
1818 stq_phys(sm_state + 0x7e68, env->gdt.base);
1819 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1820
1821 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1822 stq_phys(sm_state + 0x7e78, env->ldt.base);
1823 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1824 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1825
1826 stq_phys(sm_state + 0x7e88, env->idt.base);
1827 stl_phys(sm_state + 0x7e84, env->idt.limit);
1828
1829 stw_phys(sm_state + 0x7e90, env->tr.selector);
1830 stq_phys(sm_state + 0x7e98, env->tr.base);
1831 stl_phys(sm_state + 0x7e94, env->tr.limit);
1832 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1833
1834 stq_phys(sm_state + 0x7ed0, env->efer);
1835
1836 stq_phys(sm_state + 0x7ff8, EAX);
1837 stq_phys(sm_state + 0x7ff0, ECX);
1838 stq_phys(sm_state + 0x7fe8, EDX);
1839 stq_phys(sm_state + 0x7fe0, EBX);
1840 stq_phys(sm_state + 0x7fd8, ESP);
1841 stq_phys(sm_state + 0x7fd0, EBP);
1842 stq_phys(sm_state + 0x7fc8, ESI);
1843 stq_phys(sm_state + 0x7fc0, EDI);
1844 for(i = 8; i < 16; i++)
1845 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1846 stq_phys(sm_state + 0x7f78, env->eip);
1847 stl_phys(sm_state + 0x7f70, compute_eflags());
1848 stl_phys(sm_state + 0x7f68, env->dr[6]);
1849 stl_phys(sm_state + 0x7f60, env->dr[7]);
1850
1851 stl_phys(sm_state + 0x7f48, env->cr[4]);
1852 stl_phys(sm_state + 0x7f50, env->cr[3]);
1853 stl_phys(sm_state + 0x7f58, env->cr[0]);
1854
1855 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1856 stl_phys(sm_state + 0x7f00, env->smbase);
1857#else
1858 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1859 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1860 stl_phys(sm_state + 0x7ff4, compute_eflags());
1861 stl_phys(sm_state + 0x7ff0, env->eip);
1862 stl_phys(sm_state + 0x7fec, EDI);
1863 stl_phys(sm_state + 0x7fe8, ESI);
1864 stl_phys(sm_state + 0x7fe4, EBP);
1865 stl_phys(sm_state + 0x7fe0, ESP);
1866 stl_phys(sm_state + 0x7fdc, EBX);
1867 stl_phys(sm_state + 0x7fd8, EDX);
1868 stl_phys(sm_state + 0x7fd4, ECX);
1869 stl_phys(sm_state + 0x7fd0, EAX);
1870 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1871 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1872
1873 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1874 stl_phys(sm_state + 0x7f64, env->tr.base);
1875 stl_phys(sm_state + 0x7f60, env->tr.limit);
1876 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1877
1878 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1879 stl_phys(sm_state + 0x7f80, env->ldt.base);
1880 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1881 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1882
1883 stl_phys(sm_state + 0x7f74, env->gdt.base);
1884 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1885
1886 stl_phys(sm_state + 0x7f58, env->idt.base);
1887 stl_phys(sm_state + 0x7f54, env->idt.limit);
1888
1889 for(i = 0; i < 6; i++) {
1890 dt = &env->segs[i];
1891 if (i < 3)
1892 offset = 0x7f84 + i * 12;
1893 else
1894 offset = 0x7f2c + (i - 3) * 12;
1895 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1896 stl_phys(sm_state + offset + 8, dt->base);
1897 stl_phys(sm_state + offset + 4, dt->limit);
1898 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1899 }
1900 stl_phys(sm_state + 0x7f14, env->cr[4]);
1901
1902 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1903 stl_phys(sm_state + 0x7ef8, env->smbase);
1904#endif
1905 /* init SMM cpu state */
1906
1907#ifdef TARGET_X86_64
1908 cpu_load_efer(env, 0);
1909#endif
1910 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1911 env->eip = 0x00008000;
1912 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1913 0xffffffff, 0);
1914 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1915 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1916 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1917 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1918 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1919
1920 cpu_x86_update_cr0(env,
1921 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1922 cpu_x86_update_cr4(env, 0);
1923 env->dr[7] = 0x00000400;
1924 CC_OP = CC_OP_EFLAGS;
1925}
1926
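/*
 * RSM: restore the state saved by do_smm_enter() from the SMRAM save
 * area and leave system management mode.  The VBox build simply aborts
 * here, presumably because the recompiler is never entered while the
 * guest is in SMM.
 */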
1927void helper_rsm(void)
1928{
1929#ifdef VBOX
1930 cpu_abort(env, "helper_rsm");
1931#else /* !VBOX */
1932 target_ulong sm_state;
1933 int i, offset;
1934 uint32_t val;
1935
1936 sm_state = env->smbase + 0x8000;
1937#ifdef TARGET_X86_64
1938 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1939
1940 for(i = 0; i < 6; i++) {
1941 offset = 0x7e00 + i * 16;
1942 cpu_x86_load_seg_cache(env, i,
1943 lduw_phys(sm_state + offset),
1944 ldq_phys(sm_state + offset + 8),
1945 ldl_phys(sm_state + offset + 4),
1946 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1947 }
1948
1949 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1950 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1951
1952 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1953 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1954 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1955 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1956
1957 env->idt.base = ldq_phys(sm_state + 0x7e88);
1958 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1959
1960 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1961 env->tr.base = ldq_phys(sm_state + 0x7e98);
1962 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1963 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1964
1965 EAX = ldq_phys(sm_state + 0x7ff8);
1966 ECX = ldq_phys(sm_state + 0x7ff0);
1967 EDX = ldq_phys(sm_state + 0x7fe8);
1968 EBX = ldq_phys(sm_state + 0x7fe0);
1969 ESP = ldq_phys(sm_state + 0x7fd8);
1970 EBP = ldq_phys(sm_state + 0x7fd0);
1971 ESI = ldq_phys(sm_state + 0x7fc8);
1972 EDI = ldq_phys(sm_state + 0x7fc0);
1973 for(i = 8; i < 16; i++)
1974 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1975 env->eip = ldq_phys(sm_state + 0x7f78);
1976 load_eflags(ldl_phys(sm_state + 0x7f70),
1977 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1978 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1979 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1980
1981 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1982 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1983 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1984
1985 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1986 if (val & 0x20000) {
1987 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1988 }
1989#else
1990 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1991 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1992 load_eflags(ldl_phys(sm_state + 0x7ff4),
1993 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1994 env->eip = ldl_phys(sm_state + 0x7ff0);
1995 EDI = ldl_phys(sm_state + 0x7fec);
1996 ESI = ldl_phys(sm_state + 0x7fe8);
1997 EBP = ldl_phys(sm_state + 0x7fe4);
1998 ESP = ldl_phys(sm_state + 0x7fe0);
1999 EBX = ldl_phys(sm_state + 0x7fdc);
2000 EDX = ldl_phys(sm_state + 0x7fd8);
2001 ECX = ldl_phys(sm_state + 0x7fd4);
2002 EAX = ldl_phys(sm_state + 0x7fd0);
2003 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2004 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2005
2006 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2007 env->tr.base = ldl_phys(sm_state + 0x7f64);
2008 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2009 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2010
2011 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2012 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2013 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2014 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2015
2016 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2017 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2018
2019 env->idt.base = ldl_phys(sm_state + 0x7f58);
2020 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2021
2022 for(i = 0; i < 6; i++) {
2023 if (i < 3)
2024 offset = 0x7f84 + i * 12;
2025 else
2026 offset = 0x7f2c + (i - 3) * 12;
2027 cpu_x86_load_seg_cache(env, i,
2028 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2029 ldl_phys(sm_state + offset + 8),
2030 ldl_phys(sm_state + offset + 4),
2031 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2032 }
2033 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2034
2035 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2036 if (val & 0x20000) {
2037 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2038 }
2039#endif
2040 CC_OP = CC_OP_EFLAGS;
2041 env->hflags &= ~HF_SMM_MASK;
2042 cpu_smm_update(env);
2043
2044 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2045 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2046#endif /* !VBOX */
2047}
2048
2049#endif /* !CONFIG_USER_ONLY */
2050
2051
2052/* division, flags are undefined */
2053
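/*
 * DIV/IDIV helpers.  The implicit dividend is AX, DX:AX or EDX:EAX;
 * both a zero divisor and a quotient that does not fit the destination
 * register raise #DE (EXCP00_DIVZ), matching the architectural
 * behaviour.
 */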
2054void helper_divb_AL(target_ulong t0)
2055{
2056 unsigned int num, den, q, r;
2057
2058 num = (EAX & 0xffff);
2059 den = (t0 & 0xff);
2060 if (den == 0) {
2061 raise_exception(EXCP00_DIVZ);
2062 }
2063 q = (num / den);
2064 if (q > 0xff)
2065 raise_exception(EXCP00_DIVZ);
2066 q &= 0xff;
2067 r = (num % den) & 0xff;
2068 EAX = (EAX & ~0xffff) | (r << 8) | q;
2069}
2070
2071void helper_idivb_AL(target_ulong t0)
2072{
2073 int num, den, q, r;
2074
2075 num = (int16_t)EAX;
2076 den = (int8_t)t0;
2077 if (den == 0) {
2078 raise_exception(EXCP00_DIVZ);
2079 }
2080 q = (num / den);
2081 if (q != (int8_t)q)
2082 raise_exception(EXCP00_DIVZ);
2083 q &= 0xff;
2084 r = (num % den) & 0xff;
2085 EAX = (EAX & ~0xffff) | (r << 8) | q;
2086}
2087
2088void helper_divw_AX(target_ulong t0)
2089{
2090 unsigned int num, den, q, r;
2091
2092 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2093 den = (t0 & 0xffff);
2094 if (den == 0) {
2095 raise_exception(EXCP00_DIVZ);
2096 }
2097 q = (num / den);
2098 if (q > 0xffff)
2099 raise_exception(EXCP00_DIVZ);
2100 q &= 0xffff;
2101 r = (num % den) & 0xffff;
2102 EAX = (EAX & ~0xffff) | q;
2103 EDX = (EDX & ~0xffff) | r;
2104}
2105
2106void helper_idivw_AX(target_ulong t0)
2107{
2108 int num, den, q, r;
2109
2110 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2111 den = (int16_t)t0;
2112 if (den == 0) {
2113 raise_exception(EXCP00_DIVZ);
2114 }
2115 q = (num / den);
2116 if (q != (int16_t)q)
2117 raise_exception(EXCP00_DIVZ);
2118 q &= 0xffff;
2119 r = (num % den) & 0xffff;
2120 EAX = (EAX & ~0xffff) | q;
2121 EDX = (EDX & ~0xffff) | r;
2122}
2123
2124void helper_divl_EAX(target_ulong t0)
2125{
2126 unsigned int den, r;
2127 uint64_t num, q;
2128
2129 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2130 den = t0;
2131 if (den == 0) {
2132 raise_exception(EXCP00_DIVZ);
2133 }
2134 q = (num / den);
2135 r = (num % den);
2136 if (q > 0xffffffff)
2137 raise_exception(EXCP00_DIVZ);
2138 EAX = (uint32_t)q;
2139 EDX = (uint32_t)r;
2140}
2141
2142void helper_idivl_EAX(target_ulong t0)
2143{
2144 int den, r;
2145 int64_t num, q;
2146
2147 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2148 den = t0;
2149 if (den == 0) {
2150 raise_exception(EXCP00_DIVZ);
2151 }
2152 q = (num / den);
2153 r = (num % den);
2154 if (q != (int32_t)q)
2155 raise_exception(EXCP00_DIVZ);
2156 EAX = (uint32_t)q;
2157 EDX = (uint32_t)r;
2158}
2159
2160/* bcd */
2161
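/*
 * ASCII/BCD adjust helpers (AAM, AAD, AAA, AAS, DAA, DAS).  They operate
 * on AL/AH only; AAM/AAD leave the result in CC_DST for the flag
 * computation, the remaining helpers compute EFLAGS by hand into CC_SRC.
 */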
2162/* XXX: AAM with an immediate of 0 should raise #DE; not implemented here */
2163void helper_aam(int base)
2164{
2165 int al, ah;
2166 al = EAX & 0xff;
2167 ah = al / base;
2168 al = al % base;
2169 EAX = (EAX & ~0xffff) | al | (ah << 8);
2170 CC_DST = al;
2171}
2172
2173void helper_aad(int base)
2174{
2175 int al, ah;
2176 al = EAX & 0xff;
2177 ah = (EAX >> 8) & 0xff;
2178 al = ((ah * base) + al) & 0xff;
2179 EAX = (EAX & ~0xffff) | al;
2180 CC_DST = al;
2181}
2182
2183void helper_aaa(void)
2184{
2185 int icarry;
2186 int al, ah, af;
2187 int eflags;
2188
2189 eflags = helper_cc_compute_all(CC_OP);
2190 af = eflags & CC_A;
2191 al = EAX & 0xff;
2192 ah = (EAX >> 8) & 0xff;
2193
2194 icarry = (al > 0xf9);
2195 if (((al & 0x0f) > 9 ) || af) {
2196 al = (al + 6) & 0x0f;
2197 ah = (ah + 1 + icarry) & 0xff;
2198 eflags |= CC_C | CC_A;
2199 } else {
2200 eflags &= ~(CC_C | CC_A);
2201 al &= 0x0f;
2202 }
2203 EAX = (EAX & ~0xffff) | al | (ah << 8);
2204 CC_SRC = eflags;
2205}
2206
2207void helper_aas(void)
2208{
2209 int icarry;
2210 int al, ah, af;
2211 int eflags;
2212
2213 eflags = helper_cc_compute_all(CC_OP);
2214 af = eflags & CC_A;
2215 al = EAX & 0xff;
2216 ah = (EAX >> 8) & 0xff;
2217
2218 icarry = (al < 6);
2219 if (((al & 0x0f) > 9 ) || af) {
2220 al = (al - 6) & 0x0f;
2221 ah = (ah - 1 - icarry) & 0xff;
2222 eflags |= CC_C | CC_A;
2223 } else {
2224 eflags &= ~(CC_C | CC_A);
2225 al &= 0x0f;
2226 }
2227 EAX = (EAX & ~0xffff) | al | (ah << 8);
2228 CC_SRC = eflags;
2229}
2230
2231void helper_daa(void)
2232{
2233 int al, af, cf;
2234 int eflags;
2235
2236 eflags = helper_cc_compute_all(CC_OP);
2237 cf = eflags & CC_C;
2238 af = eflags & CC_A;
2239 al = EAX & 0xff;
2240
2241 eflags = 0;
2242 if (((al & 0x0f) > 9 ) || af) {
2243 al = (al + 6) & 0xff;
2244 eflags |= CC_A;
2245 }
2246 if ((al > 0x9f) || cf) {
2247 al = (al + 0x60) & 0xff;
2248 eflags |= CC_C;
2249 }
2250 EAX = (EAX & ~0xff) | al;
2251 /* well, speed is not an issue here, so we compute the flags by hand */
2252 eflags |= (al == 0) << 6; /* zf */
2253 eflags |= parity_table[al]; /* pf */
2254 eflags |= (al & 0x80); /* sf */
2255 CC_SRC = eflags;
2256}
2257
2258void helper_das(void)
2259{
2260 int al, al1, af, cf;
2261 int eflags;
2262
2263 eflags = helper_cc_compute_all(CC_OP);
2264 cf = eflags & CC_C;
2265 af = eflags & CC_A;
2266 al = EAX & 0xff;
2267
2268 eflags = 0;
2269 al1 = al;
2270 if (((al & 0x0f) > 9 ) || af) {
2271 eflags |= CC_A;
2272 if (al < 6 || cf)
2273 eflags |= CC_C;
2274 al = (al - 6) & 0xff;
2275 }
2276 if ((al1 > 0x99) || cf) {
2277 al = (al - 0x60) & 0xff;
2278 eflags |= CC_C;
2279 }
2280 EAX = (EAX & ~0xff) | al;
2281 /* well, speed is not an issue here, so we compute the flags by hand */
2282 eflags |= (al == 0) << 6; /* zf */
2283 eflags |= parity_table[al]; /* pf */
2284 eflags |= (al & 0x80); /* sf */
2285 CC_SRC = eflags;
2286}
2287
2288void helper_into(int next_eip_addend)
2289{
2290 int eflags;
2291 eflags = helper_cc_compute_all(CC_OP);
2292 if (eflags & CC_O) {
2293 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2294 }
2295}
2296
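/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand at a0.  On
 * a match ECX:EBX is stored and ZF is set; otherwise the memory value is
 * loaded into EDX:EAX and ZF is cleared.  The store is performed in both
 * cases because the architecture always writes the destination.
 * helper_cmpxchg16b() below is the 128-bit variant and additionally
 * requires a 16-byte aligned operand.
 */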
2297void helper_cmpxchg8b(target_ulong a0)
2298{
2299 uint64_t d;
2300 int eflags;
2301
2302 eflags = helper_cc_compute_all(CC_OP);
2303 d = ldq(a0);
2304 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2305 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2306 eflags |= CC_Z;
2307 } else {
2308 /* always do the store */
2309 stq(a0, d);
2310 EDX = (uint32_t)(d >> 32);
2311 EAX = (uint32_t)d;
2312 eflags &= ~CC_Z;
2313 }
2314 CC_SRC = eflags;
2315}
2316
2317#ifdef TARGET_X86_64
2318void helper_cmpxchg16b(target_ulong a0)
2319{
2320 uint64_t d0, d1;
2321 int eflags;
2322
2323 if ((a0 & 0xf) != 0)
2324 raise_exception(EXCP0D_GPF);
2325 eflags = helper_cc_compute_all(CC_OP);
2326 d0 = ldq(a0);
2327 d1 = ldq(a0 + 8);
2328 if (d0 == EAX && d1 == EDX) {
2329 stq(a0, EBX);
2330 stq(a0 + 8, ECX);
2331 eflags |= CC_Z;
2332 } else {
2333 /* always do the store */
2334 stq(a0, d0);
2335 stq(a0 + 8, d1);
2336 EDX = d1;
2337 EAX = d0;
2338 eflags &= ~CC_Z;
2339 }
2340 CC_SRC = eflags;
2341}
2342#endif
2343
2344void helper_single_step(void)
2345{
2346#ifndef CONFIG_USER_ONLY
2347 check_hw_breakpoints(env, 1);
2348 env->dr[6] |= DR6_BS;
2349#endif
2350 raise_exception(EXCP01_DB);
2351}
2352
2353void helper_cpuid(void)
2354{
2355 uint32_t eax, ebx, ecx, edx;
2356
2357 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2358
2359 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2360 EAX = eax;
2361 EBX = ebx;
2362 ECX = ecx;
2363 EDX = edx;
2364}
2365
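/*
 * ENTER with a non-zero nesting level: copy level-1 frame pointers from
 * the old frame onto the new stack and finally push t1, which the
 * caller passes as the new frame-pointer value.
 */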
2366void helper_enter_level(int level, int data32, target_ulong t1)
2367{
2368 target_ulong ssp;
2369 uint32_t esp_mask, esp, ebp;
2370
2371 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2372 ssp = env->segs[R_SS].base;
2373 ebp = EBP;
2374 esp = ESP;
2375 if (data32) {
2376 /* 32 bit */
2377 esp -= 4;
2378 while (--level) {
2379 esp -= 4;
2380 ebp -= 4;
2381 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2382 }
2383 esp -= 4;
2384 stl(ssp + (esp & esp_mask), t1);
2385 } else {
2386 /* 16 bit */
2387 esp -= 2;
2388 while (--level) {
2389 esp -= 2;
2390 ebp -= 2;
2391 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2392 }
2393 esp -= 2;
2394 stw(ssp + (esp & esp_mask), t1);
2395 }
2396}
2397
2398#ifdef TARGET_X86_64
2399void helper_enter64_level(int level, int data64, target_ulong t1)
2400{
2401 target_ulong esp, ebp;
2402 ebp = EBP;
2403 esp = ESP;
2404
2405 if (data64) {
2406 /* 64 bit */
2407 esp -= 8;
2408 while (--level) {
2409 esp -= 8;
2410 ebp -= 8;
2411 stq(esp, ldq(ebp));
2412 }
2413 esp -= 8;
2414 stq(esp, t1);
2415 } else {
2416 /* 16 bit */
2417 esp -= 2;
2418 while (--level) {
2419 esp -= 2;
2420 ebp -= 2;
2421 stw(esp, lduw(ebp));
2422 }
2423 esp -= 2;
2424 stw(esp, t1);
2425 }
2426}
2427#endif
2428
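/*
 * LLDT: load the LDT register from a GDT descriptor.  A null selector
 * just clears the LDT; otherwise the descriptor must be a present LDT
 * system descriptor (type 2).  In long mode the descriptor is 16 bytes,
 * hence the larger entry_limit, and bits 63:32 of the base come from
 * the third dword.
 */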
2429void helper_lldt(int selector)
2430{
2431 SegmentCache *dt;
2432 uint32_t e1, e2;
2433#ifndef VBOX
2434 int index, entry_limit;
2435#else
2436 unsigned int index, entry_limit;
2437#endif
2438 target_ulong ptr;
2439
2440#ifdef VBOX
2441 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2442 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2443#endif
2444
2445 selector &= 0xffff;
2446 if ((selector & 0xfffc) == 0) {
2447 /* XXX: NULL selector case: invalid LDT */
2448 env->ldt.base = 0;
2449 env->ldt.limit = 0;
2450 } else {
2451 if (selector & 0x4)
2452 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2453 dt = &env->gdt;
2454 index = selector & ~7;
2455#ifdef TARGET_X86_64
2456 if (env->hflags & HF_LMA_MASK)
2457 entry_limit = 15;
2458 else
2459#endif
2460 entry_limit = 7;
2461 if ((index + entry_limit) > dt->limit)
2462 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2463 ptr = dt->base + index;
2464 e1 = ldl_kernel(ptr);
2465 e2 = ldl_kernel(ptr + 4);
2466 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2467 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2468 if (!(e2 & DESC_P_MASK))
2469 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2470#ifdef TARGET_X86_64
2471 if (env->hflags & HF_LMA_MASK) {
2472 uint32_t e3;
2473 e3 = ldl_kernel(ptr + 8);
2474 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2475 env->ldt.base |= (target_ulong)e3 << 32;
2476 } else
2477#endif
2478 {
2479 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2480 }
2481 }
2482 env->ldt.selector = selector;
2483#ifdef VBOX
2484 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2485 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2486#endif
2487}
2488
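/*
 * LTR: load the task register.  The selector must reference a present,
 * available TSS descriptor in the GDT (type 1 or 9); the descriptor is
 * then marked busy.  As with LLDT, long mode uses a 16-byte descriptor.
 */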
2489void helper_ltr(int selector)
2490{
2491 SegmentCache *dt;
2492 uint32_t e1, e2;
2493#ifndef VBOX
2494 int index, type, entry_limit;
2495#else
2496 unsigned int index;
2497 int type, entry_limit;
2498#endif
2499 target_ulong ptr;
2500
2501#ifdef VBOX
2502 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2503 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2504 env->tr.flags, (RTSEL)(selector & 0xffff)));
2505#endif
2506 selector &= 0xffff;
2507 if ((selector & 0xfffc) == 0) {
2508 /* NULL selector case: invalid TR */
2509 env->tr.base = 0;
2510 env->tr.limit = 0;
2511 env->tr.flags = 0;
2512 } else {
2513 if (selector & 0x4)
2514 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2515 dt = &env->gdt;
2516 index = selector & ~7;
2517#ifdef TARGET_X86_64
2518 if (env->hflags & HF_LMA_MASK)
2519 entry_limit = 15;
2520 else
2521#endif
2522 entry_limit = 7;
2523 if ((index + entry_limit) > dt->limit)
2524 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2525 ptr = dt->base + index;
2526 e1 = ldl_kernel(ptr);
2527 e2 = ldl_kernel(ptr + 4);
2528 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2529 if ((e2 & DESC_S_MASK) ||
2530 (type != 1 && type != 9))
2531 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2532 if (!(e2 & DESC_P_MASK))
2533 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2534#ifdef TARGET_X86_64
2535 if (env->hflags & HF_LMA_MASK) {
2536 uint32_t e3, e4;
2537 e3 = ldl_kernel(ptr + 8);
2538 e4 = ldl_kernel(ptr + 12);
2539 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2540 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2541 load_seg_cache_raw_dt(&env->tr, e1, e2);
2542 env->tr.base |= (target_ulong)e3 << 32;
2543 } else
2544#endif
2545 {
2546 load_seg_cache_raw_dt(&env->tr, e1, e2);
2547 }
2548 e2 |= DESC_TSS_BUSY_MASK;
2549 stl_kernel(ptr + 4, e2);
2550 }
2551 env->tr.selector = selector;
2552#ifdef VBOX
2553 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2554 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2555 env->tr.flags, (RTSEL)(selector & 0xffff)));
2556#endif
2557}
2558
2559/* only works if protected mode and not VM86. seg_reg must be != R_CS */
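/*
 * The usual protected mode checks apply: SS must be a writable data
 * segment with RPL == DPL == CPL, the other registers need a readable
 * segment that passes the DPL/RPL/CPL check unless it is conforming
 * code.  The VBox-only block below clears RPL 1 from selectors when
 * guest ring-0 code runs with the raw-mode ring shift (CPU_RAW_RING0),
 * presumably to undo the ring-1 adjustment raw mode applies to guest
 * ring-0 selectors.
 */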
2560void helper_load_seg(int seg_reg, int selector)
2561{
2562 uint32_t e1, e2;
2563 int cpl, dpl, rpl;
2564 SegmentCache *dt;
2565#ifndef VBOX
2566 int index;
2567#else
2568 unsigned int index;
2569#endif
2570 target_ulong ptr;
2571
2572 selector &= 0xffff;
2573 cpl = env->hflags & HF_CPL_MASK;
2574#ifdef VBOX
2575
2576 /* Trying to load a selector with CPL=1? */
2577 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2578 {
2579 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2580 selector = selector & 0xfffc;
2581 }
2582#endif /* VBOX */
2583 if ((selector & 0xfffc) == 0) {
2584 /* null selector case */
2585 if (seg_reg == R_SS
2586#ifdef TARGET_X86_64
2587 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2588#endif
2589 )
2590 raise_exception_err(EXCP0D_GPF, 0);
2591 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2592 } else {
2593
2594 if (selector & 0x4)
2595 dt = &env->ldt;
2596 else
2597 dt = &env->gdt;
2598 index = selector & ~7;
2599 if ((index + 7) > dt->limit)
2600 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2601 ptr = dt->base + index;
2602 e1 = ldl_kernel(ptr);
2603 e2 = ldl_kernel(ptr + 4);
2604
2605 if (!(e2 & DESC_S_MASK))
2606 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2607 rpl = selector & 3;
2608 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2609 if (seg_reg == R_SS) {
2610 /* must be writable segment */
2611 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2612 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2613 if (rpl != cpl || dpl != cpl)
2614 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2615 } else {
2616 /* must be readable segment */
2617 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2618 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2619
2620 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2621 /* if not conforming code, test rights */
2622 if (dpl < cpl || dpl < rpl)
2623 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2624 }
2625 }
2626
2627 if (!(e2 & DESC_P_MASK)) {
2628 if (seg_reg == R_SS)
2629 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2630 else
2631 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2632 }
2633
2634 /* set the access bit if not already set */
2635 if (!(e2 & DESC_A_MASK)) {
2636 e2 |= DESC_A_MASK;
2637 stl_kernel(ptr + 4, e2);
2638 }
2639
2640 cpu_x86_load_seg_cache(env, seg_reg, selector,
2641 get_seg_base(e1, e2),
2642 get_seg_limit(e1, e2),
2643 e2);
2644#if 0
2645 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2646 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2647#endif
2648 }
2649}
2650
2651/* protected mode jump */
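/*
 * Far JMP in protected mode.  A code-segment descriptor is checked
 * against the usual conforming / non-conforming privilege rules and
 * loaded directly; TSS and task-gate descriptors trigger a task switch,
 * and call gates redirect to the code segment named in the gate without
 * a privilege change, since JMP through a gate may not change CPL.
 */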
2652void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2653 int next_eip_addend)
2654{
2655 int gate_cs, type;
2656 uint32_t e1, e2, cpl, dpl, rpl, limit;
2657 target_ulong next_eip;
2658
2659#ifdef VBOX /** @todo Why do we do this? */
2660 e1 = e2 = 0;
2661#endif
2662 if ((new_cs & 0xfffc) == 0)
2663 raise_exception_err(EXCP0D_GPF, 0);
2664 if (load_segment(&e1, &e2, new_cs) != 0)
2665 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2666 cpl = env->hflags & HF_CPL_MASK;
2667 if (e2 & DESC_S_MASK) {
2668 if (!(e2 & DESC_CS_MASK))
2669 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2670 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2671 if (e2 & DESC_C_MASK) {
2672 /* conforming code segment */
2673 if (dpl > cpl)
2674 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2675 } else {
2676 /* non conforming code segment */
2677 rpl = new_cs & 3;
2678 if (rpl > cpl)
2679 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2680 if (dpl != cpl)
2681 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2682 }
2683 if (!(e2 & DESC_P_MASK))
2684 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2685 limit = get_seg_limit(e1, e2);
2686 if (new_eip > limit &&
2687 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2688 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2689 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2690 get_seg_base(e1, e2), limit, e2);
2691 EIP = new_eip;
2692 } else {
2693 /* jump to call or task gate */
2694 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2695 rpl = new_cs & 3;
2696 cpl = env->hflags & HF_CPL_MASK;
2697 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2698 switch(type) {
2699 case 1: /* 286 TSS */
2700 case 9: /* 386 TSS */
2701 case 5: /* task gate */
2702 if (dpl < cpl || dpl < rpl)
2703 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2704 next_eip = env->eip + next_eip_addend;
2705 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2706 CC_OP = CC_OP_EFLAGS;
2707 break;
2708 case 4: /* 286 call gate */
2709 case 12: /* 386 call gate */
2710 if ((dpl < cpl) || (dpl < rpl))
2711 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2712 if (!(e2 & DESC_P_MASK))
2713 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2714 gate_cs = e1 >> 16;
2715 new_eip = (e1 & 0xffff);
2716 if (type == 12)
2717 new_eip |= (e2 & 0xffff0000);
2718 if (load_segment(&e1, &e2, gate_cs) != 0)
2719 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2720 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2721 /* must be code segment */
2722 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2723 (DESC_S_MASK | DESC_CS_MASK)))
2724 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2725 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2726 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2727 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2728 if (!(e2 & DESC_P_MASK))
2729#ifdef VBOX /* See page 3-514 of 253666.pdf */
2730 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2731#else
2732 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2733#endif
2734 limit = get_seg_limit(e1, e2);
2735 if (new_eip > limit)
2736 raise_exception_err(EXCP0D_GPF, 0);
2737 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2738 get_seg_base(e1, e2), limit, e2);
2739 EIP = new_eip;
2740 break;
2741 default:
2742 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2743 break;
2744 }
2745 }
2746}
2747
2748/* real mode call */
2749void helper_lcall_real(int new_cs, target_ulong new_eip1,
2750 int shift, int next_eip)
2751{
2752 int new_eip;
2753 uint32_t esp, esp_mask;
2754 target_ulong ssp;
2755
2756 new_eip = new_eip1;
2757 esp = ESP;
2758 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2759 ssp = env->segs[R_SS].base;
2760 if (shift) {
2761 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2762 PUSHL(ssp, esp, esp_mask, next_eip);
2763 } else {
2764 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2765 PUSHW(ssp, esp, esp_mask, next_eip);
2766 }
2767
2768 SET_ESP(esp, esp_mask);
2769 env->eip = new_eip;
2770 env->segs[R_CS].selector = new_cs;
2771 env->segs[R_CS].base = (new_cs << 4);
2772}
2773
2774/* protected mode call */
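/*
 * Far CALL in protected mode.  Direct calls push CS:EIP (16, 32 or 64
 * bit, selected by 'shift') and load the new code segment.  Calls
 * through a call gate to a more privileged non-conforming segment also
 * switch to the inner stack taken from the TSS and copy param_count
 * parameters from the old stack; TSS and task-gate types perform a task
 * switch instead.
 */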
2775void helper_lcall_protected(int new_cs, target_ulong new_eip,
2776 int shift, int next_eip_addend)
2777{
2778 int new_stack, i;
2779 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2780 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2781 uint32_t val, limit, old_sp_mask;
2782 target_ulong ssp, old_ssp, next_eip;
2783
2784#ifdef VBOX /** @todo Why do we do this? */
2785 e1 = e2 = 0;
2786#endif
2787 next_eip = env->eip + next_eip_addend;
2788 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2789 LOG_PCALL_STATE(env);
2790 if ((new_cs & 0xfffc) == 0)
2791 raise_exception_err(EXCP0D_GPF, 0);
2792 if (load_segment(&e1, &e2, new_cs) != 0)
2793 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2794 cpl = env->hflags & HF_CPL_MASK;
2795 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2796 if (e2 & DESC_S_MASK) {
2797 if (!(e2 & DESC_CS_MASK))
2798 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2799 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2800 if (e2 & DESC_C_MASK) {
2801 /* conforming code segment */
2802 if (dpl > cpl)
2803 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2804 } else {
2805 /* non conforming code segment */
2806 rpl = new_cs & 3;
2807 if (rpl > cpl)
2808 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2809 if (dpl != cpl)
2810 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2811 }
2812 if (!(e2 & DESC_P_MASK))
2813 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2814
2815#ifdef TARGET_X86_64
2816 /* XXX: check 16/32 bit cases in long mode */
2817 if (shift == 2) {
2818 target_ulong rsp;
2819 /* 64 bit case */
2820 rsp = ESP;
2821 PUSHQ(rsp, env->segs[R_CS].selector);
2822 PUSHQ(rsp, next_eip);
2823 /* from this point, not restartable */
2824 ESP = rsp;
2825 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2826 get_seg_base(e1, e2),
2827 get_seg_limit(e1, e2), e2);
2828 EIP = new_eip;
2829 } else
2830#endif
2831 {
2832 sp = ESP;
2833 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2834 ssp = env->segs[R_SS].base;
2835 if (shift) {
2836 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2837 PUSHL(ssp, sp, sp_mask, next_eip);
2838 } else {
2839 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2840 PUSHW(ssp, sp, sp_mask, next_eip);
2841 }
2842
2843 limit = get_seg_limit(e1, e2);
2844 if (new_eip > limit)
2845 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2846 /* from this point, not restartable */
2847 SET_ESP(sp, sp_mask);
2848 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2849 get_seg_base(e1, e2), limit, e2);
2850 EIP = new_eip;
2851 }
2852 } else {
2853 /* check gate type */
2854 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2855 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2856 rpl = new_cs & 3;
2857 switch(type) {
2858 case 1: /* available 286 TSS */
2859 case 9: /* available 386 TSS */
2860 case 5: /* task gate */
2861 if (dpl < cpl || dpl < rpl)
2862 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2863 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2864 CC_OP = CC_OP_EFLAGS;
2865 return;
2866 case 4: /* 286 call gate */
2867 case 12: /* 386 call gate */
2868 break;
2869 default:
2870 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2871 break;
2872 }
2873 shift = type >> 3;
2874
2875 if (dpl < cpl || dpl < rpl)
2876 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2877 /* check valid bit */
2878 if (!(e2 & DESC_P_MASK))
2879 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2880 selector = e1 >> 16;
2881 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2882 param_count = e2 & 0x1f;
2883 if ((selector & 0xfffc) == 0)
2884 raise_exception_err(EXCP0D_GPF, 0);
2885
2886 if (load_segment(&e1, &e2, selector) != 0)
2887 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2888 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2889 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2890 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2891 if (dpl > cpl)
2892 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2893 if (!(e2 & DESC_P_MASK))
2894 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2895
2896 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2897 /* to inner privilege */
2898 get_ss_esp_from_tss(&ss, &sp, dpl);
2899 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2900 ss, sp, param_count, ESP);
2901 if ((ss & 0xfffc) == 0)
2902 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2903 if ((ss & 3) != dpl)
2904 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2905 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2906 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2907 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2908 if (ss_dpl != dpl)
2909 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2910 if (!(ss_e2 & DESC_S_MASK) ||
2911 (ss_e2 & DESC_CS_MASK) ||
2912 !(ss_e2 & DESC_W_MASK))
2913 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2914 if (!(ss_e2 & DESC_P_MASK))
2915#ifdef VBOX /* See page 3-99 of 253666.pdf */
2916 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2917#else
2918 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2919#endif
2920
2921 // push_size = ((param_count * 2) + 8) << shift;
2922
2923 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2924 old_ssp = env->segs[R_SS].base;
2925
2926 sp_mask = get_sp_mask(ss_e2);
2927 ssp = get_seg_base(ss_e1, ss_e2);
2928 if (shift) {
2929 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2930 PUSHL(ssp, sp, sp_mask, ESP);
2931 for(i = param_count - 1; i >= 0; i--) {
2932 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2933 PUSHL(ssp, sp, sp_mask, val);
2934 }
2935 } else {
2936 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2937 PUSHW(ssp, sp, sp_mask, ESP);
2938 for(i = param_count - 1; i >= 0; i--) {
2939 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2940 PUSHW(ssp, sp, sp_mask, val);
2941 }
2942 }
2943 new_stack = 1;
2944 } else {
2945 /* to same privilege */
2946 sp = ESP;
2947 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2948 ssp = env->segs[R_SS].base;
2949 // push_size = (4 << shift);
2950 new_stack = 0;
2951 }
2952
2953 if (shift) {
2954 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2955 PUSHL(ssp, sp, sp_mask, next_eip);
2956 } else {
2957 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2958 PUSHW(ssp, sp, sp_mask, next_eip);
2959 }
2960
2961 /* from this point, not restartable */
2962
2963 if (new_stack) {
2964 ss = (ss & ~3) | dpl;
2965 cpu_x86_load_seg_cache(env, R_SS, ss,
2966 ssp,
2967 get_seg_limit(ss_e1, ss_e2),
2968 ss_e2);
2969 }
2970
2971 selector = (selector & ~3) | dpl;
2972 cpu_x86_load_seg_cache(env, R_CS, selector,
2973 get_seg_base(e1, e2),
2974 get_seg_limit(e1, e2),
2975 e2);
2976 cpu_x86_set_cpl(env, dpl);
2977 SET_ESP(sp, sp_mask);
2978 EIP = offset;
2979 }
2980}
2981
2982/* real and vm86 mode iret */
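/*
 * IRET in real or virtual-8086 mode.  The VBox additions implement the
 * CR4.VME case: with VME enabled and IOPL < 3 the IRET is allowed, but
 * it faults with #GP if it would set TF or enable interrupts while VIP
 * is pending, and the popped IF value is written to VIF instead of IF.
 */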
2983void helper_iret_real(int shift)
2984{
2985 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2986 target_ulong ssp;
2987 int eflags_mask;
2988#ifdef VBOX
2989 bool fVME = false;
2990
2991 remR3TrapClear(env->pVM);
2992#endif /* VBOX */
2993
2994 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2995 sp = ESP;
2996 ssp = env->segs[R_SS].base;
2997 if (shift == 1) {
2998 /* 32 bits */
2999 POPL(ssp, sp, sp_mask, new_eip);
3000 POPL(ssp, sp, sp_mask, new_cs);
3001 new_cs &= 0xffff;
3002 POPL(ssp, sp, sp_mask, new_eflags);
3003 } else {
3004 /* 16 bits */
3005 POPW(ssp, sp, sp_mask, new_eip);
3006 POPW(ssp, sp, sp_mask, new_cs);
3007 POPW(ssp, sp, sp_mask, new_eflags);
3008 }
3009#ifdef VBOX
3010 if ( (env->eflags & VM_MASK)
3011 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3012 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3013 {
3014 fVME = true;
3015 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3016 /* if TF will be set -> #GP */
3017 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3018 || (new_eflags & TF_MASK))
3019 raise_exception(EXCP0D_GPF);
3020 }
3021#endif /* VBOX */
3022 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3023 env->segs[R_CS].selector = new_cs;
3024 env->segs[R_CS].base = (new_cs << 4);
3025 env->eip = new_eip;
3026#ifdef VBOX
3027 if (fVME)
3028 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3029 else
3030#endif
3031 if (env->eflags & VM_MASK)
3032 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3033 else
3034 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3035 if (shift == 0)
3036 eflags_mask &= 0xffff;
3037 load_eflags(new_eflags, eflags_mask);
3038 env->hflags2 &= ~HF2_NMI_MASK;
3039#ifdef VBOX
3040 if (fVME)
3041 {
3042 if (new_eflags & IF_MASK)
3043 env->eflags |= VIF_MASK;
3044 else
3045 env->eflags &= ~VIF_MASK;
3046 }
3047#endif /* VBOX */
3048}
3049
3050static inline void validate_seg(int seg_reg, int cpl)
3051{
3052 int dpl;
3053 uint32_t e2;
3054
3055 /* XXX: on x86_64, we do not want to nullify FS and GS because
3056 they may still contain a valid base. I would be interested to
3057 know how a real x86_64 CPU behaves */
3058 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3059 (env->segs[seg_reg].selector & 0xfffc) == 0)
3060 return;
3061
3062 e2 = env->segs[seg_reg].flags;
3063 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3064 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3065 /* data or non conforming code segment */
3066 if (dpl < cpl) {
3067 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3068 }
3069 }
3070}
3071
3072/* protected mode iret */
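/*
 * Common worker for protected mode RETF and IRET.  'shift' selects the
 * operand size (0 = 16, 1 = 32, 2 = 64 bit) and 'addend' is the
 * immediate of RETF n.  Returning to an outer privilege level also pops
 * SS:ESP and re-validates ES/DS/FS/GS; a 32-bit IRET whose popped
 * EFLAGS has VM set returns to virtual-8086 mode instead.
 */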
3073static inline void helper_ret_protected(int shift, int is_iret, int addend)
3074{
3075 uint32_t new_cs, new_eflags, new_ss;
3076 uint32_t new_es, new_ds, new_fs, new_gs;
3077 uint32_t e1, e2, ss_e1, ss_e2;
3078 int cpl, dpl, rpl, eflags_mask, iopl;
3079 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3080
3081#ifdef VBOX /** @todo Why do we do this? */
3082 ss_e1 = ss_e2 = e1 = e2 = 0;
3083#endif
3084
3085#ifdef TARGET_X86_64
3086 if (shift == 2)
3087 sp_mask = -1;
3088 else
3089#endif
3090 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3091 sp = ESP;
3092 ssp = env->segs[R_SS].base;
3093 new_eflags = 0; /* avoid warning */
3094#ifdef TARGET_X86_64
3095 if (shift == 2) {
3096 POPQ(sp, new_eip);
3097 POPQ(sp, new_cs);
3098 new_cs &= 0xffff;
3099 if (is_iret) {
3100 POPQ(sp, new_eflags);
3101 }
3102 } else
3103#endif
3104 if (shift == 1) {
3105 /* 32 bits */
3106 POPL(ssp, sp, sp_mask, new_eip);
3107 POPL(ssp, sp, sp_mask, new_cs);
3108 new_cs &= 0xffff;
3109 if (is_iret) {
3110 POPL(ssp, sp, sp_mask, new_eflags);
3111#if defined(VBOX) && defined(DEBUG)
3112 printf("iret: new CS %04X\n", new_cs);
3113 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3114 printf("iret: new EFLAGS %08X\n", new_eflags);
3115 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3116#endif
3117 if (new_eflags & VM_MASK)
3118 goto return_to_vm86;
3119 }
3120#ifdef VBOX
3121 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3122 {
3123# ifdef DEBUG
3124 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3125# endif
3126 new_cs = new_cs & 0xfffc;
3127 }
3128#endif
3129 } else {
3130 /* 16 bits */
3131 POPW(ssp, sp, sp_mask, new_eip);
3132 POPW(ssp, sp, sp_mask, new_cs);
3133 if (is_iret)
3134 POPW(ssp, sp, sp_mask, new_eflags);
3135 }
3136 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3137 new_cs, new_eip, shift, addend);
3138 LOG_PCALL_STATE(env);
3139 if ((new_cs & 0xfffc) == 0)
3140 {
3141#if defined(VBOX) && defined(DEBUG)
3142 printf("new_cs & 0xfffc) == 0\n");
3143#endif
3144 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3145 }
3146 if (load_segment(&e1, &e2, new_cs) != 0)
3147 {
3148#if defined(VBOX) && defined(DEBUG)
3149 printf("load_segment failed\n");
3150#endif
3151 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3152 }
3153 if (!(e2 & DESC_S_MASK) ||
3154 !(e2 & DESC_CS_MASK))
3155 {
3156#if defined(VBOX) && defined(DEBUG)
3157 printf("e2 mask %08x\n", e2);
3158#endif
3159 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3160 }
3161 cpl = env->hflags & HF_CPL_MASK;
3162 rpl = new_cs & 3;
3163 if (rpl < cpl)
3164 {
3165#if defined(VBOX) && defined(DEBUG)
3166 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3167#endif
3168 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3169 }
3170 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3171 if (e2 & DESC_C_MASK) {
3172 if (dpl > rpl)
3173 {
3174#if defined(VBOX) && defined(DEBUG)
3175 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3176#endif
3177 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3178 }
3179 } else {
3180 if (dpl != rpl)
3181 {
3182#if defined(VBOX) && defined(DEBUG)
3183 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3184#endif
3185 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3186 }
3187 }
3188 if (!(e2 & DESC_P_MASK))
3189 {
3190#if defined(VBOX) && defined(DEBUG)
3191 printf("DESC_P_MASK e2=%08x\n", e2);
3192#endif
3193 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3194 }
3195
3196 sp += addend;
3197 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3198 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3199 /* return to same privilege level */
3200 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3201 get_seg_base(e1, e2),
3202 get_seg_limit(e1, e2),
3203 e2);
3204 } else {
3205 /* return to different privilege level */
3206#ifdef TARGET_X86_64
3207 if (shift == 2) {
3208 POPQ(sp, new_esp);
3209 POPQ(sp, new_ss);
3210 new_ss &= 0xffff;
3211 } else
3212#endif
3213 if (shift == 1) {
3214 /* 32 bits */
3215 POPL(ssp, sp, sp_mask, new_esp);
3216 POPL(ssp, sp, sp_mask, new_ss);
3217 new_ss &= 0xffff;
3218 } else {
3219 /* 16 bits */
3220 POPW(ssp, sp, sp_mask, new_esp);
3221 POPW(ssp, sp, sp_mask, new_ss);
3222 }
3223 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3224 new_ss, new_esp);
3225 if ((new_ss & 0xfffc) == 0) {
3226#ifdef TARGET_X86_64
3227 /* NULL ss is allowed in long mode if cpl != 3 */
3228 /* XXX: test CS64 ? */
3229 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3230 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3231 0, 0xffffffff,
3232 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3233 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3234 DESC_W_MASK | DESC_A_MASK);
3235 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3236 } else
3237#endif
3238 {
3239 raise_exception_err(EXCP0D_GPF, 0);
3240 }
3241 } else {
3242 if ((new_ss & 3) != rpl)
3243 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3244 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3245 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3246 if (!(ss_e2 & DESC_S_MASK) ||
3247 (ss_e2 & DESC_CS_MASK) ||
3248 !(ss_e2 & DESC_W_MASK))
3249 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3250 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3251 if (dpl != rpl)
3252 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3253 if (!(ss_e2 & DESC_P_MASK))
3254 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3255 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3256 get_seg_base(ss_e1, ss_e2),
3257 get_seg_limit(ss_e1, ss_e2),
3258 ss_e2);
3259 }
3260
3261 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3262 get_seg_base(e1, e2),
3263 get_seg_limit(e1, e2),
3264 e2);
3265 cpu_x86_set_cpl(env, rpl);
3266 sp = new_esp;
3267#ifdef TARGET_X86_64
3268 if (env->hflags & HF_CS64_MASK)
3269 sp_mask = -1;
3270 else
3271#endif
3272 sp_mask = get_sp_mask(ss_e2);
3273
3274 /* validate data segments */
3275 validate_seg(R_ES, rpl);
3276 validate_seg(R_DS, rpl);
3277 validate_seg(R_FS, rpl);
3278 validate_seg(R_GS, rpl);
3279
3280 sp += addend;
3281 }
3282 SET_ESP(sp, sp_mask);
3283 env->eip = new_eip;
3284 if (is_iret) {
3285 /* NOTE: 'cpl' is the _old_ CPL */
3286 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3287 if (cpl == 0)
3288#ifdef VBOX
3289 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3290#else
3291 eflags_mask |= IOPL_MASK;
3292#endif
3293 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3294 if (cpl <= iopl)
3295 eflags_mask |= IF_MASK;
3296 if (shift == 0)
3297 eflags_mask &= 0xffff;
3298 load_eflags(new_eflags, eflags_mask);
3299 }
3300 return;
3301
3302 return_to_vm86:
3303 POPL(ssp, sp, sp_mask, new_esp);
3304 POPL(ssp, sp, sp_mask, new_ss);
3305 POPL(ssp, sp, sp_mask, new_es);
3306 POPL(ssp, sp, sp_mask, new_ds);
3307 POPL(ssp, sp, sp_mask, new_fs);
3308 POPL(ssp, sp, sp_mask, new_gs);
3309
3310 /* modify processor state */
3311 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3312 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3313 load_seg_vm(R_CS, new_cs & 0xffff);
3314 cpu_x86_set_cpl(env, 3);
3315 load_seg_vm(R_SS, new_ss & 0xffff);
3316 load_seg_vm(R_ES, new_es & 0xffff);
3317 load_seg_vm(R_DS, new_ds & 0xffff);
3318 load_seg_vm(R_FS, new_fs & 0xffff);
3319 load_seg_vm(R_GS, new_gs & 0xffff);
3320
3321 env->eip = new_eip & 0xffff;
3322 ESP = new_esp;
3323}
3324
3325void helper_iret_protected(int shift, int next_eip)
3326{
3327 int tss_selector, type;
3328 uint32_t e1, e2;
3329
3330#ifdef VBOX
3331 e1 = e2 = 0; /** @todo Why do we do this? */
3332 remR3TrapClear(env->pVM);
3333#endif
3334
3335 /* specific case for TSS */
3336 if (env->eflags & NT_MASK) {
3337#ifdef TARGET_X86_64
3338 if (env->hflags & HF_LMA_MASK)
3339 raise_exception_err(EXCP0D_GPF, 0);
3340#endif
3341 tss_selector = lduw_kernel(env->tr.base + 0);
3342 if (tss_selector & 4)
3343 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3344 if (load_segment(&e1, &e2, tss_selector) != 0)
3345 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3346 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3347 /* NOTE: we check both segment and busy TSS */
3348 if (type != 3)
3349 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3350 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3351 } else {
3352 helper_ret_protected(shift, 1, 0);
3353 }
3354 env->hflags2 &= ~HF2_NMI_MASK;
3355}
3356
3357void helper_lret_protected(int shift, int addend)
3358{
3359 helper_ret_protected(shift, 0, addend);
3360}
3361
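/*
 * SYSENTER/SYSEXIT fast system calls.  SYSENTER loads flat CS/SS from
 * IA32_SYSENTER_CS (CS = MSR, SS = MSR + 8), ESP/EIP from the matching
 * MSRs and switches to CPL 0.  SYSEXIT below returns to CPL 3 with
 * CS = MSR + 16 and SS = MSR + 24 (or +32/+40 for a 64-bit return),
 * taking the new stack pointer from ECX and the return address from EDX.
 */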
3362void helper_sysenter(void)
3363{
3364 if (env->sysenter_cs == 0) {
3365 raise_exception_err(EXCP0D_GPF, 0);
3366 }
3367 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3368 cpu_x86_set_cpl(env, 0);
3369
3370#ifdef TARGET_X86_64
3371 if (env->hflags & HF_LMA_MASK) {
3372 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3373 0, 0xffffffff,
3374 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3375 DESC_S_MASK |
3376 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3377 } else
3378#endif
3379 {
3380 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3381 0, 0xffffffff,
3382 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3383 DESC_S_MASK |
3384 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3385 }
3386 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3387 0, 0xffffffff,
3388 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3389 DESC_S_MASK |
3390 DESC_W_MASK | DESC_A_MASK);
3391 ESP = env->sysenter_esp;
3392 EIP = env->sysenter_eip;
3393}
3394
3395void helper_sysexit(int dflag)
3396{
3397 int cpl;
3398
3399 cpl = env->hflags & HF_CPL_MASK;
3400 if (env->sysenter_cs == 0 || cpl != 0) {
3401 raise_exception_err(EXCP0D_GPF, 0);
3402 }
3403 cpu_x86_set_cpl(env, 3);
3404#ifdef TARGET_X86_64
3405 if (dflag == 2) {
3406 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3407 0, 0xffffffff,
3408 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3409 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3410 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3411 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3412 0, 0xffffffff,
3413 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3414 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3415 DESC_W_MASK | DESC_A_MASK);
3416 } else
3417#endif
3418 {
3419 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3420 0, 0xffffffff,
3421 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3422 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3423 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3424 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3425 0, 0xffffffff,
3426 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3427 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3428 DESC_W_MASK | DESC_A_MASK);
3429 }
3430 ESP = ECX;
3431 EIP = EDX;
3432}
3433
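/*
 * Control and debug register helpers.  CR8 is special: it reflects the
 * task priority, so reads and writes go to the APIC TPR unless SVM has
 * installed a virtual TPR (HF2_VINTR_MASK), in which case env->v_tpr is
 * used.  Writes to DR0-DR3 and DR7 also update the hardware breakpoint
 * bookkeeping.
 */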
3434#if defined(CONFIG_USER_ONLY)
3435target_ulong helper_read_crN(int reg)
3436{
3437 return 0;
3438}
3439
3440void helper_write_crN(int reg, target_ulong t0)
3441{
3442}
3443
3444void helper_movl_drN_T0(int reg, target_ulong t0)
3445{
3446}
3447#else
3448target_ulong helper_read_crN(int reg)
3449{
3450 target_ulong val;
3451
3452 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3453 switch(reg) {
3454 default:
3455 val = env->cr[reg];
3456 break;
3457 case 8:
3458 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3459#ifndef VBOX
3460 val = cpu_get_apic_tpr(env->apic_state);
3461#else /* VBOX */
3462 val = cpu_get_apic_tpr(env);
3463#endif /* VBOX */
3464 } else {
3465 val = env->v_tpr;
3466 }
3467 break;
3468 }
3469 return val;
3470}
3471
3472void helper_write_crN(int reg, target_ulong t0)
3473{
3474 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3475 switch(reg) {
3476 case 0:
3477 cpu_x86_update_cr0(env, t0);
3478 break;
3479 case 3:
3480 cpu_x86_update_cr3(env, t0);
3481 break;
3482 case 4:
3483 cpu_x86_update_cr4(env, t0);
3484 break;
3485 case 8:
3486 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3487#ifndef VBOX
3488 cpu_set_apic_tpr(env->apic_state, t0);
3489#else /* VBOX */
3490 cpu_set_apic_tpr(env, t0);
3491#endif /* VBOX */
3492 }
3493 env->v_tpr = t0 & 0x0f;
3494 break;
3495 default:
3496 env->cr[reg] = t0;
3497 break;
3498 }
3499}
3500
3501void helper_movl_drN_T0(int reg, target_ulong t0)
3502{
3503 int i;
3504
3505 if (reg < 4) {
3506 hw_breakpoint_remove(env, reg);
3507 env->dr[reg] = t0;
3508 hw_breakpoint_insert(env, reg);
3509 } else if (reg == 7) {
3510 for (i = 0; i < 4; i++)
3511 hw_breakpoint_remove(env, i);
3512 env->dr[7] = t0;
3513 for (i = 0; i < 4; i++)
3514 hw_breakpoint_insert(env, i);
3515 } else
3516 env->dr[reg] = t0;
3517}
3518#endif
3519
3520void helper_lmsw(target_ulong t0)
3521{
3522 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3523 if already set to one. */
3524 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3525 helper_write_crN(0, t0);
3526}
3527
3528void helper_clts(void)
3529{
3530 env->cr[0] &= ~CR0_TS_MASK;
3531 env->hflags &= ~HF_TS_MASK;
3532}
3533
3534void helper_invlpg(target_ulong addr)
3535{
3536 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3537 tlb_flush_page(env, addr);
3538}
3539
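/*
 * RDTSC/RDTSCP: #GP(0) if CR4.TSD is set outside ring 0; the returned
 * value includes the SVM TSC offset.  In the VBox build RDTSCP reads
 * the TSC_AUX value through cpu_rdmsr() instead of a cached field.
 */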
3540void helper_rdtsc(void)
3541{
3542 uint64_t val;
3543
3544 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3545 raise_exception(EXCP0D_GPF);
3546 }
3547 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3548
3549 val = cpu_get_tsc(env) + env->tsc_offset;
3550 EAX = (uint32_t)(val);
3551 EDX = (uint32_t)(val >> 32);
3552}
3553
3554void helper_rdtscp(void)
3555{
3556 helper_rdtsc();
3557#ifndef VBOX
3558 ECX = (uint32_t)(env->tsc_aux);
3559#else /* VBOX */
3560 uint64_t val;
3561 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3562 ECX = (uint32_t)(val);
3563 else
3564 ECX = 0;
3565#endif /* VBOX */
3566}
3567
3568void helper_rdpmc(void)
3569{
3570#ifdef VBOX
3571 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3572 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3573 raise_exception(EXCP0D_GPF);
3574 }
3575 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3576 EAX = 0;
3577 EDX = 0;
3578#else /* !VBOX */
3579 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3580 raise_exception(EXCP0D_GPF);
3581 }
3582 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3583
3584 /* currently unimplemented */
3585 raise_exception_err(EXCP06_ILLOP, 0);
3586#endif /* !VBOX */
3587}
3588
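/*
 * WRMSR/RDMSR: ECX selects the MSR, EDX:EAX holds the 64-bit value.
 * Only the MSRs the recompiler itself cares about are handled here; in
 * the VBox build writes are additionally forwarded to CPUM through
 * cpu_wrmsr(), and reads of unhandled MSRs fall back to cpu_rdmsr().
 */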
3589#if defined(CONFIG_USER_ONLY)
3590void helper_wrmsr(void)
3591{
3592}
3593
3594void helper_rdmsr(void)
3595{
3596}
3597#else
3598void helper_wrmsr(void)
3599{
3600 uint64_t val;
3601
3602 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3603
3604 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3605
3606 switch((uint32_t)ECX) {
3607 case MSR_IA32_SYSENTER_CS:
3608 env->sysenter_cs = val & 0xffff;
3609 break;
3610 case MSR_IA32_SYSENTER_ESP:
3611 env->sysenter_esp = val;
3612 break;
3613 case MSR_IA32_SYSENTER_EIP:
3614 env->sysenter_eip = val;
3615 break;
3616 case MSR_IA32_APICBASE:
3617# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3618 cpu_set_apic_base(env->apic_state, val);
3619# endif
3620 break;
3621 case MSR_EFER:
3622 {
3623 uint64_t update_mask;
3624 update_mask = 0;
3625 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3626 update_mask |= MSR_EFER_SCE;
3627 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3628 update_mask |= MSR_EFER_LME;
3629 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3630 update_mask |= MSR_EFER_FFXSR;
3631 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3632 update_mask |= MSR_EFER_NXE;
3633 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3634 update_mask |= MSR_EFER_SVME;
3635 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3636 update_mask |= MSR_EFER_FFXSR;
3637 cpu_load_efer(env, (env->efer & ~update_mask) |
3638 (val & update_mask));
3639 }
3640 break;
3641 case MSR_STAR:
3642 env->star = val;
3643 break;
3644 case MSR_PAT:
3645 env->pat = val;
3646 break;
3647 case MSR_VM_HSAVE_PA:
3648 env->vm_hsave = val;
3649 break;
3650#ifdef TARGET_X86_64
3651 case MSR_LSTAR:
3652 env->lstar = val;
3653 break;
3654 case MSR_CSTAR:
3655 env->cstar = val;
3656 break;
3657 case MSR_FMASK:
3658 env->fmask = val;
3659 break;
3660 case MSR_FSBASE:
3661 env->segs[R_FS].base = val;
3662 break;
3663 case MSR_GSBASE:
3664 env->segs[R_GS].base = val;
3665 break;
3666 case MSR_KERNELGSBASE:
3667 env->kernelgsbase = val;
3668 break;
3669#endif
3670# ifndef VBOX
3671 case MSR_MTRRphysBase(0):
3672 case MSR_MTRRphysBase(1):
3673 case MSR_MTRRphysBase(2):
3674 case MSR_MTRRphysBase(3):
3675 case MSR_MTRRphysBase(4):
3676 case MSR_MTRRphysBase(5):
3677 case MSR_MTRRphysBase(6):
3678 case MSR_MTRRphysBase(7):
3679 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3680 break;
3681 case MSR_MTRRphysMask(0):
3682 case MSR_MTRRphysMask(1):
3683 case MSR_MTRRphysMask(2):
3684 case MSR_MTRRphysMask(3):
3685 case MSR_MTRRphysMask(4):
3686 case MSR_MTRRphysMask(5):
3687 case MSR_MTRRphysMask(6):
3688 case MSR_MTRRphysMask(7):
3689 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3690 break;
3691 case MSR_MTRRfix64K_00000:
3692 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3693 break;
3694 case MSR_MTRRfix16K_80000:
3695 case MSR_MTRRfix16K_A0000:
3696 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3697 break;
3698 case MSR_MTRRfix4K_C0000:
3699 case MSR_MTRRfix4K_C8000:
3700 case MSR_MTRRfix4K_D0000:
3701 case MSR_MTRRfix4K_D8000:
3702 case MSR_MTRRfix4K_E0000:
3703 case MSR_MTRRfix4K_E8000:
3704 case MSR_MTRRfix4K_F0000:
3705 case MSR_MTRRfix4K_F8000:
3706 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3707 break;
3708 case MSR_MTRRdefType:
3709 env->mtrr_deftype = val;
3710 break;
3711 case MSR_MCG_STATUS:
3712 env->mcg_status = val;
3713 break;
3714 case MSR_MCG_CTL:
3715 if ((env->mcg_cap & MCG_CTL_P)
3716 && (val == 0 || val == ~(uint64_t)0))
3717 env->mcg_ctl = val;
3718 break;
3719 case MSR_TSC_AUX:
3720 env->tsc_aux = val;
3721 break;
3722# endif /* !VBOX */
3723 default:
3724# ifndef VBOX
3725 if ((uint32_t)ECX >= MSR_MC0_CTL
3726 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3727 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3728 if ((offset & 0x3) != 0
3729 || (val == 0 || val == ~(uint64_t)0))
3730 env->mce_banks[offset] = val;
3731 break;
3732 }
3733 /* XXX: exception ? */
3734# endif
3735 break;
3736 }
3737
3738# ifdef VBOX
3739 /* call CPUM. */
3740 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3741 {
3742 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3743 }
3744# endif
3745}
3746
3747void helper_rdmsr(void)
3748{
3749 uint64_t val;
3750
3751 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3752
3753 switch((uint32_t)ECX) {
3754 case MSR_IA32_SYSENTER_CS:
3755 val = env->sysenter_cs;
3756 break;
3757 case MSR_IA32_SYSENTER_ESP:
3758 val = env->sysenter_esp;
3759 break;
3760 case MSR_IA32_SYSENTER_EIP:
3761 val = env->sysenter_eip;
3762 break;
3763 case MSR_IA32_APICBASE:
3764#ifndef VBOX
3765 val = cpu_get_apic_base(env->apic_state);
3766#else /* VBOX */
3767 val = cpu_get_apic_base(env);
3768#endif /* VBOX */
3769 break;
3770 case MSR_EFER:
3771 val = env->efer;
3772 break;
3773 case MSR_STAR:
3774 val = env->star;
3775 break;
3776 case MSR_PAT:
3777 val = env->pat;
3778 break;
3779 case MSR_VM_HSAVE_PA:
3780 val = env->vm_hsave;
3781 break;
3782# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3783 case MSR_IA32_PERF_STATUS:
3784 /* tsc_increment_by_tick */
3785 val = 1000ULL;
3786 /* CPU multiplier */
3787 val |= (((uint64_t)4ULL) << 40);
3788 break;
3789# endif /* !VBOX */
3790#ifdef TARGET_X86_64
3791 case MSR_LSTAR:
3792 val = env->lstar;
3793 break;
3794 case MSR_CSTAR:
3795 val = env->cstar;
3796 break;
3797 case MSR_FMASK:
3798 val = env->fmask;
3799 break;
3800 case MSR_FSBASE:
3801 val = env->segs[R_FS].base;
3802 break;
3803 case MSR_GSBASE:
3804 val = env->segs[R_GS].base;
3805 break;
3806 case MSR_KERNELGSBASE:
3807 val = env->kernelgsbase;
3808 break;
3809# ifndef VBOX
3810 case MSR_TSC_AUX:
3811 val = env->tsc_aux;
3812 break;
3813# endif /*!VBOX*/
3814#endif
3815# ifndef VBOX
3816 case MSR_MTRRphysBase(0):
3817 case MSR_MTRRphysBase(1):
3818 case MSR_MTRRphysBase(2):
3819 case MSR_MTRRphysBase(3):
3820 case MSR_MTRRphysBase(4):
3821 case MSR_MTRRphysBase(5):
3822 case MSR_MTRRphysBase(6):
3823 case MSR_MTRRphysBase(7):
3824 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3825 break;
3826 case MSR_MTRRphysMask(0):
3827 case MSR_MTRRphysMask(1):
3828 case MSR_MTRRphysMask(2):
3829 case MSR_MTRRphysMask(3):
3830 case MSR_MTRRphysMask(4):
3831 case MSR_MTRRphysMask(5):
3832 case MSR_MTRRphysMask(6):
3833 case MSR_MTRRphysMask(7):
3834 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3835 break;
3836 case MSR_MTRRfix64K_00000:
3837 val = env->mtrr_fixed[0];
3838 break;
3839 case MSR_MTRRfix16K_80000:
3840 case MSR_MTRRfix16K_A0000:
3841 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3842 break;
3843 case MSR_MTRRfix4K_C0000:
3844 case MSR_MTRRfix4K_C8000:
3845 case MSR_MTRRfix4K_D0000:
3846 case MSR_MTRRfix4K_D8000:
3847 case MSR_MTRRfix4K_E0000:
3848 case MSR_MTRRfix4K_E8000:
3849 case MSR_MTRRfix4K_F0000:
3850 case MSR_MTRRfix4K_F8000:
3851 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3852 break;
3853 case MSR_MTRRdefType:
3854 val = env->mtrr_deftype;
3855 break;
3856 case MSR_MTRRcap:
3857 if (env->cpuid_features & CPUID_MTRR)
3858 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3859 else
3860 /* XXX: exception ? */
3861 val = 0;
3862 break;
3863 case MSR_MCG_CAP:
3864 val = env->mcg_cap;
3865 break;
3866 case MSR_MCG_CTL:
3867 if (env->mcg_cap & MCG_CTL_P)
3868 val = env->mcg_ctl;
3869 else
3870 val = 0;
3871 break;
3872 case MSR_MCG_STATUS:
3873 val = env->mcg_status;
3874 break;
3875# endif /* !VBOX */
3876 default:
3877# ifndef VBOX
3878 if ((uint32_t)ECX >= MSR_MC0_CTL
3879 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3880 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3881 val = env->mce_banks[offset];
3882 break;
3883 }
3884 /* XXX: exception ? */
3885 val = 0;
3886# else /* VBOX */
3887 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3888 {
3889 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3890 val = 0;
3891 }
3892# endif /* VBOX */
3893 break;
3894 }
3895 EAX = (uint32_t)(val);
3896 EDX = (uint32_t)(val >> 32);
3897
3898# ifdef VBOX_STRICT
3899 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3900 val = 0;
3901 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3902# endif
3903}
3904#endif
3905
3906target_ulong helper_lsl(target_ulong selector1)
3907{
3908 unsigned int limit;
3909 uint32_t e1, e2, eflags, selector;
3910 int rpl, dpl, cpl, type;
3911
3912 selector = selector1 & 0xffff;
3913 eflags = helper_cc_compute_all(CC_OP);
3914 if ((selector & 0xfffc) == 0)
3915 goto fail;
3916 if (load_segment(&e1, &e2, selector) != 0)
3917 goto fail;
3918 rpl = selector & 3;
3919 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3920 cpl = env->hflags & HF_CPL_MASK;
3921 if (e2 & DESC_S_MASK) {
3922 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3923 /* conforming */
3924 } else {
3925 if (dpl < cpl || dpl < rpl)
3926 goto fail;
3927 }
3928 } else {
3929 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
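        /* Only system descriptors that carry a limit are valid for LSL:
           available/busy TSS (types 1, 3, 9, 11) and LDT (type 2). */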
3930 switch(type) {
3931 case 1:
3932 case 2:
3933 case 3:
3934 case 9:
3935 case 11:
3936 break;
3937 default:
3938 goto fail;
3939 }
3940 if (dpl < cpl || dpl < rpl) {
3941 fail:
3942 CC_SRC = eflags & ~CC_Z;
3943 return 0;
3944 }
3945 }
3946 limit = get_seg_limit(e1, e2);
3947 CC_SRC = eflags | CC_Z;
3948 return limit;
3949}
3950
3951target_ulong helper_lar(target_ulong selector1)
3952{
3953 uint32_t e1, e2, eflags, selector;
3954 int rpl, dpl, cpl, type;
3955
3956 selector = selector1 & 0xffff;
3957 eflags = helper_cc_compute_all(CC_OP);
3958 if ((selector & 0xfffc) == 0)
3959 goto fail;
3960 if (load_segment(&e1, &e2, selector) != 0)
3961 goto fail;
3962 rpl = selector & 3;
3963 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3964 cpl = env->hflags & HF_CPL_MASK;
3965 if (e2 & DESC_S_MASK) {
3966 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3967 /* conforming */
3968 } else {
3969 if (dpl < cpl || dpl < rpl)
3970 goto fail;
3971 }
3972 } else {
3973 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
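        /* System descriptor types valid for LAR: TSS (available/busy), LDT,
           call gates and the task gate. */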
3974 switch(type) {
3975 case 1:
3976 case 2:
3977 case 3:
3978 case 4:
3979 case 5:
3980 case 9:
3981 case 11:
3982 case 12:
3983 break;
3984 default:
3985 goto fail;
3986 }
3987 if (dpl < cpl || dpl < rpl) {
3988 fail:
3989 CC_SRC = eflags & ~CC_Z;
3990 return 0;
3991 }
3992 }
3993 CC_SRC = eflags | CC_Z;
3994 return e2 & 0x00f0ff00;
3995}
3996
3997void helper_verr(target_ulong selector1)
3998{
3999 uint32_t e1, e2, eflags, selector;
4000 int rpl, dpl, cpl;
4001
4002 selector = selector1 & 0xffff;
4003 eflags = helper_cc_compute_all(CC_OP);
4004 if ((selector & 0xfffc) == 0)
4005 goto fail;
4006 if (load_segment(&e1, &e2, selector) != 0)
4007 goto fail;
4008 if (!(e2 & DESC_S_MASK))
4009 goto fail;
4010 rpl = selector & 3;
4011 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4012 cpl = env->hflags & HF_CPL_MASK;
4013 if (e2 & DESC_CS_MASK) {
4014 if (!(e2 & DESC_R_MASK))
4015 goto fail;
4016 if (!(e2 & DESC_C_MASK)) {
4017 if (dpl < cpl || dpl < rpl)
4018 goto fail;
4019 }
4020 } else {
4021 if (dpl < cpl || dpl < rpl) {
4022 fail:
4023 CC_SRC = eflags & ~CC_Z;
4024 return;
4025 }
4026 }
4027 CC_SRC = eflags | CC_Z;
4028}
4029
4030void helper_verw(target_ulong selector1)
4031{
4032 uint32_t e1, e2, eflags, selector;
4033 int rpl, dpl, cpl;
4034
4035 selector = selector1 & 0xffff;
4036 eflags = helper_cc_compute_all(CC_OP);
4037 if ((selector & 0xfffc) == 0)
4038 goto fail;
4039 if (load_segment(&e1, &e2, selector) != 0)
4040 goto fail;
4041 if (!(e2 & DESC_S_MASK))
4042 goto fail;
4043 rpl = selector & 3;
4044 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4045 cpl = env->hflags & HF_CPL_MASK;
4046 if (e2 & DESC_CS_MASK) {
4047 goto fail;
4048 } else {
4049 if (dpl < cpl || dpl < rpl)
4050 goto fail;
4051 if (!(e2 & DESC_W_MASK)) {
4052 fail:
4053 CC_SRC = eflags & ~CC_Z;
4054 return;
4055 }
4056 }
4057 CC_SRC = eflags | CC_Z;
4058}
4059
4060/* x87 FPU helpers */
4061
4062static void fpu_set_exception(int mask)
4063{
4064 env->fpus |= mask;
4065 if (env->fpus & (~env->fpuc & FPUC_EM))
4066 env->fpus |= FPUS_SE | FPUS_B;
4067}
4068
4069static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4070{
4071 if (b == 0.0)
4072 fpu_set_exception(FPUS_ZE);
4073 return a / b;
4074}
4075
4076static void fpu_raise_exception(void)
4077{
4078 if (env->cr[0] & CR0_NE_MASK) {
4079 raise_exception(EXCP10_COPR);
4080 }
4081#if !defined(CONFIG_USER_ONLY)
4082 else {
4083 cpu_set_ferr(env);
4084 }
4085#endif
4086}
4087
4088void helper_flds_FT0(uint32_t val)
4089{
4090 union {
4091 float32 f;
4092 uint32_t i;
4093 } u;
4094 u.i = val;
4095 FT0 = float32_to_floatx(u.f, &env->fp_status);
4096}
4097
4098void helper_fldl_FT0(uint64_t val)
4099{
4100 union {
4101 float64 f;
4102 uint64_t i;
4103 } u;
4104 u.i = val;
4105 FT0 = float64_to_floatx(u.f, &env->fp_status);
4106}
4107
4108void helper_fildl_FT0(int32_t val)
4109{
4110 FT0 = int32_to_floatx(val, &env->fp_status);
4111}
4112
4113void helper_flds_ST0(uint32_t val)
4114{
4115 int new_fpstt;
4116 union {
4117 float32 f;
4118 uint32_t i;
4119 } u;
4120 new_fpstt = (env->fpstt - 1) & 7;
4121 u.i = val;
4122 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4123 env->fpstt = new_fpstt;
4124 env->fptags[new_fpstt] = 0; /* validate stack entry */
4125}
4126
4127void helper_fldl_ST0(uint64_t val)
4128{
4129 int new_fpstt;
4130 union {
4131 float64 f;
4132 uint64_t i;
4133 } u;
4134 new_fpstt = (env->fpstt - 1) & 7;
4135 u.i = val;
4136 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4137 env->fpstt = new_fpstt;
4138 env->fptags[new_fpstt] = 0; /* validate stack entry */
4139}
4140
4141void helper_fildl_ST0(int32_t val)
4142{
4143 int new_fpstt;
4144 new_fpstt = (env->fpstt - 1) & 7;
4145 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4146 env->fpstt = new_fpstt;
4147 env->fptags[new_fpstt] = 0; /* validate stack entry */
4148}
4149
4150void helper_fildll_ST0(int64_t val)
4151{
4152 int new_fpstt;
4153 new_fpstt = (env->fpstt - 1) & 7;
4154 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4155 env->fpstt = new_fpstt;
4156 env->fptags[new_fpstt] = 0; /* validate stack entry */
4157}
4158
4159#ifndef VBOX
4160uint32_t helper_fsts_ST0(void)
4161#else
4162RTCCUINTREG helper_fsts_ST0(void)
4163#endif
4164{
4165 union {
4166 float32 f;
4167 uint32_t i;
4168 } u;
4169 u.f = floatx_to_float32(ST0, &env->fp_status);
4170 return u.i;
4171}
4172
4173uint64_t helper_fstl_ST0(void)
4174{
4175 union {
4176 float64 f;
4177 uint64_t i;
4178 } u;
4179 u.f = floatx_to_float64(ST0, &env->fp_status);
4180 return u.i;
4181}
4182
4183#ifndef VBOX
4184int32_t helper_fist_ST0(void)
4185#else
4186RTCCINTREG helper_fist_ST0(void)
4187#endif
4188{
4189 int32_t val;
4190 val = floatx_to_int32(ST0, &env->fp_status);
4191 if (val != (int16_t)val)
4192 val = -32768;
4193 return val;
4194}
4195
4196#ifndef VBOX
4197int32_t helper_fistl_ST0(void)
4198#else
4199RTCCINTREG helper_fistl_ST0(void)
4200#endif
4201{
4202 int32_t val;
4203 val = floatx_to_int32(ST0, &env->fp_status);
4204 return val;
4205}
4206
4207int64_t helper_fistll_ST0(void)
4208{
4209 int64_t val;
4210 val = floatx_to_int64(ST0, &env->fp_status);
4211 return val;
4212}
4213
4214#ifndef VBOX
4215int32_t helper_fistt_ST0(void)
4216#else
4217RTCCINTREG helper_fistt_ST0(void)
4218#endif
4219{
4220 int32_t val;
4221 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4222 if (val != (int16_t)val)
4223 val = -32768;
4224 return val;
4225}
4226
4227#ifndef VBOX
4228int32_t helper_fisttl_ST0(void)
4229#else
4230RTCCINTREG helper_fisttl_ST0(void)
4231#endif
4232{
4233 int32_t val;
4234 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4235 return val;
4236}
4237
4238int64_t helper_fisttll_ST0(void)
4239{
4240 int64_t val;
4241 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4242 return val;
4243}
4244
4245void helper_fldt_ST0(target_ulong ptr)
4246{
4247 int new_fpstt;
4248 new_fpstt = (env->fpstt - 1) & 7;
4249 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4250 env->fpstt = new_fpstt;
4251 env->fptags[new_fpstt] = 0; /* validate stack entry */
4252}
4253
4254void helper_fstt_ST0(target_ulong ptr)
4255{
4256 helper_fstt(ST0, ptr);
4257}
4258
4259void helper_fpush(void)
4260{
4261 fpush();
4262}
4263
4264void helper_fpop(void)
4265{
4266 fpop();
4267}
4268
4269void helper_fdecstp(void)
4270{
4271 env->fpstt = (env->fpstt - 1) & 7;
4272 env->fpus &= (~0x4700);
4273}
4274
4275void helper_fincstp(void)
4276{
4277 env->fpstt = (env->fpstt + 1) & 7;
4278 env->fpus &= (~0x4700);
4279}
4280
4281/* FPU move */
4282
4283void helper_ffree_STN(int st_index)
4284{
4285 env->fptags[(env->fpstt + st_index) & 7] = 1;
4286}
4287
4288void helper_fmov_ST0_FT0(void)
4289{
4290 ST0 = FT0;
4291}
4292
4293void helper_fmov_FT0_STN(int st_index)
4294{
4295 FT0 = ST(st_index);
4296}
4297
4298void helper_fmov_ST0_STN(int st_index)
4299{
4300 ST0 = ST(st_index);
4301}
4302
4303void helper_fmov_STN_ST0(int st_index)
4304{
4305 ST(st_index) = ST0;
4306}
4307
4308void helper_fxchg_ST0_STN(int st_index)
4309{
4310 CPU86_LDouble tmp;
4311 tmp = ST(st_index);
4312 ST(st_index) = ST0;
4313 ST0 = tmp;
4314}
4315
4316/* FPU operations */
4317
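/* floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2 (unordered);
   indexing with ret + 1 maps these onto the C0/C2/C3 condition code bits. */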
4318static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4319
4320void helper_fcom_ST0_FT0(void)
4321{
4322 int ret;
4323
4324 ret = floatx_compare(ST0, FT0, &env->fp_status);
4325 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4326}
4327
4328void helper_fucom_ST0_FT0(void)
4329{
4330 int ret;
4331
4332 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4333 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4334}
4335
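/* The same comparison results mapped onto EFLAGS for FCOMI/FUCOMI:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */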
4336static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4337
4338void helper_fcomi_ST0_FT0(void)
4339{
4340 int eflags;
4341 int ret;
4342
4343 ret = floatx_compare(ST0, FT0, &env->fp_status);
4344 eflags = helper_cc_compute_all(CC_OP);
4345 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4346 CC_SRC = eflags;
4347}
4348
4349void helper_fucomi_ST0_FT0(void)
4350{
4351 int eflags;
4352 int ret;
4353
4354 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4355 eflags = helper_cc_compute_all(CC_OP);
4356 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4357 CC_SRC = eflags;
4358}
4359
4360void helper_fadd_ST0_FT0(void)
4361{
4362 ST0 += FT0;
4363}
4364
4365void helper_fmul_ST0_FT0(void)
4366{
4367 ST0 *= FT0;
4368}
4369
4370void helper_fsub_ST0_FT0(void)
4371{
4372 ST0 -= FT0;
4373}
4374
4375void helper_fsubr_ST0_FT0(void)
4376{
4377 ST0 = FT0 - ST0;
4378}
4379
4380void helper_fdiv_ST0_FT0(void)
4381{
4382 ST0 = helper_fdiv(ST0, FT0);
4383}
4384
4385void helper_fdivr_ST0_FT0(void)
4386{
4387 ST0 = helper_fdiv(FT0, ST0);
4388}
4389
4390/* fp operations between STN and ST0 */
4391
4392void helper_fadd_STN_ST0(int st_index)
4393{
4394 ST(st_index) += ST0;
4395}
4396
4397void helper_fmul_STN_ST0(int st_index)
4398{
4399 ST(st_index) *= ST0;
4400}
4401
4402void helper_fsub_STN_ST0(int st_index)
4403{
4404 ST(st_index) -= ST0;
4405}
4406
4407void helper_fsubr_STN_ST0(int st_index)
4408{
4409 CPU86_LDouble *p;
4410 p = &ST(st_index);
4411 *p = ST0 - *p;
4412}
4413
4414void helper_fdiv_STN_ST0(int st_index)
4415{
4416 CPU86_LDouble *p;
4417 p = &ST(st_index);
4418 *p = helper_fdiv(*p, ST0);
4419}
4420
4421void helper_fdivr_STN_ST0(int st_index)
4422{
4423 CPU86_LDouble *p;
4424 p = &ST(st_index);
4425 *p = helper_fdiv(ST0, *p);
4426}
4427
4428/* misc FPU operations */
4429void helper_fchs_ST0(void)
4430{
4431 ST0 = floatx_chs(ST0);
4432}
4433
4434void helper_fabs_ST0(void)
4435{
4436 ST0 = floatx_abs(ST0);
4437}
4438
4439void helper_fld1_ST0(void)
4440{
4441 ST0 = f15rk[1];
4442}
4443
4444void helper_fldl2t_ST0(void)
4445{
4446 ST0 = f15rk[6];
4447}
4448
4449void helper_fldl2e_ST0(void)
4450{
4451 ST0 = f15rk[5];
4452}
4453
4454void helper_fldpi_ST0(void)
4455{
4456 ST0 = f15rk[2];
4457}
4458
4459void helper_fldlg2_ST0(void)
4460{
4461 ST0 = f15rk[3];
4462}
4463
4464void helper_fldln2_ST0(void)
4465{
4466 ST0 = f15rk[4];
4467}
4468
4469void helper_fldz_ST0(void)
4470{
4471 ST0 = f15rk[0];
4472}
4473
4474void helper_fldz_FT0(void)
4475{
4476 FT0 = f15rk[0];
4477}
4478
4479#ifndef VBOX
4480uint32_t helper_fnstsw(void)
4481#else
4482RTCCUINTREG helper_fnstsw(void)
4483#endif
4484{
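    /* FSW bits 11-13 hold TOP; merge the current top-of-stack index into the stored status word. */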
4485 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4486}
4487
4488#ifndef VBOX
4489uint32_t helper_fnstcw(void)
4490#else
4491RTCCUINTREG helper_fnstcw(void)
4492#endif
4493{
4494 return env->fpuc;
4495}
4496
4497static void update_fp_status(void)
4498{
4499 int rnd_type;
4500
4501 /* set rounding mode */
4502 switch(env->fpuc & RC_MASK) {
4503 default:
4504 case RC_NEAR:
4505 rnd_type = float_round_nearest_even;
4506 break;
4507 case RC_DOWN:
4508 rnd_type = float_round_down;
4509 break;
4510 case RC_UP:
4511 rnd_type = float_round_up;
4512 break;
4513 case RC_CHOP:
4514 rnd_type = float_round_to_zero;
4515 break;
4516 }
4517 set_float_rounding_mode(rnd_type, &env->fp_status);
4518#ifdef FLOATX80
4519 switch((env->fpuc >> 8) & 3) {
4520 case 0:
4521 rnd_type = 32;
4522 break;
4523 case 2:
4524 rnd_type = 64;
4525 break;
4526 case 3:
4527 default:
4528 rnd_type = 80;
4529 break;
4530 }
4531 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4532#endif
4533}
4534
4535void helper_fldcw(uint32_t val)
4536{
4537 env->fpuc = val;
4538 update_fp_status();
4539}
4540
4541void helper_fclex(void)
4542{
4543 env->fpus &= 0x7f00;
4544}
4545
4546void helper_fwait(void)
4547{
4548 if (env->fpus & FPUS_SE)
4549 fpu_raise_exception();
4550}
4551
4552void helper_fninit(void)
4553{
4554 env->fpus = 0;
4555 env->fpstt = 0;
4556 env->fpuc = 0x37f;
4557 env->fptags[0] = 1;
4558 env->fptags[1] = 1;
4559 env->fptags[2] = 1;
4560 env->fptags[3] = 1;
4561 env->fptags[4] = 1;
4562 env->fptags[5] = 1;
4563 env->fptags[6] = 1;
4564 env->fptags[7] = 1;
4565}
4566
4567/* BCD ops */
4568
4569void helper_fbld_ST0(target_ulong ptr)
4570{
4571 CPU86_LDouble tmp;
4572 uint64_t val;
4573 unsigned int v;
4574 int i;
4575
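    /* Packed BCD: bytes 0-8 hold 18 decimal digits (two per byte, high nibble more
       significant), byte 0 being the least significant pair; byte 9 bit 7 is the sign. */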
4576 val = 0;
4577 for(i = 8; i >= 0; i--) {
4578 v = ldub(ptr + i);
4579 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4580 }
4581 tmp = val;
4582 if (ldub(ptr + 9) & 0x80)
4583 tmp = -tmp;
4584 fpush();
4585 ST0 = tmp;
4586}
4587
4588void helper_fbst_ST0(target_ulong ptr)
4589{
4590 int v;
4591 target_ulong mem_ref, mem_end;
4592 int64_t val;
4593
4594 val = floatx_to_int64(ST0, &env->fp_status);
4595 mem_ref = ptr;
4596 mem_end = mem_ref + 9;
4597 if (val < 0) {
4598 stb(mem_end, 0x80);
4599 val = -val;
4600 } else {
4601 stb(mem_end, 0x00);
4602 }
4603 while (mem_ref < mem_end) {
4604 if (val == 0)
4605 break;
4606 v = val % 100;
4607 val = val / 100;
4608 v = ((v / 10) << 4) | (v % 10);
4609 stb(mem_ref++, v);
4610 }
4611 while (mem_ref < mem_end) {
4612 stb(mem_ref++, 0);
4613 }
4614}
4615
4616void helper_f2xm1(void)
4617{
4618 ST0 = pow(2.0,ST0) - 1.0;
4619}
4620
4621void helper_fyl2x(void)
4622{
4623 CPU86_LDouble fptemp;
4624
4625 fptemp = ST0;
4626 if (fptemp>0.0){
4627 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4628 ST1 *= fptemp;
4629 fpop();
4630 } else {
4631 env->fpus &= (~0x4700);
4632 env->fpus |= 0x400;
4633 }
4634}
4635
4636void helper_fptan(void)
4637{
4638 CPU86_LDouble fptemp;
4639
4640 fptemp = ST0;
4641 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4642 env->fpus |= 0x400;
4643 } else {
4644 ST0 = tan(fptemp);
4645 fpush();
4646 ST0 = 1.0;
4647 env->fpus &= (~0x400); /* C2 <-- 0 */
4648 /* the above code is for |arg| < 2**52 only */
4649 }
4650}
4651
4652void helper_fpatan(void)
4653{
4654 CPU86_LDouble fptemp, fpsrcop;
4655
4656 fpsrcop = ST1;
4657 fptemp = ST0;
4658 ST1 = atan2(fpsrcop,fptemp);
4659 fpop();
4660}
4661
4662void helper_fxtract(void)
4663{
4664 CPU86_LDoubleU temp;
4665 unsigned int expdif;
4666
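    /* FXTRACT: ST0 is replaced by the unbiased exponent, then the significand
       (rebiased into [1,2)) is pushed on top of it. */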
4667 temp.d = ST0;
4668 expdif = EXPD(temp) - EXPBIAS;
4669 /*DP exponent bias*/
4670 ST0 = expdif;
4671 fpush();
4672 BIASEXPONENT(temp);
4673 ST0 = temp.d;
4674}
4675
4676void helper_fprem1(void)
4677{
4678 CPU86_LDouble dblq, fpsrcop, fptemp;
4679 CPU86_LDoubleU fpsrcop1, fptemp1;
4680 int expdif;
4681 signed long long int q;
4682
4683#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4684 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4685#else
4686 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4687#endif
4688 ST0 = 0.0 / 0.0; /* NaN */
4689 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4690 return;
4691 }
4692
4693 fpsrcop = ST0;
4694 fptemp = ST1;
4695 fpsrcop1.d = fpsrcop;
4696 fptemp1.d = fptemp;
4697 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4698
4699 if (expdif < 0) {
4700 /* optimisation? taken from the AMD docs */
4701 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4702 /* ST0 is unchanged */
4703 return;
4704 }
4705
4706 if (expdif < 53) {
4707 dblq = fpsrcop / fptemp;
4708 /* round dblq towards nearest integer */
4709 dblq = rint(dblq);
4710 ST0 = fpsrcop - fptemp * dblq;
4711
4712 /* convert dblq to q by truncating towards zero */
4713 if (dblq < 0.0)
4714 q = (signed long long int)(-dblq);
4715 else
4716 q = (signed long long int)dblq;
4717
4718 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4719 /* (C0,C3,C1) <-- (q2,q1,q0) */
4720 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4721 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4722 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4723 } else {
4724 env->fpus |= 0x400; /* C2 <-- 1 */
4725 fptemp = pow(2.0, expdif - 50);
4726 fpsrcop = (ST0 / ST1) / fptemp;
4727 /* fpsrcop = integer obtained by chopping */
4728 fpsrcop = (fpsrcop < 0.0) ?
4729 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4730 ST0 -= (ST1 * fpsrcop * fptemp);
4731 }
4732}
4733
4734void helper_fprem(void)
4735{
4736 CPU86_LDouble dblq, fpsrcop, fptemp;
4737 CPU86_LDoubleU fpsrcop1, fptemp1;
4738 int expdif;
4739 signed long long int q;
4740
4741#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4742 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4743#else
4744 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4745#endif
4746 ST0 = 0.0 / 0.0; /* NaN */
4747 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4748 return;
4749 }
4750
4751 fpsrcop = (CPU86_LDouble)ST0;
4752 fptemp = (CPU86_LDouble)ST1;
4753 fpsrcop1.d = fpsrcop;
4754 fptemp1.d = fptemp;
4755 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4756
4757 if (expdif < 0) {
4758 /* optimisation? taken from the AMD docs */
4759 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4760 /* ST0 is unchanged */
4761 return;
4762 }
4763
4764 if ( expdif < 53 ) {
4765 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4766 /* round dblq towards zero */
4767 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4768 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4769
4770 /* convert dblq to q by truncating towards zero */
4771 if (dblq < 0.0)
4772 q = (signed long long int)(-dblq);
4773 else
4774 q = (signed long long int)dblq;
4775
4776 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4777 /* (C0,C3,C1) <-- (q2,q1,q0) */
4778 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4779 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4780 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4781 } else {
4782 int N = 32 + (expdif % 32); /* as per AMD docs */
4783 env->fpus |= 0x400; /* C2 <-- 1 */
4784 fptemp = pow(2.0, (double)(expdif - N));
4785 fpsrcop = (ST0 / ST1) / fptemp;
4786 /* fpsrcop = integer obtained by chopping */
4787 fpsrcop = (fpsrcop < 0.0) ?
4788 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4789 ST0 -= (ST1 * fpsrcop * fptemp);
4790 }
4791}
4792
4793void helper_fyl2xp1(void)
4794{
4795 CPU86_LDouble fptemp;
4796
4797 fptemp = ST0;
4798 if ((fptemp+1.0)>0.0) {
4799 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4800 ST1 *= fptemp;
4801 fpop();
4802 } else {
4803 env->fpus &= (~0x4700);
4804 env->fpus |= 0x400;
4805 }
4806}
4807
4808void helper_fsqrt(void)
4809{
4810 CPU86_LDouble fptemp;
4811
4812 fptemp = ST0;
4813 if (fptemp<0.0) {
4814 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4815 env->fpus |= 0x400;
4816 }
4817 ST0 = sqrt(fptemp);
4818}
4819
4820void helper_fsincos(void)
4821{
4822 CPU86_LDouble fptemp;
4823
4824 fptemp = ST0;
4825 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4826 env->fpus |= 0x400;
4827 } else {
4828 ST0 = sin(fptemp);
4829 fpush();
4830 ST0 = cos(fptemp);
4831 env->fpus &= (~0x400); /* C2 <-- 0 */
4832 /* the above code is for |arg| < 2**63 only */
4833 }
4834}
4835
4836void helper_frndint(void)
4837{
4838 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4839}
4840
4841void helper_fscale(void)
4842{
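    /* FSCALE: ST0 * 2^ST1, with ST1 truncated towards zero by the int cast. */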
4843 ST0 = ldexp (ST0, (int)(ST1));
4844}
4845
4846void helper_fsin(void)
4847{
4848 CPU86_LDouble fptemp;
4849
4850 fptemp = ST0;
4851 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4852 env->fpus |= 0x400;
4853 } else {
4854 ST0 = sin(fptemp);
4855 env->fpus &= (~0x400); /* C2 <-- 0 */
4856 /* the above code is for |arg| < 2**53 only */
4857 }
4858}
4859
4860void helper_fcos(void)
4861{
4862 CPU86_LDouble fptemp;
4863
4864 fptemp = ST0;
4865 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4866 env->fpus |= 0x400;
4867 } else {
4868 ST0 = cos(fptemp);
4869 env->fpus &= (~0x400); /* C2 <-- 0 */
4870 /* the above code is for |arg| < 2**63 only */
4871 }
4872}
4873
4874void helper_fxam_ST0(void)
4875{
4876 CPU86_LDoubleU temp;
4877 int expdif;
4878
4879 temp.d = ST0;
4880
4881 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4882 if (SIGND(temp))
4883 env->fpus |= 0x200; /* C1 <-- 1 */
4884
4885 /* XXX: test fptags too */
4886 expdif = EXPD(temp);
4887 if (expdif == MAXEXPD) {
4888#ifdef USE_X86LDOUBLE
4889 if (MANTD(temp) == 0x8000000000000000ULL)
4890#else
4891 if (MANTD(temp) == 0)
4892#endif
4893 env->fpus |= 0x500 /*Infinity*/;
4894 else
4895 env->fpus |= 0x100 /*NaN*/;
4896 } else if (expdif == 0) {
4897 if (MANTD(temp) == 0)
4898 env->fpus |= 0x4000 /*Zero*/;
4899 else
4900 env->fpus |= 0x4400 /*Denormal*/;
4901 } else {
4902 env->fpus |= 0x400;
4903 }
4904}
4905
4906void helper_fstenv(target_ulong ptr, int data32)
4907{
4908 int fpus, fptag, exp, i;
4909 uint64_t mant;
4910 CPU86_LDoubleU tmp;
4911
4912 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4913 fptag = 0;
4914 for (i=7; i>=0; i--) {
4915 fptag <<= 2;
4916 if (env->fptags[i]) {
4917 fptag |= 3;
4918 } else {
4919 tmp.d = env->fpregs[i].d;
4920 exp = EXPD(tmp);
4921 mant = MANTD(tmp);
4922 if (exp == 0 && mant == 0) {
4923 /* zero */
4924 fptag |= 1;
4925 } else if (exp == 0 || exp == MAXEXPD
4926#ifdef USE_X86LDOUBLE
4927 || (mant & (1LL << 63)) == 0
4928#endif
4929 ) {
4930 /* NaNs, infinity, denormal */
4931 fptag |= 2;
4932 }
4933 }
4934 }
4935 if (data32) {
4936 /* 32 bit */
4937 stl(ptr, env->fpuc);
4938 stl(ptr + 4, fpus);
4939 stl(ptr + 8, fptag);
4940 stl(ptr + 12, 0); /* fpip */
4941 stl(ptr + 16, 0); /* fpcs */
4942 stl(ptr + 20, 0); /* fpoo */
4943 stl(ptr + 24, 0); /* fpos */
4944 } else {
4945 /* 16 bit */
4946 stw(ptr, env->fpuc);
4947 stw(ptr + 2, fpus);
4948 stw(ptr + 4, fptag);
4949 stw(ptr + 6, 0);
4950 stw(ptr + 8, 0);
4951 stw(ptr + 10, 0);
4952 stw(ptr + 12, 0);
4953 }
4954}
4955
4956void helper_fldenv(target_ulong ptr, int data32)
4957{
4958 int i, fpus, fptag;
4959
4960 if (data32) {
4961 env->fpuc = lduw(ptr);
4962 fpus = lduw(ptr + 4);
4963 fptag = lduw(ptr + 8);
4964 }
4965 else {
4966 env->fpuc = lduw(ptr);
4967 fpus = lduw(ptr + 2);
4968 fptag = lduw(ptr + 4);
4969 }
4970 env->fpstt = (fpus >> 11) & 7;
4971 env->fpus = fpus & ~0x3800;
4972 for(i = 0;i < 8; i++) {
4973 env->fptags[i] = ((fptag & 3) == 3);
4974 fptag >>= 2;
4975 }
4976}
4977
4978void helper_fsave(target_ulong ptr, int data32)
4979{
4980 CPU86_LDouble tmp;
4981 int i;
4982
4983 helper_fstenv(ptr, data32);
4984
4985 ptr += (14 << data32);
4986 for(i = 0;i < 8; i++) {
4987 tmp = ST(i);
4988 helper_fstt(tmp, ptr);
4989 ptr += 10;
4990 }
4991
4992 /* fninit */
4993 env->fpus = 0;
4994 env->fpstt = 0;
4995 env->fpuc = 0x37f;
4996 env->fptags[0] = 1;
4997 env->fptags[1] = 1;
4998 env->fptags[2] = 1;
4999 env->fptags[3] = 1;
5000 env->fptags[4] = 1;
5001 env->fptags[5] = 1;
5002 env->fptags[6] = 1;
5003 env->fptags[7] = 1;
5004}
5005
5006void helper_frstor(target_ulong ptr, int data32)
5007{
5008 CPU86_LDouble tmp;
5009 int i;
5010
5011 helper_fldenv(ptr, data32);
5012 ptr += (14 << data32);
5013
5014 for(i = 0;i < 8; i++) {
5015 tmp = helper_fldt(ptr);
5016 ST(i) = tmp;
5017 ptr += 10;
5018 }
5019}
5020
5021void helper_fxsave(target_ulong ptr, int data64)
5022{
5023 int fpus, fptag, i, nb_xmm_regs;
5024 CPU86_LDouble tmp;
5025 target_ulong addr;
5026
5027 /* The operand must be 16 byte aligned */
5028 if (ptr & 0xf) {
5029 raise_exception(EXCP0D_GPF);
5030 }
5031
5032 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5033 fptag = 0;
5034 for(i = 0; i < 8; i++) {
5035 fptag |= (env->fptags[i] << i);
5036 }
5037 stw(ptr, env->fpuc);
5038 stw(ptr + 2, fpus);
5039 stw(ptr + 4, fptag ^ 0xff);
5040#ifdef TARGET_X86_64
5041 if (data64) {
5042 stq(ptr + 0x08, 0); /* rip */
5043 stq(ptr + 0x10, 0); /* rdp */
5044 } else
5045#endif
5046 {
5047 stl(ptr + 0x08, 0); /* eip */
5048 stl(ptr + 0x0c, 0); /* sel */
5049 stl(ptr + 0x10, 0); /* dp */
5050 stl(ptr + 0x14, 0); /* sel */
5051 }
5052
5053 addr = ptr + 0x20;
5054 for(i = 0;i < 8; i++) {
5055 tmp = ST(i);
5056 helper_fstt(tmp, addr);
5057 addr += 16;
5058 }
5059
5060 if (env->cr[4] & CR4_OSFXSR_MASK) {
5061 /* XXX: finish it */
5062 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5063 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5064 if (env->hflags & HF_CS64_MASK)
5065 nb_xmm_regs = 16;
5066 else
5067 nb_xmm_regs = 8;
5068 addr = ptr + 0xa0;
5069 /* Fast FXSAVE leaves out the XMM registers */
5070 if (!(env->efer & MSR_EFER_FFXSR)
5071 || (env->hflags & HF_CPL_MASK)
5072 || !(env->hflags & HF_LMA_MASK)) {
5073 for(i = 0; i < nb_xmm_regs; i++) {
5074 stq(addr, env->xmm_regs[i].XMM_Q(0));
5075 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5076 addr += 16;
5077 }
5078 }
5079 }
5080}
5081
5082void helper_fxrstor(target_ulong ptr, int data64)
5083{
5084 int i, fpus, fptag, nb_xmm_regs;
5085 CPU86_LDouble tmp;
5086 target_ulong addr;
5087
5088 /* The operand must be 16 byte aligned */
5089 if (ptr & 0xf) {
5090 raise_exception(EXCP0D_GPF);
5091 }
5092
5093 env->fpuc = lduw(ptr);
5094 fpus = lduw(ptr + 2);
5095 fptag = lduw(ptr + 4);
5096 env->fpstt = (fpus >> 11) & 7;
5097 env->fpus = fpus & ~0x3800;
5098 fptag ^= 0xff;
5099 for(i = 0;i < 8; i++) {
5100 env->fptags[i] = ((fptag >> i) & 1);
5101 }
5102
5103 addr = ptr + 0x20;
5104 for(i = 0;i < 8; i++) {
5105 tmp = helper_fldt(addr);
5106 ST(i) = tmp;
5107 addr += 16;
5108 }
5109
5110 if (env->cr[4] & CR4_OSFXSR_MASK) {
5111 /* XXX: finish it */
5112 env->mxcsr = ldl(ptr + 0x18);
5113 //ldl(ptr + 0x1c);
5114 if (env->hflags & HF_CS64_MASK)
5115 nb_xmm_regs = 16;
5116 else
5117 nb_xmm_regs = 8;
5118 addr = ptr + 0xa0;
5119 /* Fast FXRSTOR leaves out the XMM registers */
5120 if (!(env->efer & MSR_EFER_FFXSR)
5121 || (env->hflags & HF_CPL_MASK)
5122 || !(env->hflags & HF_LMA_MASK)) {
5123 for(i = 0; i < nb_xmm_regs; i++) {
5124#if !defined(VBOX) || __GNUC__ < 4
5125 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5126 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5127#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5128# if 1
5129 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5130 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5131 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5132 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5133# else
5134 /* this works fine on Mac OS X, gcc 4.0.1 */
5135 uint64_t u64 = ldq(addr);
5136 env->xmm_regs[i].XMM_Q(0) = u64;
5137 u64 = ldq(addr + 8);
5138 env->xmm_regs[i].XMM_Q(1) = u64;
5139# endif
5140#endif
5141 addr += 16;
5142 }
5143 }
5144 }
5145}
5146
5147#ifndef USE_X86LDOUBLE
5148
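/* Without native 80-bit long double support the FPU registers are kept as 64-bit
   doubles; these helpers convert between the double layout (52-bit mantissa,
   bias 1023) and the 80-bit extended layout (explicit 64-bit mantissa, bias 16383). */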
5149void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5150{
5151 CPU86_LDoubleU temp;
5152 int e;
5153
5154 temp.d = f;
5155 /* mantissa */
5156 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5157 /* exponent + sign */
5158 e = EXPD(temp) - EXPBIAS + 16383;
5159 e |= SIGND(temp) >> 16;
5160 *pexp = e;
5161}
5162
5163CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5164{
5165 CPU86_LDoubleU temp;
5166 int e;
5167 uint64_t ll;
5168
5169 /* XXX: handle overflow ? */
5170 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5171 e |= (upper >> 4) & 0x800; /* sign */
5172 ll = (mant >> 11) & ((1LL << 52) - 1);
5173#ifdef __arm__
5174 temp.l.upper = (e << 20) | (ll >> 32);
5175 temp.l.lower = ll;
5176#else
5177 temp.ll = ll | ((uint64_t)e << 52);
5178#endif
5179 return temp.d;
5180}
5181
5182#else
5183
5184void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5185{
5186 CPU86_LDoubleU temp;
5187
5188 temp.d = f;
5189 *pmant = temp.l.lower;
5190 *pexp = temp.l.upper;
5191}
5192
5193CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5194{
5195 CPU86_LDoubleU temp;
5196
5197 temp.l.upper = upper;
5198 temp.l.lower = mant;
5199 return temp.d;
5200}
5201#endif
5202
5203#ifdef TARGET_X86_64
5204
5205//#define DEBUG_MULDIV
5206
5207static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5208{
5209 *plow += a;
5210 /* carry test */
5211 if (*plow < a)
5212 (*phigh)++;
5213 *phigh += b;
5214}
5215
5216static void neg128(uint64_t *plow, uint64_t *phigh)
5217{
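    /* Two's complement negation of the 128-bit value: invert all bits, then add 1. */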
5218 *plow = ~ *plow;
5219 *phigh = ~ *phigh;
5220 add128(plow, phigh, 1, 0);
5221}
5222
5223/* return TRUE if overflow */
5224static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5225{
5226 uint64_t q, r, a1, a0;
5227 int i, qb, ab;
5228
5229 a0 = *plow;
5230 a1 = *phigh;
5231 if (a1 == 0) {
5232 q = a0 / b;
5233 r = a0 % b;
5234 *plow = q;
5235 *phigh = r;
5236 } else {
5237 if (a1 >= b)
5238 return 1;
5239 /* XXX: use a better algorithm */
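        /* Bit-by-bit restoring division: shift the remainder left and subtract the
           divisor whenever it fits, producing one quotient bit per iteration. */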
5240 for(i = 0; i < 64; i++) {
5241 ab = a1 >> 63;
5242 a1 = (a1 << 1) | (a0 >> 63);
5243 if (ab || a1 >= b) {
5244 a1 -= b;
5245 qb = 1;
5246 } else {
5247 qb = 0;
5248 }
5249 a0 = (a0 << 1) | qb;
5250 }
5251#if defined(DEBUG_MULDIV)
5252 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5253 *phigh, *plow, b, a0, a1);
5254#endif
5255 *plow = a0;
5256 *phigh = a1;
5257 }
5258 return 0;
5259}
5260
5261/* return TRUE if overflow */
5262static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5263{
5264 int sa, sb;
5265 sa = ((int64_t)*phigh < 0);
5266 if (sa)
5267 neg128(plow, phigh);
5268 sb = (b < 0);
5269 if (sb)
5270 b = -b;
5271 if (div64(plow, phigh, b) != 0)
5272 return 1;
5273 if (sa ^ sb) {
5274 if (*plow > (1ULL << 63))
5275 return 1;
5276 *plow = - *plow;
5277 } else {
5278 if (*plow >= (1ULL << 63))
5279 return 1;
5280 }
5281 if (sa)
5282 *phigh = - *phigh;
5283 return 0;
5284}
5285
5286void helper_mulq_EAX_T0(target_ulong t0)
5287{
5288 uint64_t r0, r1;
5289
5290 mulu64(&r0, &r1, EAX, t0);
5291 EAX = r0;
5292 EDX = r1;
5293 CC_DST = r0;
5294 CC_SRC = r1;
5295}
5296
5297void helper_imulq_EAX_T0(target_ulong t0)
5298{
5299 uint64_t r0, r1;
5300
5301 muls64(&r0, &r1, EAX, t0);
5302 EAX = r0;
5303 EDX = r1;
5304 CC_DST = r0;
5305 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5306}
5307
5308target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5309{
5310 uint64_t r0, r1;
5311
5312 muls64(&r0, &r1, t0, t1);
5313 CC_DST = r0;
5314 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5315 return r0;
5316}
5317
5318void helper_divq_EAX(target_ulong t0)
5319{
5320 uint64_t r0, r1;
5321 if (t0 == 0) {
5322 raise_exception(EXCP00_DIVZ);
5323 }
5324 r0 = EAX;
5325 r1 = EDX;
5326 if (div64(&r0, &r1, t0))
5327 raise_exception(EXCP00_DIVZ);
5328 EAX = r0;
5329 EDX = r1;
5330}
5331
5332void helper_idivq_EAX(target_ulong t0)
5333{
5334 uint64_t r0, r1;
5335 if (t0 == 0) {
5336 raise_exception(EXCP00_DIVZ);
5337 }
5338 r0 = EAX;
5339 r1 = EDX;
5340 if (idiv64(&r0, &r1, t0))
5341 raise_exception(EXCP00_DIVZ);
5342 EAX = r0;
5343 EDX = r1;
5344}
5345#endif
5346
5347static void do_hlt(void)
5348{
5349 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5350 env->halted = 1;
5351 env->exception_index = EXCP_HLT;
5352 cpu_loop_exit();
5353}
5354
5355void helper_hlt(int next_eip_addend)
5356{
5357 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5358 EIP += next_eip_addend;
5359
5360 do_hlt();
5361}
5362
5363void helper_monitor(target_ulong ptr)
5364{
5365#ifdef VBOX
5366 if ((uint32_t)ECX > 1)
5367 raise_exception(EXCP0D_GPF);
5368#else /* !VBOX */
5369 if ((uint32_t)ECX != 0)
5370 raise_exception(EXCP0D_GPF);
5371#endif /* !VBOX */
5372 /* XXX: store address ? */
5373 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5374}
5375
5376void helper_mwait(int next_eip_addend)
5377{
5378 if ((uint32_t)ECX != 0)
5379 raise_exception(EXCP0D_GPF);
5380#ifdef VBOX
5381 helper_hlt(next_eip_addend);
5382#else /* !VBOX */
5383 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5384 EIP += next_eip_addend;
5385
5386 /* XXX: not complete but not completely erroneous */
5387 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5388 /* more than one CPU: do not sleep because another CPU may
5389 wake this one */
5390 } else {
5391 do_hlt();
5392 }
5393#endif /* !VBOX */
5394}
5395
5396void helper_debug(void)
5397{
5398 env->exception_index = EXCP_DEBUG;
5399 cpu_loop_exit();
5400}
5401
5402void helper_reset_rf(void)
5403{
5404 env->eflags &= ~RF_MASK;
5405}
5406
5407void helper_raise_interrupt(int intno, int next_eip_addend)
5408{
5409 raise_interrupt(intno, 1, 0, next_eip_addend);
5410}
5411
5412void helper_raise_exception(int exception_index)
5413{
5414 raise_exception(exception_index);
5415}
5416
5417void helper_cli(void)
5418{
5419 env->eflags &= ~IF_MASK;
5420}
5421
5422void helper_sti(void)
5423{
5424 env->eflags |= IF_MASK;
5425}
5426
5427#ifdef VBOX
5428void helper_cli_vme(void)
5429{
5430 env->eflags &= ~VIF_MASK;
5431}
5432
5433void helper_sti_vme(void)
5434{
5435 /* First check, then change eflags according to the AMD manual */
5436 if (env->eflags & VIP_MASK) {
5437 raise_exception(EXCP0D_GPF);
5438 }
5439 env->eflags |= VIF_MASK;
5440}
5441#endif /* VBOX */
5442
5443#if 0
5444/* vm86plus instructions */
5445void helper_cli_vm(void)
5446{
5447 env->eflags &= ~VIF_MASK;
5448}
5449
5450void helper_sti_vm(void)
5451{
5452 env->eflags |= VIF_MASK;
5453 if (env->eflags & VIP_MASK) {
5454 raise_exception(EXCP0D_GPF);
5455 }
5456}
5457#endif
5458
5459void helper_set_inhibit_irq(void)
5460{
5461 env->hflags |= HF_INHIBIT_IRQ_MASK;
5462}
5463
5464void helper_reset_inhibit_irq(void)
5465{
5466 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5467}
5468
5469void helper_boundw(target_ulong a0, int v)
5470{
5471 int low, high;
5472 low = ldsw(a0);
5473 high = ldsw(a0 + 2);
5474 v = (int16_t)v;
5475 if (v < low || v > high) {
5476 raise_exception(EXCP05_BOUND);
5477 }
5478}
5479
5480void helper_boundl(target_ulong a0, int v)
5481{
5482 int low, high;
5483 low = ldl(a0);
5484 high = ldl(a0 + 4);
5485 if (v < low || v > high) {
5486 raise_exception(EXCP05_BOUND);
5487 }
5488}
5489
5490static float approx_rsqrt(float a)
5491{
5492 return 1.0 / sqrt(a);
5493}
5494
5495static float approx_rcp(float a)
5496{
5497 return 1.0 / a;
5498}
5499
5500#if !defined(CONFIG_USER_ONLY)
5501
5502#define MMUSUFFIX _mmu
5503
5504#define SHIFT 0
5505#include "softmmu_template.h"
5506
5507#define SHIFT 1
5508#include "softmmu_template.h"
5509
5510#define SHIFT 2
5511#include "softmmu_template.h"
5512
5513#define SHIFT 3
5514#include "softmmu_template.h"
5515
5516#endif
5517
5518#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5519/* This code assumes that real physical addresses always fit into a host CPU register,
5520 which is wrong in general, but true for our current use cases. */
5521RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5522{
5523 return remR3PhysReadS8(addr);
5524}
5525RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5526{
5527 return remR3PhysReadU8(addr);
5528}
5529void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5530{
5531 remR3PhysWriteU8(addr, val);
5532}
5533RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5534{
5535 return remR3PhysReadS16(addr);
5536}
5537RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5538{
5539 return remR3PhysReadU16(addr);
5540}
5541void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5542{
5543 remR3PhysWriteU16(addr, val);
5544}
5545RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5546{
5547 return remR3PhysReadS32(addr);
5548}
5549RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5550{
5551 return remR3PhysReadU32(addr);
5552}
5553void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5554{
5555 remR3PhysWriteU32(addr, val);
5556}
5557uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5558{
5559 return remR3PhysReadU64(addr);
5560}
5561void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5562{
5563 remR3PhysWriteU64(addr, val);
5564}
5565#endif /* VBOX */
5566
5567#if !defined(CONFIG_USER_ONLY)
5568/* try to fill the TLB and return an exception if error. If retaddr is
5569 NULL, it means that the function was called in C code (i.e. not
5570 from generated code or from helper.c) */
5571/* XXX: fix it to restore all registers */
5572void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5573{
5574 TranslationBlock *tb;
5575 int ret;
5576 unsigned long pc;
5577 CPUX86State *saved_env;
5578
5579 /* XXX: hack to restore env in all cases, even if not called from
5580 generated code */
5581 saved_env = env;
5582 env = cpu_single_env;
5583
5584 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5585 if (ret) {
5586 if (retaddr) {
5587 /* now we have a real cpu fault */
5588 pc = (unsigned long)retaddr;
5589 tb = tb_find_pc(pc);
5590 if (tb) {
5591 /* the PC is inside the translated code. It means that we have
5592 a virtual CPU fault */
5593 cpu_restore_state(tb, env, pc, NULL);
5594 }
5595 }
5596 raise_exception_err(env->exception_index, env->error_code);
5597 }
5598 env = saved_env;
5599}
5600#endif
5601
5602#ifdef VBOX
5603
5604/**
5605 * Correctly computes the eflags.
5606 * @returns eflags.
5607 * @param env1 CPU environment.
5608 */
5609uint32_t raw_compute_eflags(CPUX86State *env1)
5610{
5611 CPUX86State *savedenv = env;
5612 uint32_t efl;
5613 env = env1;
5614 efl = compute_eflags();
5615 env = savedenv;
5616 return efl;
5617}
5618
5619/**
5620 * Reads byte from virtual address in guest memory area.
5621 * XXX: is it working for any addresses? swapped out pages?
5622 * @returns read data byte.
5623 * @param env1 CPU environment.
5624 * @param pvAddr GC Virtual address.
5625 */
5626uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5627{
5628 CPUX86State *savedenv = env;
5629 uint8_t u8;
5630 env = env1;
5631 u8 = ldub_kernel(addr);
5632 env = savedenv;
5633 return u8;
5634}
5635
5636/**
5637 * Reads 16-bit word from virtual address in guest memory area.
5638 * XXX: is it working for any addresses? swapped out pages?
5639 * @returns read data word.
5640 * @param env1 CPU environment.
5641 * @param pvAddr GC Virtual address.
5642 */
5643uint16_t read_word(CPUX86State *env1, target_ulong addr)
5644{
5645 CPUX86State *savedenv = env;
5646 uint16_t u16;
5647 env = env1;
5648 u16 = lduw_kernel(addr);
5649 env = savedenv;
5650 return u16;
5651}
5652
5653/**
5654 * Reads 32-bit dword from virtual address in guest memory area.
5655 * XXX: is it working for any addresses? swapped out pages?
5656 * @returns read data dword.
5657 * @param env1 CPU environment.
5658 * @param pvAddr GC Virtual address.
5659 */
5660uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5661{
5662 CPUX86State *savedenv = env;
5663 uint32_t u32;
5664 env = env1;
5665 u32 = ldl_kernel(addr);
5666 env = savedenv;
5667 return u32;
5668}
5669
5670/**
5671 * Writes byte to virtual address in guest memory area.
5672 * XXX: is it working for any addresses? swapped out pages?
5674 * @param env1 CPU environment.
5675 * @param pvAddr GC Virtual address.
5676 * @param val byte value
5677 */
5678void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5679{
5680 CPUX86State *savedenv = env;
5681 env = env1;
5682 stb(addr, val);
5683 env = savedenv;
5684}
5685
5686void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5687{
5688 CPUX86State *savedenv = env;
5689 env = env1;
5690 stw(addr, val);
5691 env = savedenv;
5692}
5693
5694void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5695{
5696 CPUX86State *savedenv = env;
5697 env = env1;
5698 stl(addr, val);
5699 env = savedenv;
5700}
5701
5702/**
5703 * Correctly loads selector into segment register with updating internal
5704 * qemu data/caches.
5705 * @param env1 CPU environment.
5706 * @param seg_reg Segment register.
5707 * @param selector Selector to load.
5708 */
5709void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5710{
5711 CPUX86State *savedenv = env;
5712#ifdef FORCE_SEGMENT_SYNC
5713 jmp_buf old_buf;
5714#endif
5715
5716 env = env1;
5717
5718 if ( env->eflags & X86_EFL_VM
5719 || !(env->cr[0] & X86_CR0_PE))
5720 {
5721 load_seg_vm(seg_reg, selector);
5722
5723 env = savedenv;
5724
5725 /* Successful sync. */
5726 env1->segs[seg_reg].newselector = 0;
5727 }
5728 else
5729 {
5730 /* For some reason this works even without saving/restoring the jump buffer, and since this
5731 code is time critical we don't do it. */
5732#ifdef FORCE_SEGMENT_SYNC
5733 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5734#endif
5735 if (setjmp(env1->jmp_env) == 0)
5736 {
5737 if (seg_reg == R_CS)
5738 {
5739 uint32_t e1, e2;
5740 e1 = e2 = 0;
5741 load_segment(&e1, &e2, selector);
5742 cpu_x86_load_seg_cache(env, R_CS, selector,
5743 get_seg_base(e1, e2),
5744 get_seg_limit(e1, e2),
5745 e2);
5746 }
5747 else
5748 helper_load_seg(seg_reg, selector);
5749 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5750 loading 0 selectors, which in turn led to subtle problems like #3588 */
5751
5752 env = savedenv;
5753
5754 /* Successful sync. */
5755 env1->segs[seg_reg].newselector = 0;
5756 }
5757 else
5758 {
5759 env = savedenv;
5760
5761 /* Postpone sync until the guest uses the selector. */
5762 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5763 env1->segs[seg_reg].newselector = selector;
5764 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5765 env1->exception_index = -1;
5766 env1->error_code = 0;
5767 env1->old_exception = -1;
5768 }
5769#ifdef FORCE_SEGMENT_SYNC
5770 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5771#endif
5772 }
5773
5774}
5775
5776DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5777{
5778 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5779}
5780
5781
5782int emulate_single_instr(CPUX86State *env1)
5783{
5784 TranslationBlock *tb;
5785 TranslationBlock *current;
5786 int flags;
5787 uint8_t *tc_ptr;
5788 target_ulong old_eip;
5789
5790 /* ensures env is loaded! */
5791 CPUX86State *savedenv = env;
5792 env = env1;
5793
5794 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5795
5796 current = env->current_tb;
5797 env->current_tb = NULL;
5798 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5799
5800 /*
5801 * Translate only one instruction.
5802 */
5803 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5804 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5805 env->segs[R_CS].base, flags, 0);
5806
5807 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5808
5809
5810 /* tb_link_phys: */
5811 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5812 tb->jmp_next[0] = NULL;
5813 tb->jmp_next[1] = NULL;
5814 Assert(tb->jmp_next[0] == NULL);
5815 Assert(tb->jmp_next[1] == NULL);
5816 if (tb->tb_next_offset[0] != 0xffff)
5817 tb_reset_jump(tb, 0);
5818 if (tb->tb_next_offset[1] != 0xffff)
5819 tb_reset_jump(tb, 1);
5820
5821 /*
5822 * Execute it using emulation
5823 */
5824 old_eip = env->eip;
5825 env->current_tb = tb;
5826
5827 /*
5828 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code instead.
5829 * Perhaps not a very safe hack.
5830 */
5831 while (old_eip == env->eip)
5832 {
5833 tc_ptr = tb->tc_ptr;
5834
5835#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5836 int fake_ret;
5837 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5838#else
5839 tcg_qemu_tb_exec(tc_ptr);
5840#endif
5841
5842 /*
5843 * Exit once we detect an external interrupt and interrupts are enabled
5844 */
5845 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5846 || ( (env->eflags & IF_MASK)
5847 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5848 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5849 )
5850 {
5851 break;
5852 }
5853 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
5854 tlb_flush(env, true);
5855 }
5856 }
5857 env->current_tb = current;
5858
5859 tb_phys_invalidate(tb, -1);
5860 tb_free(tb);
5861/*
5862 Assert(tb->tb_next_offset[0] == 0xffff);
5863 Assert(tb->tb_next_offset[1] == 0xffff);
5864 Assert(tb->tb_next[0] == 0xffff);
5865 Assert(tb->tb_next[1] == 0xffff);
5866 Assert(tb->jmp_next[0] == NULL);
5867 Assert(tb->jmp_next[1] == NULL);
5868 Assert(tb->jmp_first == NULL); */
5869
5870 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5871
5872 /*
5873 * Execute the next instruction when we encounter instruction fusing.
5874 */
5875 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5876 {
5877 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5878 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5879 emulate_single_instr(env);
5880 }
5881
5882 env = savedenv;
5883 return 0;
5884}
5885
5886/**
5887 * Correctly loads a new ldtr selector.
5888 *
5889 * @param env1 CPU environment.
5890 * @param selector Selector to load.
5891 */
5892void sync_ldtr(CPUX86State *env1, int selector)
5893{
5894 CPUX86State *saved_env = env;
5895 if (setjmp(env1->jmp_env) == 0)
5896 {
5897 env = env1;
5898 helper_lldt(selector);
5899 env = saved_env;
5900 }
5901 else
5902 {
5903 env = saved_env;
5904#ifdef VBOX_STRICT
5905 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5906#endif
5907 }
5908}
5909
5910int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5911 uint32_t *esp_ptr, int dpl)
5912{
5913 int type, index, shift;
5914
5915 CPUX86State *savedenv = env;
5916 env = env1;
5917
5918 if (!(env->tr.flags & DESC_P_MASK))
5919 cpu_abort(env, "invalid tss");
5920 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5921 if ((type & 7) != 1)
5922 cpu_abort(env, "invalid tss type %d", type);
5923 shift = type >> 3;
5924 index = (dpl * 4 + 2) << shift;
5925 if (index + (4 << shift) - 1 > env->tr.limit)
5926 {
5927 env = savedenv;
5928 return 0;
5929 }
5930 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5931
5932 if (shift == 0) {
5933 *esp_ptr = lduw_kernel(env->tr.base + index);
5934 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5935 } else {
5936 *esp_ptr = ldl_kernel(env->tr.base + index);
5937 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5938 }
5939
5940 env = savedenv;
5941 return 1;
5942}
5943
5944//*****************************************************************************
5945// Needs to be at the bottom of the file (overriding macros)
5946
5947static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5948{
5949 return *(CPU86_LDouble *)ptr;
5950}
5951
5952static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5953{
5954 *(CPU86_LDouble *)ptr = f;
5955}
5956
5957#undef stw
5958#undef stl
5959#undef stq
5960#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5961#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5962#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5963
5964//*****************************************************************************
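/* Mirrors helper_fxsave (FXSR case) and the legacy FSAVE layout, but writes the state
   through a raw host pointer using the stw/stl/stq macros redefined above. */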
5965void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5966{
5967 int fpus, fptag, i, nb_xmm_regs;
5968 CPU86_LDouble tmp;
5969 uint8_t *addr;
5970 int data64 = !!(env->hflags & HF_LMA_MASK);
5971
5972 if (env->cpuid_features & CPUID_FXSR)
5973 {
5974 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5975 fptag = 0;
5976 for(i = 0; i < 8; i++) {
5977 fptag |= (env->fptags[i] << i);
5978 }
5979 stw(ptr, env->fpuc);
5980 stw(ptr + 2, fpus);
5981 stw(ptr + 4, fptag ^ 0xff);
5982
5983 addr = ptr + 0x20;
5984 for(i = 0;i < 8; i++) {
5985 tmp = ST(i);
5986 helper_fstt_raw(tmp, addr);
5987 addr += 16;
5988 }
5989
5990 if (env->cr[4] & CR4_OSFXSR_MASK) {
5991 /* XXX: finish it */
5992 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5993 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5994 nb_xmm_regs = 8 << data64;
5995 addr = ptr + 0xa0;
5996 for(i = 0; i < nb_xmm_regs; i++) {
5997#if __GNUC__ < 4
5998 stq(addr, env->xmm_regs[i].XMM_Q(0));
5999 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6000#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6001 stl(addr, env->xmm_regs[i].XMM_L(0));
6002 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6003 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6004 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6005#endif
6006 addr += 16;
6007 }
6008 }
6009 }
6010 else
6011 {
6012 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6013 int fptag;
6014
6015 fp->FCW = env->fpuc;
6016 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6017 fptag = 0;
6018 for (i=7; i>=0; i--) {
6019 fptag <<= 2;
6020 if (env->fptags[i]) {
6021 fptag |= 3;
6022 } else {
6023 /* the FPU automatically computes it */
6024 }
6025 }
6026 fp->FTW = fptag;
6027
6028 for(i = 0;i < 8; i++) {
6029 tmp = ST(i);
6030 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6031 }
6032 }
6033}
6034
6035//*****************************************************************************
6036#undef lduw
6037#undef ldl
6038#undef ldq
6039#define lduw(a) *(uint16_t *)(a)
6040#define ldl(a) *(uint32_t *)(a)
6041#define ldq(a) *(uint64_t *)(a)
6042//*****************************************************************************
6043void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6044{
6045 int i, fpus, fptag, nb_xmm_regs;
6046 CPU86_LDouble tmp;
6047 uint8_t *addr;
6048 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6049
6050 if (env->cpuid_features & CPUID_FXSR)
6051 {
6052 env->fpuc = lduw(ptr);
6053 fpus = lduw(ptr + 2);
6054 fptag = lduw(ptr + 4);
6055 env->fpstt = (fpus >> 11) & 7;
6056 env->fpus = fpus & ~0x3800;
6057 fptag ^= 0xff;
6058 for(i = 0;i < 8; i++) {
6059 env->fptags[i] = ((fptag >> i) & 1);
6060 }
6061
6062 addr = ptr + 0x20;
6063 for(i = 0;i < 8; i++) {
6064 tmp = helper_fldt_raw(addr);
6065 ST(i) = tmp;
6066 addr += 16;
6067 }
6068
6069 if (env->cr[4] & CR4_OSFXSR_MASK) {
6070 /* XXX: finish it, endianness */
6071 env->mxcsr = ldl(ptr + 0x18);
6072 //ldl(ptr + 0x1c);
6073 nb_xmm_regs = 8 << data64;
6074 addr = ptr + 0xa0;
6075 for(i = 0; i < nb_xmm_regs; i++) {
6076#if HC_ARCH_BITS == 32
6077 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6078 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6079 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6080 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6081 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6082#else
6083 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6084 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6085#endif
6086 addr += 16;
6087 }
6088 }
6089 }
6090 else
6091 {
6092 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6093 int fptag, j;
6094
6095 env->fpuc = fp->FCW;
6096 env->fpstt = (fp->FSW >> 11) & 7;
6097 env->fpus = fp->FSW & ~0x3800;
6098 fptag = fp->FTW;
6099 for(i = 0;i < 8; i++) {
6100 env->fptags[i] = ((fptag & 3) == 3);
6101 fptag >>= 2;
6102 }
6103 j = env->fpstt;
6104 for(i = 0;i < 8; i++) {
6105 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6106 ST(i) = tmp;
6107 }
6108 }
6109}
6110//*****************************************************************************
6111//*****************************************************************************
6112
6113#endif /* VBOX */
6114
6115/* Secure Virtual Machine helpers */
6116
6117#if defined(CONFIG_USER_ONLY)
6118
6119void helper_vmrun(int aflag, int next_eip_addend)
6120{
6121}
6122void helper_vmmcall(void)
6123{
6124}
6125void helper_vmload(int aflag)
6126{
6127}
6128void helper_vmsave(int aflag)
6129{
6130}
6131void helper_stgi(void)
6132{
6133}
6134void helper_clgi(void)
6135{
6136}
6137void helper_skinit(void)
6138{
6139}
6140void helper_invlpga(int aflag)
6141{
6142}
6143void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6144{
6145}
6146void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6147{
6148}
6149
6150void helper_svm_check_io(uint32_t port, uint32_t param,
6151 uint32_t next_eip_addend)
6152{
6153}
6154#else
6155
6156static inline void svm_save_seg(target_phys_addr_t addr,
6157 const SegmentCache *sc)
6158{
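    /* Pack the descriptor attributes into the VMCB format: the access byte
       (type/S/DPL/P) goes into bits 0-7, the G/D/L/AVL nibble into bits 8-11. */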
6159 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6160 sc->selector);
6161 stq_phys(addr + offsetof(struct vmcb_seg, base),
6162 sc->base);
6163 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6164 sc->limit);
6165 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6166 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6167}
6168
6169static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6170{
6171 unsigned int flags;
6172
6173 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6174 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6175 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6176 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6177 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6178}
6179
6180static inline void svm_load_seg_cache(target_phys_addr_t addr,
6181 CPUState *env, int seg_reg)
6182{
6183 SegmentCache sc1, *sc = &sc1;
6184 svm_load_seg(addr, sc);
6185 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6186 sc->base, sc->limit, sc->flags);
6187}
6188
6189void helper_vmrun(int aflag, int next_eip_addend)
6190{
6191 target_ulong addr;
6192 uint32_t event_inj;
6193 uint32_t int_ctl;
6194
6195 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6196
6197 if (aflag == 2)
6198 addr = EAX;
6199 else
6200 addr = (uint32_t)EAX;
6201
6202 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6203
6204 env->vm_vmcb = addr;
6205
6206 /* save the current CPU state in the hsave page */
6207 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6208 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6209
6210 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6211 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6212
6213 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6214 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6215 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6216 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6217 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6218 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6219
6220 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6221 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6222
6223 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6224 &env->segs[R_ES]);
6225 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6226 &env->segs[R_CS]);
6227 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6228 &env->segs[R_SS]);
6229 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6230 &env->segs[R_DS]);
6231
6232 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6233 EIP + next_eip_addend);
6234 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6235 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6236
6237 /* load the interception bitmaps so we do not need to access the
6238 vmcb in svm mode */
6239 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6240 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6241 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6242 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6243 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6244 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6245
6246 /* enable intercepts */
6247 env->hflags |= HF_SVMI_MASK;
6248
6249 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6250
6251 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6252 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6253
6254 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6255 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6256
6257 /* clear exit_info_2 so we behave like the real hardware */
6258 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6259
6260 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6261 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6262 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6263 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6264 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6265 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6266 if (int_ctl & V_INTR_MASKING_MASK) {
6267 env->v_tpr = int_ctl & V_TPR_MASK;
6268 env->hflags2 |= HF2_VINTR_MASK;
6269 if (env->eflags & IF_MASK)
6270 env->hflags2 |= HF2_HIF_MASK;
6271 }
6272
6273 cpu_load_efer(env,
6274 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6275 env->eflags = 0;
6276 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6277 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6278 CC_OP = CC_OP_EFLAGS;
6279
6280 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6281 env, R_ES);
6282 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6283 env, R_CS);
6284 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6285 env, R_SS);
6286 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6287 env, R_DS);
6288
6289 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6290 env->eip = EIP;
6291 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6292 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6293 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6294 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6295 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6296
6297 /* FIXME: guest state consistency checks */
6298
6299 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6300 case TLB_CONTROL_DO_NOTHING:
6301 break;
6302 case TLB_CONTROL_FLUSH_ALL_ASID:
6303 /* FIXME: this is not 100% correct but should work for now */
6304 tlb_flush(env, 1);
6305 break;
6306 }
6307
6308 env->hflags2 |= HF2_GIF_MASK;
6309
6310 if (int_ctl & V_IRQ_MASK) {
6311 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6312 }
6313
6314 /* maybe we need to inject an event */
6315 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6316 if (event_inj & SVM_EVTINJ_VALID) {
6317 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6318 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6319 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6320
6321 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6322 /* FIXME: need to implement valid_err */
6323 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6324 case SVM_EVTINJ_TYPE_INTR:
6325 env->exception_index = vector;
6326 env->error_code = event_inj_err;
6327 env->exception_is_int = 0;
6328 env->exception_next_eip = -1;
6329 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6330 /* XXX: is this always correct? */
6331 do_interrupt(vector, 0, 0, 0, 1);
6332 break;
6333 case SVM_EVTINJ_TYPE_NMI:
6334 env->exception_index = EXCP02_NMI;
6335 env->error_code = event_inj_err;
6336 env->exception_is_int = 0;
6337 env->exception_next_eip = EIP;
6338 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6339 cpu_loop_exit();
6340 break;
6341 case SVM_EVTINJ_TYPE_EXEPT:
6342 env->exception_index = vector;
6343 env->error_code = event_inj_err;
6344 env->exception_is_int = 0;
6345 env->exception_next_eip = -1;
6346 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6347 cpu_loop_exit();
6348 break;
6349 case SVM_EVTINJ_TYPE_SOFT:
6350 env->exception_index = vector;
6351 env->error_code = event_inj_err;
6352 env->exception_is_int = 1;
6353 env->exception_next_eip = EIP;
6354 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6355 cpu_loop_exit();
6356 break;
6357 }
6358 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6359 }
6360}
6361
6362void helper_vmmcall(void)
6363{
6364 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6365 raise_exception(EXCP06_ILLOP);
6366}
6367
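/* VMLOAD/VMSAVE transfer the hidden state that VMRUN/#VMEXIT do not touch:
   FS, GS, TR and LDTR including their hidden descriptor state, KernelGSBase,
   the STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the SYSENTER MSRs. */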
6368void helper_vmload(int aflag)
6369{
6370 target_ulong addr;
6371 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6372
6373 if (aflag == 2)
6374 addr = EAX;
6375 else
6376 addr = (uint32_t)EAX;
6377
6378 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6379 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6380 env->segs[R_FS].base);
6381
6382 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6383 env, R_FS);
6384 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6385 env, R_GS);
6386 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6387 &env->tr);
6388 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6389 &env->ldt);
6390
6391#ifdef TARGET_X86_64
6392 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6393 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6394 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6395 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6396#endif
6397 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6398 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6399 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6400 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6401}
6402
6403void helper_vmsave(int aflag)
6404{
6405 target_ulong addr;
6406 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6407
6408 if (aflag == 2)
6409 addr = EAX;
6410 else
6411 addr = (uint32_t)EAX;
6412
6413 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6414 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6415 env->segs[R_FS].base);
6416
6417 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6418 &env->segs[R_FS]);
6419 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6420 &env->segs[R_GS]);
6421 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6422 &env->tr);
6423 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6424 &env->ldt);
6425
6426#ifdef TARGET_X86_64
6427 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6428 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6429 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6430 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6431#endif
6432 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6433 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6434 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6435 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6436}
6437
6438void helper_stgi(void)
6439{
6440 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6441 env->hflags2 |= HF2_GIF_MASK;
6442}
6443
6444void helper_clgi(void)
6445{
6446 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6447 env->hflags2 &= ~HF2_GIF_MASK;
6448}
6449
6450void helper_skinit(void)
6451{
6452 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6453 /* XXX: not implemented */
6454 raise_exception(EXCP06_ILLOP);
6455}
6456
6457void helper_invlpga(int aflag)
6458{
6459 target_ulong addr;
6460 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6461
6462 if (aflag == 2)
6463 addr = EAX;
6464 else
6465 addr = (uint32_t)EAX;
6466
6467 /* XXX: could use the ASID to decide whether the flush is
6468 actually needed */
6469 tlb_flush_page(env, addr);
6470}
6471
6472void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6473{
6474 if (likely(!(env->hflags & HF_SVMI_MASK)))
6475 return;
6476#ifndef VBOX
6477 switch(type) {
6478 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6479 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6480 helper_vmexit(type, param);
6481 }
6482 break;
6483 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6484 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6485 helper_vmexit(type, param);
6486 }
6487 break;
6488 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6489 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6490 helper_vmexit(type, param);
6491 }
6492 break;
6493 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6494 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6495 helper_vmexit(type, param);
6496 }
6497 break;
6498 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6499 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6500 helper_vmexit(type, param);
6501 }
6502 break;
6503 case SVM_EXIT_MSR:
6504 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6505 /* FIXME: this should be read in at vmrun (faster this way?) */
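/* The MSR permission map holds two bits per MSR (read intercept, then
   write intercept): bytes 0x000-0x7ff cover MSRs 0x00000000-0x00001fff,
   0x800-0xfff cover 0xc0000000-0xc0001fff and 0x1000-0x17ff cover
   0xc0010000-0xc0011fff. 'param' selects the read (0) or write (1) bit. */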
6506 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6507 uint32_t t0, t1;
6508 switch((uint32_t)ECX) {
6509 case 0 ... 0x1fff:
6510 t0 = (ECX * 2) % 8;
6511 t1 = (ECX * 2) / 8;
6512 break;
6513 case 0xc0000000 ... 0xc0001fff:
6514 t0 = (8192 + ECX - 0xc0000000) * 2;
6515 t1 = (t0 / 8);
6516 t0 %= 8;
6517 break;
6518 case 0xc0010000 ... 0xc0011fff:
6519 t0 = (16384 + ECX - 0xc0010000) * 2;
6520 t1 = (t0 / 8);
6521 t0 %= 8;
6522 break;
6523 default:
6524 helper_vmexit(type, param);
6525 t0 = 0;
6526 t1 = 0;
6527 break;
6528 }
6529 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6530 helper_vmexit(type, param);
6531 }
6532 break;
6533 default:
6534 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6535 helper_vmexit(type, param);
6536 }
6537 break;
6538 }
6539#else /* VBOX */
6540 AssertMsgFailed(("We shouldn't be here; HWACCM is handled differently!"));
6541#endif /* VBOX */
6542}
6543
6544void helper_svm_check_io(uint32_t port, uint32_t param,
6545 uint32_t next_eip_addend)
6546{
6547 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6548 /* FIXME: this should be read in at vmrun (faster this way?) */
6549 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
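/* One bit per I/O port in the permission map; bits 4-6 of 'param' are the
   SZ8/SZ16/SZ32 size flags, so 'mask' covers every port byte touched by
   the access. */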
6550 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6551 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6552 /* next EIP */
6553 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6554 env->eip + next_eip_addend);
6555 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6556 }
6557 }
6558}
6559
6560/* Note: currently only 32 bits of exit_code are used */
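/* #VMEXIT: write the interrupt shadow and the guest state back to the VMCB,
   disable intercepts, reload the host state from the host-save area, record
   the exit code/info and any event that was being injected, and drop GIF. */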
6561void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6562{
6563 uint32_t int_ctl;
6564
6565 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6566 exit_code, exit_info_1,
6567 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6568 EIP);
6569
6570 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6571 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6572 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6573 } else {
6574 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6575 }
6576
6577 /* Save the VM state in the vmcb */
6578 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6579 &env->segs[R_ES]);
6580 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6581 &env->segs[R_CS]);
6582 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6583 &env->segs[R_SS]);
6584 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6585 &env->segs[R_DS]);
6586
6587 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6588 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6589
6590 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6591 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6592
6593 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6594 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6595 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6596 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6597 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6598
6599 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6600 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6601 int_ctl |= env->v_tpr & V_TPR_MASK;
6602 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6603 int_ctl |= V_IRQ_MASK;
6604 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6605
6606 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6607 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6608 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6609 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6610 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6611 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6612 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6613
6614 /* Reload the host state from vm_hsave */
6615 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6616 env->hflags &= ~HF_SVMI_MASK;
6617 env->intercept = 0;
6618 env->intercept_exceptions = 0;
6619 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6620 env->tsc_offset = 0;
6621
6622 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6623 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6624
6625 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6626 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6627
6628 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6629 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6630 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6631 /* we need to set the efer after the crs so the hidden flags get
6632 set properly */
6633 cpu_load_efer(env,
6634 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6635 env->eflags = 0;
6636 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6637 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6638 CC_OP = CC_OP_EFLAGS;
6639
6640 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6641 env, R_ES);
6642 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6643 env, R_CS);
6644 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6645 env, R_SS);
6646 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6647 env, R_DS);
6648
6649 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6650 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6651 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6652
6653 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6654 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6655
6656 /* other setups */
6657 cpu_x86_set_cpl(env, 0);
6658 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6659 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6660
6661 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6662 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6663 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6664 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6665 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6666
6667 env->hflags2 &= ~HF2_GIF_MASK;
6668 /* FIXME: Resets the current ASID register to zero (host ASID). */
6669
6670 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6671
6672 /* Clears the TSC_OFFSET inside the processor. */
6673
6674 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6675 from the page table indicated by the host's CR3. If the PDPEs contain
6676 illegal state, the processor causes a shutdown. */
6677
6678 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6679 env->cr[0] |= CR0_PE_MASK;
6680 env->eflags &= ~VM_MASK;
6681
6682 /* Disables all breakpoints in the host DR7 register. */
6683
6684 /* Checks the reloaded host state for consistency. */
6685
6686 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6687 host's code segment or non-canonical (in the case of long mode), a
6688 #GP fault is delivered inside the host. */
6689
6690 /* remove any pending exception */
6691 env->exception_index = -1;
6692 env->error_code = 0;
6693 env->old_exception = -1;
6694
6695 cpu_loop_exit();
6696}
6697
6698#endif
6699
6700/* MMX/SSE */
6701/* XXX: optimize by storing fptt and fptags in the static cpu state */
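/* env->fptags[] holds one byte per x87 register (0 = valid, 1 = empty).
   MMX instructions reset the FPU stack top and mark all registers valid,
   while EMMS tags them all empty again; each 32-bit store below updates
   four tag bytes at a time. */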
6702void helper_enter_mmx(void)
6703{
6704 env->fpstt = 0;
6705 *(uint32_t *)(env->fptags) = 0;
6706 *(uint32_t *)(env->fptags + 4) = 0;
6707}
6708
6709void helper_emms(void)
6710{
6711 /* set to empty state */
6712 *(uint32_t *)(env->fptags) = 0x01010101;
6713 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6714}
6715
6716/* XXX: remove this helper */
6717void helper_movq(void *d, void *s)
6718{
6719 *(uint64_t *)d = *(uint64_t *)s;
6720}
6721
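/* SHIFT selects the operand width of the included templates: for ops_sse.h,
   0 instantiates the 64-bit MMX variants and 1 the 128-bit SSE variants;
   for helper_template.h, 0-3 instantiate the byte/word/dword/qword flag,
   shift and division helpers. */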
6722#define SHIFT 0
6723#include "ops_sse.h"
6724
6725#define SHIFT 1
6726#include "ops_sse.h"
6727
6728#define SHIFT 0
6729#include "helper_template.h"
6730#undef SHIFT
6731
6732#define SHIFT 1
6733#include "helper_template.h"
6734#undef SHIFT
6735
6736#define SHIFT 2
6737#include "helper_template.h"
6738#undef SHIFT
6739
6740#ifdef TARGET_X86_64
6741
6742#define SHIFT 3
6743#include "helper_template.h"
6744#undef SHIFT
6745
6746#endif
6747
6748/* bit operations */
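/* helper_bsf returns the index of the lowest set bit; the generated code is
   expected to branch around the t0 == 0 case, since the loop would not
   terminate for a zero operand. helper_lzcnt counts leading zeros within a
   wordsize-bit operand (returning wordsize for 0); with wordsize == 0 it
   returns the index of the highest set bit, which is how helper_bsr uses it. */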
6749target_ulong helper_bsf(target_ulong t0)
6750{
6751 int count;
6752 target_ulong res;
6753
6754 res = t0;
6755 count = 0;
6756 while ((res & 1) == 0) {
6757 count++;
6758 res >>= 1;
6759 }
6760 return count;
6761}
6762
6763target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6764{
6765 int count;
6766 target_ulong res, mask;
6767
6768 if (wordsize > 0 && t0 == 0) {
6769 return wordsize;
6770 }
6771 res = t0;
6772 count = TARGET_LONG_BITS - 1;
6773 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6774 while ((res & mask) == 0) {
6775 count--;
6776 res <<= 1;
6777 }
6778 if (wordsize > 0) {
6779 return wordsize - 1 - count;
6780 }
6781 return count;
6782}
6783
6784target_ulong helper_bsr(target_ulong t0)
6785{
6786 return helper_lzcnt(t0, 0);
6787}
6788
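/* Lazy condition codes: CC_OP records the last flag-setting operation and
   CC_SRC/CC_DST its inputs/result, so EFLAGS is only materialized on demand.
   helper_cc_compute_all rebuilds the full flag set, helper_cc_compute_c just
   the carry; CC_OP_EFLAGS means the flags already sit literally in CC_SRC. */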
6789static int compute_all_eflags(void)
6790{
6791 return CC_SRC;
6792}
6793
6794static int compute_c_eflags(void)
6795{
6796 return CC_SRC & CC_C;
6797}
6798
6799uint32_t helper_cc_compute_all(int op)
6800{
6801 switch (op) {
6802 default: /* should never happen */ return 0;
6803
6804 case CC_OP_EFLAGS: return compute_all_eflags();
6805
6806 case CC_OP_MULB: return compute_all_mulb();
6807 case CC_OP_MULW: return compute_all_mulw();
6808 case CC_OP_MULL: return compute_all_mull();
6809
6810 case CC_OP_ADDB: return compute_all_addb();
6811 case CC_OP_ADDW: return compute_all_addw();
6812 case CC_OP_ADDL: return compute_all_addl();
6813
6814 case CC_OP_ADCB: return compute_all_adcb();
6815 case CC_OP_ADCW: return compute_all_adcw();
6816 case CC_OP_ADCL: return compute_all_adcl();
6817
6818 case CC_OP_SUBB: return compute_all_subb();
6819 case CC_OP_SUBW: return compute_all_subw();
6820 case CC_OP_SUBL: return compute_all_subl();
6821
6822 case CC_OP_SBBB: return compute_all_sbbb();
6823 case CC_OP_SBBW: return compute_all_sbbw();
6824 case CC_OP_SBBL: return compute_all_sbbl();
6825
6826 case CC_OP_LOGICB: return compute_all_logicb();
6827 case CC_OP_LOGICW: return compute_all_logicw();
6828 case CC_OP_LOGICL: return compute_all_logicl();
6829
6830 case CC_OP_INCB: return compute_all_incb();
6831 case CC_OP_INCW: return compute_all_incw();
6832 case CC_OP_INCL: return compute_all_incl();
6833
6834 case CC_OP_DECB: return compute_all_decb();
6835 case CC_OP_DECW: return compute_all_decw();
6836 case CC_OP_DECL: return compute_all_decl();
6837
6838 case CC_OP_SHLB: return compute_all_shlb();
6839 case CC_OP_SHLW: return compute_all_shlw();
6840 case CC_OP_SHLL: return compute_all_shll();
6841
6842 case CC_OP_SARB: return compute_all_sarb();
6843 case CC_OP_SARW: return compute_all_sarw();
6844 case CC_OP_SARL: return compute_all_sarl();
6845
6846#ifdef TARGET_X86_64
6847 case CC_OP_MULQ: return compute_all_mulq();
6848
6849 case CC_OP_ADDQ: return compute_all_addq();
6850
6851 case CC_OP_ADCQ: return compute_all_adcq();
6852
6853 case CC_OP_SUBQ: return compute_all_subq();
6854
6855 case CC_OP_SBBQ: return compute_all_sbbq();
6856
6857 case CC_OP_LOGICQ: return compute_all_logicq();
6858
6859 case CC_OP_INCQ: return compute_all_incq();
6860
6861 case CC_OP_DECQ: return compute_all_decq();
6862
6863 case CC_OP_SHLQ: return compute_all_shlq();
6864
6865 case CC_OP_SARQ: return compute_all_sarq();
6866#endif
6867 }
6868}
6869
6870uint32_t helper_cc_compute_c(int op)
6871{
6872 switch (op) {
6873 default: /* should never happen */ return 0;
6874
6875 case CC_OP_EFLAGS: return compute_c_eflags();
6876
6877 case CC_OP_MULB: return compute_c_mull();
6878 case CC_OP_MULW: return compute_c_mull();
6879 case CC_OP_MULL: return compute_c_mull();
6880
6881 case CC_OP_ADDB: return compute_c_addb();
6882 case CC_OP_ADDW: return compute_c_addw();
6883 case CC_OP_ADDL: return compute_c_addl();
6884
6885 case CC_OP_ADCB: return compute_c_adcb();
6886 case CC_OP_ADCW: return compute_c_adcw();
6887 case CC_OP_ADCL: return compute_c_adcl();
6888
6889 case CC_OP_SUBB: return compute_c_subb();
6890 case CC_OP_SUBW: return compute_c_subw();
6891 case CC_OP_SUBL: return compute_c_subl();
6892
6893 case CC_OP_SBBB: return compute_c_sbbb();
6894 case CC_OP_SBBW: return compute_c_sbbw();
6895 case CC_OP_SBBL: return compute_c_sbbl();
6896
6897 case CC_OP_LOGICB: return compute_c_logicb();
6898 case CC_OP_LOGICW: return compute_c_logicw();
6899 case CC_OP_LOGICL: return compute_c_logicl();
6900
6901 case CC_OP_INCB: return compute_c_incl();
6902 case CC_OP_INCW: return compute_c_incl();
6903 case CC_OP_INCL: return compute_c_incl();
6904
6905 case CC_OP_DECB: return compute_c_incl();
6906 case CC_OP_DECW: return compute_c_incl();
6907 case CC_OP_DECL: return compute_c_incl();
6908
6909 case CC_OP_SHLB: return compute_c_shlb();
6910 case CC_OP_SHLW: return compute_c_shlw();
6911 case CC_OP_SHLL: return compute_c_shll();
6912
6913 case CC_OP_SARB: return compute_c_sarl();
6914 case CC_OP_SARW: return compute_c_sarl();
6915 case CC_OP_SARL: return compute_c_sarl();
6916
6917#ifdef TARGET_X86_64
6918 case CC_OP_MULQ: return compute_c_mull();
6919
6920 case CC_OP_ADDQ: return compute_c_addq();
6921
6922 case CC_OP_ADCQ: return compute_c_adcq();
6923
6924 case CC_OP_SUBQ: return compute_c_subq();
6925
6926 case CC_OP_SBBQ: return compute_c_sbbq();
6927
6928 case CC_OP_LOGICQ: return compute_c_logicq();
6929
6930 case CC_OP_INCQ: return compute_c_incl();
6931
6932 case CC_OP_DECQ: return compute_c_incl();
6933
6934 case CC_OP_SHLQ: return compute_c_shlq();
6935
6936 case CC_OP_SARQ: return compute_c_sarl();
6937#endif
6938 }
6939}