VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@37689

Last change on this file since 37689 was 37689, checked in by vboxsync, 13 years ago

recompiler: Merged in changes from 0.13.0.

  • Property svn:eol-style set to native
File size: 193.6 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
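/* PF lookup table: entry i is CC_P when the byte value i contains an even number of set bits. */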
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
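/* x87 constants loaded by FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T. */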
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
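/* Rebuild EFLAGS from the lazily computed condition codes, DF and the remaining stored flag bits. */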
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205#endif /* VBOX */
206
207/* return non-zero on error */
208static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
209 int selector)
210{
211 SegmentCache *dt;
212 int index;
213 target_ulong ptr;
214
215#ifdef VBOX
216 /* Trying to load a selector with RPL=1 while in raw ring 0 mode? */
217 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
218 {
219 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
220 selector = selector & 0xfffc;
221 }
222#endif /* VBOX */
223
224 if (selector & 0x4)
225 dt = &env->ldt;
226 else
227 dt = &env->gdt;
228 index = selector & ~7;
229 if ((index + 7) > dt->limit)
230 return -1;
231 ptr = dt->base + index;
232 *e1_ptr = ldl_kernel(ptr);
233 *e2_ptr = ldl_kernel(ptr + 4);
234 return 0;
235}
236
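/* Extract the 20-bit limit from a descriptor and scale it to byte granularity (4 KiB units when the G bit is set). */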
237static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
238{
239 unsigned int limit;
240 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
241 if (e2 & DESC_G_MASK)
242 limit = (limit << 12) | 0xfff;
243 return limit;
244}
245
246static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
247{
248 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
249}
250
251static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
252{
253 sc->base = get_seg_base(e1, e2);
254 sc->limit = get_seg_limit(e1, e2);
255 sc->flags = e2;
256}
257
258/* init the segment cache in vm86 mode. */
259static inline void load_seg_vm(int seg, int selector)
260{
261 selector &= 0xffff;
262#ifdef VBOX
263 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
264 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
265 flags |= (3 << DESC_DPL_SHIFT);
266
267 cpu_x86_load_seg_cache(env, seg, selector,
268 (selector << 4), 0xffff, flags);
269#else /* VBOX */
270 cpu_x86_load_seg_cache(env, seg, selector,
271 (selector << 4), 0xffff, 0);
272#endif /* VBOX */
273}
274
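/* Fetch the inner-ring stack (SS:ESP) for the given DPL from the current TSS, handling both 16- and 32-bit TSS formats. */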
275static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
276 uint32_t *esp_ptr, int dpl)
277{
278#ifndef VBOX
279 int type, index, shift;
280#else
281 unsigned int type, index, shift;
282#endif
283
284#if 0
285 {
286 int i;
287 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
288 for(i=0;i<env->tr.limit;i++) {
289 printf("%02x ", env->tr.base[i]);
290 if ((i & 7) == 7) printf("\n");
291 }
292 printf("\n");
293 }
294#endif
295
296 if (!(env->tr.flags & DESC_P_MASK))
297 cpu_abort(env, "invalid tss");
298 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
299 if ((type & 7) != 1)
300 cpu_abort(env, "invalid tss type");
301 shift = type >> 3;
302 index = (dpl * 4 + 2) << shift;
303 if (index + (4 << shift) - 1 > env->tr.limit)
304 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
305 if (shift == 0) {
306 *esp_ptr = lduw_kernel(env->tr.base + index);
307 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
308 } else {
309 *esp_ptr = ldl_kernel(env->tr.base + index);
310 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
311 }
312}
313
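/* Load a segment register during a task switch; inconsistent descriptors raise #TS, a not-present segment raises #NP. */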
314/* XXX: merge with load_seg() */
315static void tss_load_seg(int seg_reg, int selector)
316{
317 uint32_t e1, e2;
318 int rpl, dpl, cpl;
319
320#ifdef VBOX
321 e1 = e2 = 0; /* gcc warning? */
322 cpl = env->hflags & HF_CPL_MASK;
323 /* Trying to load a selector with RPL=1 while in raw ring 0 mode? */
324 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
325 {
326 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
327 selector = selector & 0xfffc;
328 }
329#endif /* VBOX */
330
331 if ((selector & 0xfffc) != 0) {
332 if (load_segment(&e1, &e2, selector) != 0)
333 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
334 if (!(e2 & DESC_S_MASK))
335 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
336 rpl = selector & 3;
337 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
338 cpl = env->hflags & HF_CPL_MASK;
339 if (seg_reg == R_CS) {
340 if (!(e2 & DESC_CS_MASK))
341 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
342 /* XXX: is it correct ? */
343 if (dpl != rpl)
344 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
345 if ((e2 & DESC_C_MASK) && dpl > rpl)
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 } else if (seg_reg == R_SS) {
348 /* SS must be writable data */
349 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 if (dpl != cpl || dpl != rpl)
352 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
353 } else {
354 /* not readable code */
355 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 /* if data or non-conforming code, check the rights */
358 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
359 if (dpl < cpl || dpl < rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 }
362 }
363 if (!(e2 & DESC_P_MASK))
364 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
365 cpu_x86_load_seg_cache(env, seg_reg, selector,
366 get_seg_base(e1, e2),
367 get_seg_limit(e1, e2),
368 e2);
369 } else {
370 if (seg_reg == R_SS || seg_reg == R_CS)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372#ifdef VBOX
373# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
374 cpu_x86_load_seg_cache(env, seg_reg, selector,
375 0, 0, 0);
376# endif
377#endif /* VBOX */
378 }
379}
380
381#define SWITCH_TSS_JMP 0
382#define SWITCH_TSS_IRET 1
383#define SWITCH_TSS_CALL 2
384
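/* Hardware task switch: validate the target TSS, save the outgoing context into the current TSS, then load the new registers, segments and LDT. */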
385/* XXX: restore CPU state in registers (PowerPC case) */
386static void switch_tss(int tss_selector,
387 uint32_t e1, uint32_t e2, int source,
388 uint32_t next_eip)
389{
390 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
391 target_ulong tss_base;
392 uint32_t new_regs[8], new_segs[6];
393 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
394 uint32_t old_eflags, eflags_mask;
395 SegmentCache *dt;
396#ifndef VBOX
397 int index;
398#else
399 unsigned int index;
400#endif
401 target_ulong ptr;
402
403 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
404 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
405
406 /* if task gate, we read the TSS segment and we load it */
407 if (type == 5) {
408 if (!(e2 & DESC_P_MASK))
409 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
410 tss_selector = e1 >> 16;
411 if (tss_selector & 4)
412 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
413 if (load_segment(&e1, &e2, tss_selector) != 0)
414 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
415 if (e2 & DESC_S_MASK)
416 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418 if ((type & 7) != 1)
419 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
420 }
421
422 if (!(e2 & DESC_P_MASK))
423 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
424
425 if (type & 8)
426 tss_limit_max = 103;
427 else
428 tss_limit_max = 43;
429 tss_limit = get_seg_limit(e1, e2);
430 tss_base = get_seg_base(e1, e2);
431 if ((tss_selector & 4) != 0 ||
432 tss_limit < tss_limit_max)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
435 if (old_type & 8)
436 old_tss_limit_max = 103;
437 else
438 old_tss_limit_max = 43;
439
440 /* read all the registers from the new TSS */
441 if (type & 8) {
442 /* 32 bit */
443 new_cr3 = ldl_kernel(tss_base + 0x1c);
444 new_eip = ldl_kernel(tss_base + 0x20);
445 new_eflags = ldl_kernel(tss_base + 0x24);
446 for(i = 0; i < 8; i++)
447 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
448 for(i = 0; i < 6; i++)
449 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
450 new_ldt = lduw_kernel(tss_base + 0x60);
451 new_trap = ldl_kernel(tss_base + 0x64);
452 } else {
453 /* 16 bit */
454 new_cr3 = 0;
455 new_eip = lduw_kernel(tss_base + 0x0e);
456 new_eflags = lduw_kernel(tss_base + 0x10);
457 for(i = 0; i < 8; i++)
458 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
459 for(i = 0; i < 4; i++)
460 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
461 new_ldt = lduw_kernel(tss_base + 0x2a);
462 new_segs[R_FS] = 0;
463 new_segs[R_GS] = 0;
464 new_trap = 0;
465 }
466
467 /* NOTE: we must avoid memory exceptions during the task switch,
468 so we make dummy accesses beforehand */
469 /* XXX: it can still fail in some cases, so a bigger hack is
470 necessary to validate the TLB after having done the accesses */
471
472 v1 = ldub_kernel(env->tr.base);
473 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
474 stb_kernel(env->tr.base, v1);
475 stb_kernel(env->tr.base + old_tss_limit_max, v2);
476
477 /* clear busy bit (it is restartable) */
478 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
479 target_ulong ptr;
480 uint32_t e2;
481 ptr = env->gdt.base + (env->tr.selector & ~7);
482 e2 = ldl_kernel(ptr + 4);
483 e2 &= ~DESC_TSS_BUSY_MASK;
484 stl_kernel(ptr + 4, e2);
485 }
486 old_eflags = compute_eflags();
487 if (source == SWITCH_TSS_IRET)
488 old_eflags &= ~NT_MASK;
489
490 /* save the current state in the old TSS */
491 if (type & 8) {
492 /* 32 bit */
493 stl_kernel(env->tr.base + 0x20, next_eip);
494 stl_kernel(env->tr.base + 0x24, old_eflags);
495 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
496 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
497 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
498 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
499 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
500 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
501 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
502 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
503 for(i = 0; i < 6; i++)
504 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
505#ifdef VBOX
506 /* Must store the ldt as it gets reloaded and might have been changed. */
507 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
508#endif
509#if defined(VBOX) && defined(DEBUG)
510 printf("TSS 32 bits switch\n");
511 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
512#endif
513 } else {
514 /* 16 bit */
515 stw_kernel(env->tr.base + 0x0e, next_eip);
516 stw_kernel(env->tr.base + 0x10, old_eflags);
517 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
518 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
519 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
520 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
521 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
522 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
523 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
524 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
525 for(i = 0; i < 4; i++)
526 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
527#ifdef VBOX
528 /* Must store the ldt as it gets reloaded and might have been changed. */
529 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
530#endif
531 }
532
533 /* now if an exception occurs, it will occur in the next task
534 context */
535
536 if (source == SWITCH_TSS_CALL) {
537 stw_kernel(tss_base, env->tr.selector);
538 new_eflags |= NT_MASK;
539 }
540
541 /* set busy bit */
542 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
543 target_ulong ptr;
544 uint32_t e2;
545 ptr = env->gdt.base + (tss_selector & ~7);
546 e2 = ldl_kernel(ptr + 4);
547 e2 |= DESC_TSS_BUSY_MASK;
548 stl_kernel(ptr + 4, e2);
549 }
550
551 /* set the new CPU state */
552 /* from this point, any exception that occurs can cause problems */
553 env->cr[0] |= CR0_TS_MASK;
554 env->hflags |= HF_TS_MASK;
555 env->tr.selector = tss_selector;
556 env->tr.base = tss_base;
557 env->tr.limit = tss_limit;
558 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
559
560 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
561 cpu_x86_update_cr3(env, new_cr3);
562 }
563
564 /* load all registers without raising exceptions, then reload the
565 segment registers with the checks that may raise exceptions */
566 env->eip = new_eip;
567 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
568 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
569 if (!(type & 8))
570 eflags_mask &= 0xffff;
571 load_eflags(new_eflags, eflags_mask);
572 /* XXX: what to do in 16 bit case ? */
573 EAX = new_regs[0];
574 ECX = new_regs[1];
575 EDX = new_regs[2];
576 EBX = new_regs[3];
577 ESP = new_regs[4];
578 EBP = new_regs[5];
579 ESI = new_regs[6];
580 EDI = new_regs[7];
581 if (new_eflags & VM_MASK) {
582 for(i = 0; i < 6; i++)
583 load_seg_vm(i, new_segs[i]);
584 /* in vm86, CPL is always 3 */
585 cpu_x86_set_cpl(env, 3);
586 } else {
587 /* CPL is set to the RPL of CS */
588 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
589 /* load just the selectors first, as loading the full descriptors may trigger exceptions */
590 for(i = 0; i < 6; i++)
591 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
592 }
593
594 env->ldt.selector = new_ldt & ~4;
595 env->ldt.base = 0;
596 env->ldt.limit = 0;
597 env->ldt.flags = 0;
598
599 /* load the LDT */
600 if (new_ldt & 4)
601 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
602
603 if ((new_ldt & 0xfffc) != 0) {
604 dt = &env->gdt;
605 index = new_ldt & ~7;
606 if ((index + 7) > dt->limit)
607 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
608 ptr = dt->base + index;
609 e1 = ldl_kernel(ptr);
610 e2 = ldl_kernel(ptr + 4);
611 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
612 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
613 if (!(e2 & DESC_P_MASK))
614 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
615 load_seg_cache_raw_dt(&env->ldt, e1, e2);
616 }
617
618 /* load the segments */
619 if (!(new_eflags & VM_MASK)) {
620 tss_load_seg(R_CS, new_segs[R_CS]);
621 tss_load_seg(R_SS, new_segs[R_SS]);
622 tss_load_seg(R_ES, new_segs[R_ES]);
623 tss_load_seg(R_DS, new_segs[R_DS]);
624 tss_load_seg(R_FS, new_segs[R_FS]);
625 tss_load_seg(R_GS, new_segs[R_GS]);
626 }
627
628 /* check that EIP is in the CS segment limits */
629 if (new_eip > env->segs[R_CS].limit) {
630 /* XXX: different exception if CALL ? */
631 raise_exception_err(EXCP0D_GPF, 0);
632 }
633
634#ifndef CONFIG_USER_ONLY
635 /* reset local breakpoints */
636 if (env->dr[7] & 0x55) {
637 for (i = 0; i < 4; i++) {
638 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
639 hw_breakpoint_remove(env, i);
640 }
641 env->dr[7] &= ~0x55;
642 }
643#endif
644}
645
646/* check whether port I/O is allowed by the TSS I/O permission bitmap */
647static inline void check_io(int addr, int size)
648{
649#ifndef VBOX
650 int io_offset, val, mask;
651#else
652 int val, mask;
653 unsigned int io_offset;
654#endif /* VBOX */
655
656 /* TSS must be a valid 32 bit one */
657 if (!(env->tr.flags & DESC_P_MASK) ||
658 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
659 env->tr.limit < 103)
660 goto fail;
661 io_offset = lduw_kernel(env->tr.base + 0x66);
662 io_offset += (addr >> 3);
663 /* Note: the check needs two bytes */
664 if ((io_offset + 1) > env->tr.limit)
665 goto fail;
666 val = lduw_kernel(env->tr.base + io_offset);
667 val >>= (addr & 7);
668 mask = (1 << size) - 1;
669 /* all bits must be zero to allow the I/O */
670 if ((val & mask) != 0) {
671 fail:
672 raise_exception_err(EXCP0D_GPF, 0);
673 }
674}
675
676#ifdef VBOX
677
678/* Keep in sync with gen_check_external_event() */
679void helper_check_external_event()
680{
681 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
682 | CPU_INTERRUPT_EXTERNAL_TIMER
683 | CPU_INTERRUPT_EXTERNAL_DMA))
684 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
685 && (env->eflags & IF_MASK)
686 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
687 {
688 helper_external_event();
689 }
690
691}
692
693void helper_sync_seg(uint32_t reg)
694{
695 if (env->segs[reg].newselector)
696 sync_seg(env, reg, env->segs[reg].newselector);
697}
698
699#endif /* VBOX */
700
701void helper_check_iob(uint32_t t0)
702{
703 check_io(t0, 1);
704}
705
706void helper_check_iow(uint32_t t0)
707{
708 check_io(t0, 2);
709}
710
711void helper_check_iol(uint32_t t0)
712{
713 check_io(t0, 4);
714}
715
716void helper_outb(uint32_t port, uint32_t data)
717{
718#ifndef VBOX
719 cpu_outb(port, data & 0xff);
720#else
721 cpu_outb(env, port, data & 0xff);
722#endif
723}
724
725target_ulong helper_inb(uint32_t port)
726{
727#ifndef VBOX
728 return cpu_inb(port);
729#else
730 return cpu_inb(env, port);
731#endif
732}
733
734void helper_outw(uint32_t port, uint32_t data)
735{
736#ifndef VBOX
737 cpu_outw(port, data & 0xffff);
738#else
739 cpu_outw(env, port, data & 0xffff);
740#endif
741}
742
743target_ulong helper_inw(uint32_t port)
744{
745#ifndef VBOX
746 return cpu_inw(port);
747#else
748 return cpu_inw(env, port);
749#endif
750}
751
752void helper_outl(uint32_t port, uint32_t data)
753{
754#ifndef VBOX
755 cpu_outl(port, data);
756#else
757 cpu_outl(env, port, data);
758#endif
759}
760
761target_ulong helper_inl(uint32_t port)
762{
763#ifndef VBOX
764 return cpu_inl(port);
765#else
766 return cpu_inl(env, port);
767#endif
768}
769
770static inline unsigned int get_sp_mask(unsigned int e2)
771{
772 if (e2 & DESC_B_MASK)
773 return 0xffffffff;
774 else
775 return 0xffff;
776}
777
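/* Exceptions 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC) push an error code. */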
778static int exeption_has_error_code(int intno)
779{
780 switch(intno) {
781 case 8:
782 case 10:
783 case 11:
784 case 12:
785 case 13:
786 case 14:
787 case 17:
788 return 1;
789 }
790 return 0;
791}
792
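/* Write back the stack pointer according to the stack-segment size: a 16-bit stack updates only SP, wider stacks update ESP/RSP. */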
793#ifdef TARGET_X86_64
794#define SET_ESP(val, sp_mask)\
795do {\
796 if ((sp_mask) == 0xffff)\
797 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
798 else if ((sp_mask) == 0xffffffffLL)\
799 ESP = (uint32_t)(val);\
800 else\
801 ESP = (val);\
802} while (0)
803#else
804#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
805#endif
806
807/* on 64-bit targets this addition can overflow, so this segment addition macro
808 * is used to trim the value to 32 bits whenever needed */
809#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
810
811/* XXX: add an is_user flag to have proper security support */
812#define PUSHW(ssp, sp, sp_mask, val)\
813{\
814 sp -= 2;\
815 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
816}
817
818#define PUSHL(ssp, sp, sp_mask, val)\
819{\
820 sp -= 4;\
821 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
822}
823
824#define POPW(ssp, sp, sp_mask, val)\
825{\
826 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
827 sp += 2;\
828}
829
830#define POPL(ssp, sp, sp_mask, val)\
831{\
832 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
833 sp += 4;\
834}
835
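/* Deliver an exception or interrupt via the protected-mode IDT: decode the gate (task, interrupt or trap), switch stacks through the TSS when moving to an inner ring, push the return frame and load the handler CS:EIP. */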
836/* protected mode interrupt */
837static void do_interrupt_protected(int intno, int is_int, int error_code,
838 unsigned int next_eip, int is_hw)
839{
840 SegmentCache *dt;
841 target_ulong ptr, ssp;
842 int type, dpl, selector, ss_dpl, cpl;
843 int has_error_code, new_stack, shift;
844 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
845 uint32_t old_eip, sp_mask;
846
847#ifdef VBOX
848 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
849 cpu_loop_exit();
850#endif
851
852 has_error_code = 0;
853 if (!is_int && !is_hw)
854 has_error_code = exeption_has_error_code(intno);
855 if (is_int)
856 old_eip = next_eip;
857 else
858 old_eip = env->eip;
859
860 dt = &env->idt;
861#ifndef VBOX
862 if (intno * 8 + 7 > dt->limit)
863#else
864 if ((unsigned)intno * 8 + 7 > dt->limit)
865#endif
866 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
867 ptr = dt->base + intno * 8;
868 e1 = ldl_kernel(ptr);
869 e2 = ldl_kernel(ptr + 4);
870 /* check gate type */
871 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
872 switch(type) {
873 case 5: /* task gate */
874 /* must do that check here to return the correct error code */
875 if (!(e2 & DESC_P_MASK))
876 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
877 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
878 if (has_error_code) {
879 int type;
880 uint32_t mask;
881 /* push the error code */
882 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
883 shift = type >> 3;
884 if (env->segs[R_SS].flags & DESC_B_MASK)
885 mask = 0xffffffff;
886 else
887 mask = 0xffff;
888 esp = (ESP - (2 << shift)) & mask;
889 ssp = env->segs[R_SS].base + esp;
890 if (shift)
891 stl_kernel(ssp, error_code);
892 else
893 stw_kernel(ssp, error_code);
894 SET_ESP(esp, mask);
895 }
896 return;
897 case 6: /* 286 interrupt gate */
898 case 7: /* 286 trap gate */
899 case 14: /* 386 interrupt gate */
900 case 15: /* 386 trap gate */
901 break;
902 default:
903 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
904 break;
905 }
906 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
907 cpl = env->hflags & HF_CPL_MASK;
908 /* check privilege if software int */
909 if (is_int && dpl < cpl)
910 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
911 /* check valid bit */
912 if (!(e2 & DESC_P_MASK))
913 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
914 selector = e1 >> 16;
915 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
916 if ((selector & 0xfffc) == 0)
917 raise_exception_err(EXCP0D_GPF, 0);
918
919 if (load_segment(&e1, &e2, selector) != 0)
920 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
921 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
922 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
923 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
924 if (dpl > cpl)
925 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
926 if (!(e2 & DESC_P_MASK))
927 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
928 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
929 /* to inner privilege */
930 get_ss_esp_from_tss(&ss, &esp, dpl);
931 if ((ss & 0xfffc) == 0)
932 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
933 if ((ss & 3) != dpl)
934 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
935 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
936 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
937 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
938 if (ss_dpl != dpl)
939 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
940 if (!(ss_e2 & DESC_S_MASK) ||
941 (ss_e2 & DESC_CS_MASK) ||
942 !(ss_e2 & DESC_W_MASK))
943 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
944 if (!(ss_e2 & DESC_P_MASK))
945#ifdef VBOX /* See page 3-477 of 253666.pdf */
946 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
947#else
948 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
949#endif
950 new_stack = 1;
951 sp_mask = get_sp_mask(ss_e2);
952 ssp = get_seg_base(ss_e1, ss_e2);
953#if defined(VBOX) && defined(DEBUG)
954 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
955#endif
956 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
957 /* to same privilege */
958 if (env->eflags & VM_MASK)
959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960 new_stack = 0;
961 sp_mask = get_sp_mask(env->segs[R_SS].flags);
962 ssp = env->segs[R_SS].base;
963 esp = ESP;
964 dpl = cpl;
965 } else {
966 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
967 new_stack = 0; /* avoid warning */
968 sp_mask = 0; /* avoid warning */
969 ssp = 0; /* avoid warning */
970 esp = 0; /* avoid warning */
971 }
972
973 shift = type >> 3;
974
975#if 0
976 /* XXX: check that enough room is available */
977 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
978 if (env->eflags & VM_MASK)
979 push_size += 8;
980 push_size <<= shift;
981#endif
982 if (shift == 1) {
983 if (new_stack) {
984 if (env->eflags & VM_MASK) {
985 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
986 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
987 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
988 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
989 }
990 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
991 PUSHL(ssp, esp, sp_mask, ESP);
992 }
993 PUSHL(ssp, esp, sp_mask, compute_eflags());
994 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
995 PUSHL(ssp, esp, sp_mask, old_eip);
996 if (has_error_code) {
997 PUSHL(ssp, esp, sp_mask, error_code);
998 }
999 } else {
1000 if (new_stack) {
1001 if (env->eflags & VM_MASK) {
1002 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1003 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1004 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1005 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1006 }
1007 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1008 PUSHW(ssp, esp, sp_mask, ESP);
1009 }
1010 PUSHW(ssp, esp, sp_mask, compute_eflags());
1011 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1012 PUSHW(ssp, esp, sp_mask, old_eip);
1013 if (has_error_code) {
1014 PUSHW(ssp, esp, sp_mask, error_code);
1015 }
1016 }
1017
1018 if (new_stack) {
1019 if (env->eflags & VM_MASK) {
1020 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1021 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1022 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1023 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1024 }
1025 ss = (ss & ~3) | dpl;
1026 cpu_x86_load_seg_cache(env, R_SS, ss,
1027 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1028 }
1029 SET_ESP(esp, sp_mask);
1030
1031 selector = (selector & ~3) | dpl;
1032 cpu_x86_load_seg_cache(env, R_CS, selector,
1033 get_seg_base(e1, e2),
1034 get_seg_limit(e1, e2),
1035 e2);
1036 cpu_x86_set_cpl(env, dpl);
1037 env->eip = offset;
1038
1039 /* interrupt gate clear IF mask */
1040 if ((type & 1) == 0) {
1041 env->eflags &= ~IF_MASK;
1042 }
1043#ifndef VBOX
1044 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1045#else
1046 /*
1047 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1048 * gets confused by seemingly changed EFLAGS. See #3491 and
1049 * public bug #2341.
1050 */
1051 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1052#endif
1053}
1054
1055#ifdef VBOX
1056
1057/* check if VME interrupt redirection is enabled in TSS */
1058DECLINLINE(bool) is_vme_irq_redirected(int intno)
1059{
1060 unsigned int io_offset, intredir_offset;
1061 unsigned char val, mask;
1062
1063 /* TSS must be a valid 32 bit one */
1064 if (!(env->tr.flags & DESC_P_MASK) ||
1065 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1066 env->tr.limit < 103)
1067 goto fail;
1068 io_offset = lduw_kernel(env->tr.base + 0x66);
1069 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1070 if (io_offset < 0x68 + 0x20)
1071 io_offset = 0x68 + 0x20;
1072 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1073 intredir_offset = io_offset - 0x20;
1074
1075 intredir_offset += (intno >> 3);
1076 if ((intredir_offset) > env->tr.limit)
1077 goto fail;
1078
1079 val = ldub_kernel(env->tr.base + intredir_offset);
1080 mask = 1 << (unsigned char)(intno & 7);
1081
1082 /* bit set means no redirection. */
1083 if ((val & mask) != 0) {
1084 return false;
1085 }
1086 return true;
1087
1088fail:
1089 raise_exception_err(EXCP0D_GPF, 0);
1090 return true;
1091}
1092
1093/* V86 mode software interrupt with CR4.VME=1 */
1094static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1095{
1096 target_ulong ptr, ssp;
1097 int selector;
1098 uint32_t offset, esp;
1099 uint32_t old_cs, old_eflags;
1100 uint32_t iopl;
1101
1102 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1103
1104 if (!is_vme_irq_redirected(intno))
1105 {
1106 if (iopl == 3)
1107 {
1108 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1109 return;
1110 }
1111 else
1112 raise_exception_err(EXCP0D_GPF, 0);
1113 }
1114
1115 /* virtual mode idt is at linear address 0 */
1116 ptr = 0 + intno * 4;
1117 offset = lduw_kernel(ptr);
1118 selector = lduw_kernel(ptr + 2);
1119 esp = ESP;
1120 ssp = env->segs[R_SS].base;
1121 old_cs = env->segs[R_CS].selector;
1122
1123 old_eflags = compute_eflags();
1124 if (iopl < 3)
1125 {
1126 /* copy VIF into IF and set IOPL to 3 */
1127 if (env->eflags & VIF_MASK)
1128 old_eflags |= IF_MASK;
1129 else
1130 old_eflags &= ~IF_MASK;
1131
1132 old_eflags |= (3 << IOPL_SHIFT);
1133 }
1134
1135 /* XXX: use SS segment size ? */
1136 PUSHW(ssp, esp, 0xffff, old_eflags);
1137 PUSHW(ssp, esp, 0xffff, old_cs);
1138 PUSHW(ssp, esp, 0xffff, next_eip);
1139
1140 /* update processor state */
1141 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1142 env->eip = offset;
1143 env->segs[R_CS].selector = selector;
1144 env->segs[R_CS].base = (selector << 4);
1145 env->eflags &= ~(TF_MASK | RF_MASK);
1146
1147 if (iopl < 3)
1148 env->eflags &= ~VIF_MASK;
1149 else
1150 env->eflags &= ~IF_MASK;
1151}
1152
1153#endif /* VBOX */
1154
1155#ifdef TARGET_X86_64
1156
1157#define PUSHQ(sp, val)\
1158{\
1159 sp -= 8;\
1160 stq_kernel(sp, (val));\
1161}
1162
1163#define POPQ(sp, val)\
1164{\
1165 val = ldq_kernel(sp);\
1166 sp += 8;\
1167}
1168
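/* Fetch a 64-bit stack pointer from the TSS: levels 0-2 select RSP0-RSP2, levels 4-10 select IST1-IST7. */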
1169static inline target_ulong get_rsp_from_tss(int level)
1170{
1171 int index;
1172
1173#if 0
1174 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1175 env->tr.base, env->tr.limit);
1176#endif
1177
1178 if (!(env->tr.flags & DESC_P_MASK))
1179 cpu_abort(env, "invalid tss");
1180 index = 8 * level + 4;
1181 if ((index + 7) > env->tr.limit)
1182 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1183 return ldq_kernel(env->tr.base + index);
1184}
1185
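/* Deliver an exception or interrupt via the 64-bit IDT: gates are 16 bytes wide, the new stack may come from an IST slot, and SS:RSP is always pushed. */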
1186/* 64 bit interrupt */
1187static void do_interrupt64(int intno, int is_int, int error_code,
1188 target_ulong next_eip, int is_hw)
1189{
1190 SegmentCache *dt;
1191 target_ulong ptr;
1192 int type, dpl, selector, cpl, ist;
1193 int has_error_code, new_stack;
1194 uint32_t e1, e2, e3, ss;
1195 target_ulong old_eip, esp, offset;
1196
1197#ifdef VBOX
1198 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1199 cpu_loop_exit();
1200#endif
1201
1202 has_error_code = 0;
1203 if (!is_int && !is_hw)
1204 has_error_code = exeption_has_error_code(intno);
1205 if (is_int)
1206 old_eip = next_eip;
1207 else
1208 old_eip = env->eip;
1209
1210 dt = &env->idt;
1211 if (intno * 16 + 15 > dt->limit)
1212 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1213 ptr = dt->base + intno * 16;
1214 e1 = ldl_kernel(ptr);
1215 e2 = ldl_kernel(ptr + 4);
1216 e3 = ldl_kernel(ptr + 8);
1217 /* check gate type */
1218 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1219 switch(type) {
1220 case 14: /* 386 interrupt gate */
1221 case 15: /* 386 trap gate */
1222 break;
1223 default:
1224 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1225 break;
1226 }
1227 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1228 cpl = env->hflags & HF_CPL_MASK;
1229 /* check privilege if software int */
1230 if (is_int && dpl < cpl)
1231 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1232 /* check valid bit */
1233 if (!(e2 & DESC_P_MASK))
1234 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1235 selector = e1 >> 16;
1236 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1237 ist = e2 & 7;
1238 if ((selector & 0xfffc) == 0)
1239 raise_exception_err(EXCP0D_GPF, 0);
1240
1241 if (load_segment(&e1, &e2, selector) != 0)
1242 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1243 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1244 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1245 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1246 if (dpl > cpl)
1247 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1248 if (!(e2 & DESC_P_MASK))
1249 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1250 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1251 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1252 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1253 /* to inner privilege */
1254 if (ist != 0)
1255 esp = get_rsp_from_tss(ist + 3);
1256 else
1257 esp = get_rsp_from_tss(dpl);
1258 esp &= ~0xfLL; /* align stack */
1259 ss = 0;
1260 new_stack = 1;
1261 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1262 /* to same privilege */
1263 if (env->eflags & VM_MASK)
1264 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1265 new_stack = 0;
1266 if (ist != 0)
1267 esp = get_rsp_from_tss(ist + 3);
1268 else
1269 esp = ESP;
1270 esp &= ~0xfLL; /* align stack */
1271 dpl = cpl;
1272 } else {
1273 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1274 new_stack = 0; /* avoid warning */
1275 esp = 0; /* avoid warning */
1276 }
1277
1278 PUSHQ(esp, env->segs[R_SS].selector);
1279 PUSHQ(esp, ESP);
1280 PUSHQ(esp, compute_eflags());
1281 PUSHQ(esp, env->segs[R_CS].selector);
1282 PUSHQ(esp, old_eip);
1283 if (has_error_code) {
1284 PUSHQ(esp, error_code);
1285 }
1286
1287 if (new_stack) {
1288 ss = 0 | dpl;
1289 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1290 }
1291 ESP = esp;
1292
1293 selector = (selector & ~3) | dpl;
1294 cpu_x86_load_seg_cache(env, R_CS, selector,
1295 get_seg_base(e1, e2),
1296 get_seg_limit(e1, e2),
1297 e2);
1298 cpu_x86_set_cpl(env, dpl);
1299 env->eip = offset;
1300
1301 /* interrupt gate clear IF mask */
1302 if ((type & 1) == 0) {
1303 env->eflags &= ~IF_MASK;
1304 }
1305#ifndef VBOX
1306 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1307#else /* VBOX */
1308 /*
1309 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1310 * gets confused by seemingly changed EFLAGS. See #3491 and
1311 * public bug #2341.
1312 */
1313 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1314#endif /* VBOX */
1315}
1316#endif
1317
1318#ifdef TARGET_X86_64
1319#if defined(CONFIG_USER_ONLY)
1320void helper_syscall(int next_eip_addend)
1321{
1322 env->exception_index = EXCP_SYSCALL;
1323 env->exception_next_eip = env->eip + next_eip_addend;
1324 cpu_loop_exit();
1325}
1326#else
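/* SYSCALL: save the return RIP in RCX (and RFLAGS in R11 in long mode), load flat CS/SS from MSR_STAR and jump to the LSTAR/CSTAR/STAR target. */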
1327void helper_syscall(int next_eip_addend)
1328{
1329 int selector;
1330
1331 if (!(env->efer & MSR_EFER_SCE)) {
1332 raise_exception_err(EXCP06_ILLOP, 0);
1333 }
1334 selector = (env->star >> 32) & 0xffff;
1335 if (env->hflags & HF_LMA_MASK) {
1336 int code64;
1337
1338 ECX = env->eip + next_eip_addend;
1339 env->regs[11] = compute_eflags();
1340
1341 code64 = env->hflags & HF_CS64_MASK;
1342
1343 cpu_x86_set_cpl(env, 0);
1344 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1345 0, 0xffffffff,
1346 DESC_G_MASK | DESC_P_MASK |
1347 DESC_S_MASK |
1348 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1349 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1350 0, 0xffffffff,
1351 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1352 DESC_S_MASK |
1353 DESC_W_MASK | DESC_A_MASK);
1354 env->eflags &= ~env->fmask;
1355 load_eflags(env->eflags, 0);
1356 if (code64)
1357 env->eip = env->lstar;
1358 else
1359 env->eip = env->cstar;
1360 } else {
1361 ECX = (uint32_t)(env->eip + next_eip_addend);
1362
1363 cpu_x86_set_cpl(env, 0);
1364 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1365 0, 0xffffffff,
1366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1367 DESC_S_MASK |
1368 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1369 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1370 0, 0xffffffff,
1371 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1372 DESC_S_MASK |
1373 DESC_W_MASK | DESC_A_MASK);
1374 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1375 env->eip = (uint32_t)env->star;
1376 }
1377}
1378#endif
1379#endif
1380
1381#ifdef TARGET_X86_64
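/* SYSRET: return to CPL 3 with CS/SS derived from MSR_STAR[63:48]; in long mode RFLAGS is restored from R11 and RIP from RCX. */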
1382void helper_sysret(int dflag)
1383{
1384 int cpl, selector;
1385
1386 if (!(env->efer & MSR_EFER_SCE)) {
1387 raise_exception_err(EXCP06_ILLOP, 0);
1388 }
1389 cpl = env->hflags & HF_CPL_MASK;
1390 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1391 raise_exception_err(EXCP0D_GPF, 0);
1392 }
1393 selector = (env->star >> 48) & 0xffff;
1394 if (env->hflags & HF_LMA_MASK) {
1395 if (dflag == 2) {
1396 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1397 0, 0xffffffff,
1398 DESC_G_MASK | DESC_P_MASK |
1399 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1400 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1401 DESC_L_MASK);
1402 env->eip = ECX;
1403 } else {
1404 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1405 0, 0xffffffff,
1406 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1407 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1408 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1409 env->eip = (uint32_t)ECX;
1410 }
1411 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1412 0, 0xffffffff,
1413 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1414 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1415 DESC_W_MASK | DESC_A_MASK);
1416 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1417 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1418 cpu_x86_set_cpl(env, 3);
1419 } else {
1420 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1421 0, 0xffffffff,
1422 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1423 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1424 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1425 env->eip = (uint32_t)ECX;
1426 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1427 0, 0xffffffff,
1428 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1429 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1430 DESC_W_MASK | DESC_A_MASK);
1431 env->eflags |= IF_MASK;
1432 cpu_x86_set_cpl(env, 3);
1433 }
1434}
1435#endif
1436
1437#ifdef VBOX
1438
1439/**
1440 * Checks and processes external VMM events.
1441 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1442 */
1443void helper_external_event(void)
1444{
1445# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1446 uintptr_t uSP;
1447# ifdef RT_ARCH_AMD64
1448 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1449# else
1450 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1451# endif
1452 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1453# endif
1454 /* Keep in sync with flags checked by gen_check_external_event() */
1455 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1456 {
1457 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1458 ~CPU_INTERRUPT_EXTERNAL_HARD);
1459 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1460 }
1461 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1462 {
1463 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1464 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1465 cpu_exit(env);
1466 }
1467 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1468 {
1469 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1470 ~CPU_INTERRUPT_EXTERNAL_DMA);
1471 remR3DmaRun(env);
1472 }
1473 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1474 {
1475 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1476 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1477 remR3TimersRun(env);
1478 }
1479}
1480
1481/* helper for recording call instruction addresses for later scanning */
1482void helper_record_call()
1483{
1484 if ( !(env->state & CPU_RAW_RING0)
1485 && (env->cr[0] & CR0_PG_MASK)
1486 && !(env->eflags & X86_EFL_IF))
1487 remR3RecordCall(env);
1488}
1489
1490#endif /* VBOX */
1491
1492/* real mode interrupt */
1493static void do_interrupt_real(int intno, int is_int, int error_code,
1494 unsigned int next_eip)
1495{
1496 SegmentCache *dt;
1497 target_ulong ptr, ssp;
1498 int selector;
1499 uint32_t offset, esp;
1500 uint32_t old_cs, old_eip;
1501
1502 /* real mode (simpler !) */
1503 dt = &env->idt;
1504#ifndef VBOX
1505 if (intno * 4 + 3 > dt->limit)
1506#else
1507 if ((unsigned)intno * 4 + 3 > dt->limit)
1508#endif
1509 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1510 ptr = dt->base + intno * 4;
1511 offset = lduw_kernel(ptr);
1512 selector = lduw_kernel(ptr + 2);
1513 esp = ESP;
1514 ssp = env->segs[R_SS].base;
1515 if (is_int)
1516 old_eip = next_eip;
1517 else
1518 old_eip = env->eip;
1519 old_cs = env->segs[R_CS].selector;
1520 /* XXX: use SS segment size ? */
1521 PUSHW(ssp, esp, 0xffff, compute_eflags());
1522 PUSHW(ssp, esp, 0xffff, old_cs);
1523 PUSHW(ssp, esp, 0xffff, old_eip);
1524
1525 /* update processor state */
1526 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1527 env->eip = offset;
1528 env->segs[R_CS].selector = selector;
1529 env->segs[R_CS].base = (selector << 4);
1530 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1531}
1532
1533/* fake user mode interrupt */
1534void do_interrupt_user(int intno, int is_int, int error_code,
1535 target_ulong next_eip)
1536{
1537 SegmentCache *dt;
1538 target_ulong ptr;
1539 int dpl, cpl, shift;
1540 uint32_t e2;
1541
1542 dt = &env->idt;
1543 if (env->hflags & HF_LMA_MASK) {
1544 shift = 4;
1545 } else {
1546 shift = 3;
1547 }
1548 ptr = dt->base + (intno << shift);
1549 e2 = ldl_kernel(ptr + 4);
1550
1551 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1552 cpl = env->hflags & HF_CPL_MASK;
1553 /* check privilege if software int */
1554 if (is_int && dpl < cpl)
1555 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1556
1557 /* Since we emulate only user space, we cannot do more than
1558 exit the emulation with the appropriate exception and error
1559 code */
1560 if (is_int)
1561 EIP = next_eip;
1562}
1563
1564#if !defined(CONFIG_USER_ONLY)
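/* While an SVM guest is running, record the event being delivered in the VMCB EVENTINJ field unless an injection is already pending. */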
1565static void handle_even_inj(int intno, int is_int, int error_code,
1566 int is_hw, int rm)
1567{
1568 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1569 if (!(event_inj & SVM_EVTINJ_VALID)) {
1570 int type;
1571 if (is_int)
1572 type = SVM_EVTINJ_TYPE_SOFT;
1573 else
1574 type = SVM_EVTINJ_TYPE_EXEPT;
1575 event_inj = intno | type | SVM_EVTINJ_VALID;
1576 if (!rm && exeption_has_error_code(intno)) {
1577 event_inj |= SVM_EVTINJ_VALID_ERR;
1578 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1579 }
1580 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1581 }
1582}
1583#endif
1584
1585/*
1586 * Begin execution of an interrupt. is_int is TRUE if coming from
1587 * the int instruction. next_eip is the EIP value AFTER the interrupt
1588 * instruction. It is only relevant if is_int is TRUE.
1589 */
1590void do_interrupt(int intno, int is_int, int error_code,
1591 target_ulong next_eip, int is_hw)
1592{
1593 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1594 if ((env->cr[0] & CR0_PE_MASK)) {
1595 static int count;
1596 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1597 count, intno, error_code, is_int,
1598 env->hflags & HF_CPL_MASK,
1599 env->segs[R_CS].selector, EIP,
1600 (int)env->segs[R_CS].base + EIP,
1601 env->segs[R_SS].selector, ESP);
1602 if (intno == 0x0e) {
1603 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1604 } else {
1605 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1606 }
1607 qemu_log("\n");
1608 log_cpu_state(env, X86_DUMP_CCOP);
1609#if 0
1610 {
1611 int i;
1612 uint8_t *ptr;
1613 qemu_log(" code=");
1614 ptr = env->segs[R_CS].base + env->eip;
1615 for(i = 0; i < 16; i++) {
1616 qemu_log(" %02x", ldub(ptr + i));
1617 }
1618 qemu_log("\n");
1619 }
1620#endif
1621 count++;
1622 }
1623 }
1624#ifdef VBOX
1625 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1626 if (is_int) {
1627 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1628 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1629 } else {
1630 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1631 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1632 }
1633 }
1634#endif
1635 if (env->cr[0] & CR0_PE_MASK) {
1636#if !defined(CONFIG_USER_ONLY)
1637 if (env->hflags & HF_SVMI_MASK)
1638 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1639#endif
1640#ifdef TARGET_X86_64
1641 if (env->hflags & HF_LMA_MASK) {
1642 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1643 } else
1644#endif
1645 {
1646#ifdef VBOX
1647 /* int xx *, v86 code and VME enabled? */
1648 if ( (env->eflags & VM_MASK)
1649 && (env->cr[4] & CR4_VME_MASK)
1650 && is_int
1651 && !is_hw
1652 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1653 )
1654 do_soft_interrupt_vme(intno, error_code, next_eip);
1655 else
1656#endif /* VBOX */
1657 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1658 }
1659 } else {
1660#if !defined(CONFIG_USER_ONLY)
1661 if (env->hflags & HF_SVMI_MASK)
1662 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1663#endif
1664 do_interrupt_real(intno, is_int, error_code, next_eip);
1665 }
1666
1667#if !defined(CONFIG_USER_ONLY)
1668 if (env->hflags & HF_SVMI_MASK) {
1669 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1670 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1671 }
1672#endif
1673}
1674
1675/* This should come from sysemu.h - if we could include it here... */
1676void qemu_system_reset_request(void);
1677
1678/*
1679 * Check nested exceptions and change to double or triple fault if
1680 * needed. It should only be called if this is not an interrupt.
1681 * Returns the new exception number.
1682 */
1683static int check_exception(int intno, int *error_code)
1684{
1685 int first_contributory = env->old_exception == 0 ||
1686 (env->old_exception >= 10 &&
1687 env->old_exception <= 13);
1688 int second_contributory = intno == 0 ||
1689 (intno >= 10 && intno <= 13);
1690
1691 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1692 env->old_exception, intno);
1693
1694#if !defined(CONFIG_USER_ONLY)
1695 if (env->old_exception == EXCP08_DBLE) {
1696 if (env->hflags & HF_SVMI_MASK)
1697 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1698
1699 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1700
1701# ifndef VBOX
1702 qemu_system_reset_request();
1703# else
1704 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1705# endif
1706 return EXCP_HLT;
1707 }
1708#endif
1709
1710 if ((first_contributory && second_contributory)
1711 || (env->old_exception == EXCP0E_PAGE &&
1712 (second_contributory || (intno == EXCP0E_PAGE)))) {
1713 intno = EXCP08_DBLE;
1714 *error_code = 0;
1715 }
1716
1717 if (second_contributory || (intno == EXCP0E_PAGE) ||
1718 (intno == EXCP08_DBLE))
1719 env->old_exception = intno;
1720
1721 return intno;
1722}
1723
1724/*
1725 * Signal an interrupt. It is executed in the main CPU loop.
1726 * is_int is TRUE if coming from the int instruction. next_eip is the
1727 * EIP value AFTER the interrupt instruction. It is only relevant if
1728 * is_int is TRUE.
1729 */
1730static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1731 int next_eip_addend)
1732{
1733#if defined(VBOX) && defined(DEBUG)
1734 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1735#endif
1736 if (!is_int) {
1737 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1738 intno = check_exception(intno, &error_code);
1739 } else {
1740 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1741 }
1742
1743 env->exception_index = intno;
1744 env->error_code = error_code;
1745 env->exception_is_int = is_int;
1746 env->exception_next_eip = env->eip + next_eip_addend;
1747 cpu_loop_exit();
1748}
1749
1750/* shortcuts to generate exceptions */
1751
1752void raise_exception_err(int exception_index, int error_code)
1753{
1754 raise_interrupt(exception_index, 0, error_code, 0);
1755}
1756
1757void raise_exception(int exception_index)
1758{
1759 raise_interrupt(exception_index, 0, 0, 0);
1760}
1761
1762void raise_exception_env(int exception_index, CPUState *nenv)
1763{
1764 env = nenv;
1765 raise_exception(exception_index);
1766}
1767/* SMM support */
1768
1769#if defined(CONFIG_USER_ONLY)
1770
1771void do_smm_enter(void)
1772{
1773}
1774
1775void helper_rsm(void)
1776{
1777}
1778
1779#else
1780
1781#ifdef TARGET_X86_64
1782#define SMM_REVISION_ID 0x00020064
1783#else
1784#define SMM_REVISION_ID 0x00020000
1785#endif
1786
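/* Enter System Management Mode: save the CPU state into the SMRAM state save area and resume execution at SMBASE + 0x8000 with flat segments. */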
1787void do_smm_enter(void)
1788{
1789 target_ulong sm_state;
1790 SegmentCache *dt;
1791 int i, offset;
1792
1793 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1794 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1795
1796 env->hflags |= HF_SMM_MASK;
1797 cpu_smm_update(env);
1798
1799 sm_state = env->smbase + 0x8000;
1800
1801#ifdef TARGET_X86_64
1802 for(i = 0; i < 6; i++) {
1803 dt = &env->segs[i];
1804 offset = 0x7e00 + i * 16;
1805 stw_phys(sm_state + offset, dt->selector);
1806 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1807 stl_phys(sm_state + offset + 4, dt->limit);
1808 stq_phys(sm_state + offset + 8, dt->base);
1809 }
1810
1811 stq_phys(sm_state + 0x7e68, env->gdt.base);
1812 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1813
1814 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1815 stq_phys(sm_state + 0x7e78, env->ldt.base);
1816 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1817 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1818
1819 stq_phys(sm_state + 0x7e88, env->idt.base);
1820 stl_phys(sm_state + 0x7e84, env->idt.limit);
1821
1822 stw_phys(sm_state + 0x7e90, env->tr.selector);
1823 stq_phys(sm_state + 0x7e98, env->tr.base);
1824 stl_phys(sm_state + 0x7e94, env->tr.limit);
1825 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1826
1827 stq_phys(sm_state + 0x7ed0, env->efer);
1828
1829 stq_phys(sm_state + 0x7ff8, EAX);
1830 stq_phys(sm_state + 0x7ff0, ECX);
1831 stq_phys(sm_state + 0x7fe8, EDX);
1832 stq_phys(sm_state + 0x7fe0, EBX);
1833 stq_phys(sm_state + 0x7fd8, ESP);
1834 stq_phys(sm_state + 0x7fd0, EBP);
1835 stq_phys(sm_state + 0x7fc8, ESI);
1836 stq_phys(sm_state + 0x7fc0, EDI);
1837 for(i = 8; i < 16; i++)
1838 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1839 stq_phys(sm_state + 0x7f78, env->eip);
1840 stl_phys(sm_state + 0x7f70, compute_eflags());
1841 stl_phys(sm_state + 0x7f68, env->dr[6]);
1842 stl_phys(sm_state + 0x7f60, env->dr[7]);
1843
1844 stl_phys(sm_state + 0x7f48, env->cr[4]);
1845 stl_phys(sm_state + 0x7f50, env->cr[3]);
1846 stl_phys(sm_state + 0x7f58, env->cr[0]);
1847
1848 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1849 stl_phys(sm_state + 0x7f00, env->smbase);
1850#else
1851 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1852 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1853 stl_phys(sm_state + 0x7ff4, compute_eflags());
1854 stl_phys(sm_state + 0x7ff0, env->eip);
1855 stl_phys(sm_state + 0x7fec, EDI);
1856 stl_phys(sm_state + 0x7fe8, ESI);
1857 stl_phys(sm_state + 0x7fe4, EBP);
1858 stl_phys(sm_state + 0x7fe0, ESP);
1859 stl_phys(sm_state + 0x7fdc, EBX);
1860 stl_phys(sm_state + 0x7fd8, EDX);
1861 stl_phys(sm_state + 0x7fd4, ECX);
1862 stl_phys(sm_state + 0x7fd0, EAX);
1863 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1864 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1865
1866 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1867 stl_phys(sm_state + 0x7f64, env->tr.base);
1868 stl_phys(sm_state + 0x7f60, env->tr.limit);
1869 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1870
1871 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1872 stl_phys(sm_state + 0x7f80, env->ldt.base);
1873 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1874 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1875
1876 stl_phys(sm_state + 0x7f74, env->gdt.base);
1877 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1878
1879 stl_phys(sm_state + 0x7f58, env->idt.base);
1880 stl_phys(sm_state + 0x7f54, env->idt.limit);
1881
1882 for(i = 0; i < 6; i++) {
1883 dt = &env->segs[i];
1884 if (i < 3)
1885 offset = 0x7f84 + i * 12;
1886 else
1887 offset = 0x7f2c + (i - 3) * 12;
1888 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1889 stl_phys(sm_state + offset + 8, dt->base);
1890 stl_phys(sm_state + offset + 4, dt->limit);
1891 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1892 }
1893 stl_phys(sm_state + 0x7f14, env->cr[4]);
1894
1895 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1896 stl_phys(sm_state + 0x7ef8, env->smbase);
1897#endif
1898 /* init SMM cpu state */
1899
1900#ifdef TARGET_X86_64
1901 cpu_load_efer(env, 0);
1902#endif
1903 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1904 env->eip = 0x00008000;
1905 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1906 0xffffffff, 0);
1907 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1908 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1909 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1910 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1911 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1912
1913 cpu_x86_update_cr0(env,
1914 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1915 cpu_x86_update_cr4(env, 0);
1916 env->dr[7] = 0x00000400;
1917 CC_OP = CC_OP_EFLAGS;
1918}
1919
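/*
 * helper_rsm() is the inverse of do_smm_enter(): it reloads the CPU state
 * from the SMRAM save area at smbase + 0x8000 (64-bit or legacy 32-bit
 * layout) and leaves system management mode.  SMBASE itself is only
 * relocated when bit 17 of the saved revision ID dword is set, matching the
 * SMM_REVISION_ID value written above.  Under VBOX the helper simply
 * aborts, presumably because VirtualBox never enters SMM through the
 * recompiler.
 */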
1920void helper_rsm(void)
1921{
1922#ifdef VBOX
1923 cpu_abort(env, "helper_rsm");
1924#else /* !VBOX */
1925 target_ulong sm_state;
1926 int i, offset;
1927 uint32_t val;
1928
1929 sm_state = env->smbase + 0x8000;
1930#ifdef TARGET_X86_64
1931 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1932
1933 for(i = 0; i < 6; i++) {
1934 offset = 0x7e00 + i * 16;
1935 cpu_x86_load_seg_cache(env, i,
1936 lduw_phys(sm_state + offset),
1937 ldq_phys(sm_state + offset + 8),
1938 ldl_phys(sm_state + offset + 4),
1939 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1940 }
1941
1942 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1943 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1944
1945 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1946 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1947 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1948 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1949
1950 env->idt.base = ldq_phys(sm_state + 0x7e88);
1951 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1952
1953 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1954 env->tr.base = ldq_phys(sm_state + 0x7e98);
1955 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1956 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1957
1958 EAX = ldq_phys(sm_state + 0x7ff8);
1959 ECX = ldq_phys(sm_state + 0x7ff0);
1960 EDX = ldq_phys(sm_state + 0x7fe8);
1961 EBX = ldq_phys(sm_state + 0x7fe0);
1962 ESP = ldq_phys(sm_state + 0x7fd8);
1963 EBP = ldq_phys(sm_state + 0x7fd0);
1964 ESI = ldq_phys(sm_state + 0x7fc8);
1965 EDI = ldq_phys(sm_state + 0x7fc0);
1966 for(i = 8; i < 16; i++)
1967 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1968 env->eip = ldq_phys(sm_state + 0x7f78);
1969 load_eflags(ldl_phys(sm_state + 0x7f70),
1970 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1971 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1972 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1973
1974 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1975 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1976 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1977
1978 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1979 if (val & 0x20000) {
1980 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1981 }
1982#else
1983 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1984 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1985 load_eflags(ldl_phys(sm_state + 0x7ff4),
1986 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1987 env->eip = ldl_phys(sm_state + 0x7ff0);
1988 EDI = ldl_phys(sm_state + 0x7fec);
1989 ESI = ldl_phys(sm_state + 0x7fe8);
1990 EBP = ldl_phys(sm_state + 0x7fe4);
1991 ESP = ldl_phys(sm_state + 0x7fe0);
1992 EBX = ldl_phys(sm_state + 0x7fdc);
1993 EDX = ldl_phys(sm_state + 0x7fd8);
1994 ECX = ldl_phys(sm_state + 0x7fd4);
1995 EAX = ldl_phys(sm_state + 0x7fd0);
1996 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1997 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1998
1999 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2000 env->tr.base = ldl_phys(sm_state + 0x7f64);
2001 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2002 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2003
2004 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2005 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2006 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2007 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2008
2009 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2010 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2011
2012 env->idt.base = ldl_phys(sm_state + 0x7f58);
2013 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2014
2015 for(i = 0; i < 6; i++) {
2016 if (i < 3)
2017 offset = 0x7f84 + i * 12;
2018 else
2019 offset = 0x7f2c + (i - 3) * 12;
2020 cpu_x86_load_seg_cache(env, i,
2021 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2022 ldl_phys(sm_state + offset + 8),
2023 ldl_phys(sm_state + offset + 4),
2024 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2025 }
2026 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2027
2028 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2029 if (val & 0x20000) {
2030 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2031 }
2032#endif
2033 CC_OP = CC_OP_EFLAGS;
2034 env->hflags &= ~HF_SMM_MASK;
2035 cpu_smm_update(env);
2036
2037 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2038 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2039#endif /* !VBOX */
2040}
2041
2042#endif /* !CONFIG_USER_ONLY */
2043
2044
2045/* division, flags are undefined */
2046
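/*
 * Unsigned 8-bit DIV: the dividend is AX, the quotient goes to AL and the
 * remainder to AH.  Both a zero divisor and a quotient that does not fit in
 * 8 bits raise #DE (EXCP00_DIVZ), the single divide-error vector real
 * hardware uses for both conditions.  Worked example: AX = 0x0133 (307)
 * divided by 10 yields AL = 0x1e (30) and AH = 0x07.  The 16-, 32-bit and
 * signed variants below follow the same pattern with DX:AX and EDX:EAX as
 * the dividend.
 */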
2047void helper_divb_AL(target_ulong t0)
2048{
2049 unsigned int num, den, q, r;
2050
2051 num = (EAX & 0xffff);
2052 den = (t0 & 0xff);
2053 if (den == 0) {
2054 raise_exception(EXCP00_DIVZ);
2055 }
2056 q = (num / den);
2057 if (q > 0xff)
2058 raise_exception(EXCP00_DIVZ);
2059 q &= 0xff;
2060 r = (num % den) & 0xff;
2061 EAX = (EAX & ~0xffff) | (r << 8) | q;
2062}
2063
2064void helper_idivb_AL(target_ulong t0)
2065{
2066 int num, den, q, r;
2067
2068 num = (int16_t)EAX;
2069 den = (int8_t)t0;
2070 if (den == 0) {
2071 raise_exception(EXCP00_DIVZ);
2072 }
2073 q = (num / den);
2074 if (q != (int8_t)q)
2075 raise_exception(EXCP00_DIVZ);
2076 q &= 0xff;
2077 r = (num % den) & 0xff;
2078 EAX = (EAX & ~0xffff) | (r << 8) | q;
2079}
2080
2081void helper_divw_AX(target_ulong t0)
2082{
2083 unsigned int num, den, q, r;
2084
2085 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2086 den = (t0 & 0xffff);
2087 if (den == 0) {
2088 raise_exception(EXCP00_DIVZ);
2089 }
2090 q = (num / den);
2091 if (q > 0xffff)
2092 raise_exception(EXCP00_DIVZ);
2093 q &= 0xffff;
2094 r = (num % den) & 0xffff;
2095 EAX = (EAX & ~0xffff) | q;
2096 EDX = (EDX & ~0xffff) | r;
2097}
2098
2099void helper_idivw_AX(target_ulong t0)
2100{
2101 int num, den, q, r;
2102
2103 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2104 den = (int16_t)t0;
2105 if (den == 0) {
2106 raise_exception(EXCP00_DIVZ);
2107 }
2108 q = (num / den);
2109 if (q != (int16_t)q)
2110 raise_exception(EXCP00_DIVZ);
2111 q &= 0xffff;
2112 r = (num % den) & 0xffff;
2113 EAX = (EAX & ~0xffff) | q;
2114 EDX = (EDX & ~0xffff) | r;
2115}
2116
2117void helper_divl_EAX(target_ulong t0)
2118{
2119 unsigned int den, r;
2120 uint64_t num, q;
2121
2122 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2123 den = t0;
2124 if (den == 0) {
2125 raise_exception(EXCP00_DIVZ);
2126 }
2127 q = (num / den);
2128 r = (num % den);
2129 if (q > 0xffffffff)
2130 raise_exception(EXCP00_DIVZ);
2131 EAX = (uint32_t)q;
2132 EDX = (uint32_t)r;
2133}
2134
2135void helper_idivl_EAX(target_ulong t0)
2136{
2137 int den, r;
2138 int64_t num, q;
2139
2140 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2141 den = t0;
2142 if (den == 0) {
2143 raise_exception(EXCP00_DIVZ);
2144 }
2145 q = (num / den);
2146 r = (num % den);
2147 if (q != (int32_t)q)
2148 raise_exception(EXCP00_DIVZ);
2149 EAX = (uint32_t)q;
2150 EDX = (uint32_t)r;
2151}
2152
2153/* bcd */
2154
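/*
 * ASCII/BCD adjustment helpers.  AAM splits AL by the immediate base
 * (normally 10): AL = 123 gives AH = 12, AL = 3.  AAD folds AH back into
 * AL as AL = AH * base + AL.  Only SF, ZF and PF are architecturally
 * defined afterwards; CC_DST holds the result they are derived from.
 */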
2155/* XXX: AAM with an immediate base of 0 should raise #DE (divide error); not handled here */
2156void helper_aam(int base)
2157{
2158 int al, ah;
2159 al = EAX & 0xff;
2160 ah = al / base;
2161 al = al % base;
2162 EAX = (EAX & ~0xffff) | al | (ah << 8);
2163 CC_DST = al;
2164}
2165
2166void helper_aad(int base)
2167{
2168 int al, ah;
2169 al = EAX & 0xff;
2170 ah = (EAX >> 8) & 0xff;
2171 al = ((ah * base) + al) & 0xff;
2172 EAX = (EAX & ~0xffff) | al;
2173 CC_DST = al;
2174}
2175
2176void helper_aaa(void)
2177{
2178 int icarry;
2179 int al, ah, af;
2180 int eflags;
2181
2182 eflags = helper_cc_compute_all(CC_OP);
2183 af = eflags & CC_A;
2184 al = EAX & 0xff;
2185 ah = (EAX >> 8) & 0xff;
2186
2187 icarry = (al > 0xf9);
2188 if (((al & 0x0f) > 9 ) || af) {
2189 al = (al + 6) & 0x0f;
2190 ah = (ah + 1 + icarry) & 0xff;
2191 eflags |= CC_C | CC_A;
2192 } else {
2193 eflags &= ~(CC_C | CC_A);
2194 al &= 0x0f;
2195 }
2196 EAX = (EAX & ~0xffff) | al | (ah << 8);
2197 CC_SRC = eflags;
2198}
2199
2200void helper_aas(void)
2201{
2202 int icarry;
2203 int al, ah, af;
2204 int eflags;
2205
2206 eflags = helper_cc_compute_all(CC_OP);
2207 af = eflags & CC_A;
2208 al = EAX & 0xff;
2209 ah = (EAX >> 8) & 0xff;
2210
2211 icarry = (al < 6);
2212 if (((al & 0x0f) > 9 ) || af) {
2213 al = (al - 6) & 0x0f;
2214 ah = (ah - 1 - icarry) & 0xff;
2215 eflags |= CC_C | CC_A;
2216 } else {
2217 eflags &= ~(CC_C | CC_A);
2218 al &= 0x0f;
2219 }
2220 EAX = (EAX & ~0xffff) | al | (ah << 8);
2221 CC_SRC = eflags;
2222}
2223
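/*
 * DAA adjusts AL after adding two packed-BCD operands.  Example:
 * 0x45 + 0x38 = 0x7d; the low nibble 0xd exceeds 9, so 6 is added, giving
 * 0x83 with AF set - the correct BCD sum of 45 + 38.  DAS is the
 * subtraction counterpart.  ZF/PF/SF are recomputed by hand because the
 * generic flag machinery does not model these instructions.
 */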
2224void helper_daa(void)
2225{
2226 int al, af, cf;
2227 int eflags;
2228
2229 eflags = helper_cc_compute_all(CC_OP);
2230 cf = eflags & CC_C;
2231 af = eflags & CC_A;
2232 al = EAX & 0xff;
2233
2234 eflags = 0;
2235 if (((al & 0x0f) > 9 ) || af) {
2236 al = (al + 6) & 0xff;
2237 eflags |= CC_A;
2238 }
2239 if ((al > 0x9f) || cf) {
2240 al = (al + 0x60) & 0xff;
2241 eflags |= CC_C;
2242 }
2243 EAX = (EAX & ~0xff) | al;
2244 /* well, speed is not an issue here, so we compute the flags by hand */
2245 eflags |= (al == 0) << 6; /* zf */
2246 eflags |= parity_table[al]; /* pf */
2247 eflags |= (al & 0x80); /* sf */
2248 CC_SRC = eflags;
2249}
2250
2251void helper_das(void)
2252{
2253 int al, al1, af, cf;
2254 int eflags;
2255
2256 eflags = helper_cc_compute_all(CC_OP);
2257 cf = eflags & CC_C;
2258 af = eflags & CC_A;
2259 al = EAX & 0xff;
2260
2261 eflags = 0;
2262 al1 = al;
2263 if (((al & 0x0f) > 9 ) || af) {
2264 eflags |= CC_A;
2265 if (al < 6 || cf)
2266 eflags |= CC_C;
2267 al = (al - 6) & 0xff;
2268 }
2269 if ((al1 > 0x99) || cf) {
2270 al = (al - 0x60) & 0xff;
2271 eflags |= CC_C;
2272 }
2273 EAX = (EAX & ~0xff) | al;
2274 /* well, speed is not an issue here, so we compute the flags by hand */
2275 eflags |= (al == 0) << 6; /* zf */
2276 eflags |= parity_table[al]; /* pf */
2277 eflags |= (al & 0x80); /* sf */
2278 CC_SRC = eflags;
2279}
2280
2281void helper_into(int next_eip_addend)
2282{
2283 int eflags;
2284 eflags = helper_cc_compute_all(CC_OP);
2285 if (eflags & CC_O) {
2286 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2287 }
2288}
2289
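/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
 * store ECX:EBX and set ZF, otherwise load the operand into EDX:EAX and
 * clear ZF.  The value is written back even on a mismatch, presumably so
 * that a write-protected destination faults just as it would on real
 * hardware.
 */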
2290void helper_cmpxchg8b(target_ulong a0)
2291{
2292 uint64_t d;
2293 int eflags;
2294
2295 eflags = helper_cc_compute_all(CC_OP);
2296 d = ldq(a0);
2297 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2298 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2299 eflags |= CC_Z;
2300 } else {
2301 /* always do the store */
2302 stq(a0, d);
2303 EDX = (uint32_t)(d >> 32);
2304 EAX = (uint32_t)d;
2305 eflags &= ~CC_Z;
2306 }
2307 CC_SRC = eflags;
2308}
2309
2310#ifdef TARGET_X86_64
2311void helper_cmpxchg16b(target_ulong a0)
2312{
2313 uint64_t d0, d1;
2314 int eflags;
2315
2316 if ((a0 & 0xf) != 0)
2317 raise_exception(EXCP0D_GPF);
2318 eflags = helper_cc_compute_all(CC_OP);
2319 d0 = ldq(a0);
2320 d1 = ldq(a0 + 8);
2321 if (d0 == EAX && d1 == EDX) {
2322 stq(a0, EBX);
2323 stq(a0 + 8, ECX);
2324 eflags |= CC_Z;
2325 } else {
2326 /* always do the store */
2327 stq(a0, d0);
2328 stq(a0 + 8, d1);
2329 EDX = d1;
2330 EAX = d0;
2331 eflags &= ~CC_Z;
2332 }
2333 CC_SRC = eflags;
2334}
2335#endif
2336
2337void helper_single_step(void)
2338{
2339#ifndef CONFIG_USER_ONLY
2340 check_hw_breakpoints(env, 1);
2341 env->dr[6] |= DR6_BS;
2342#endif
2343 raise_exception(EXCP01_DB);
2344}
2345
2346void helper_cpuid(void)
2347{
2348 uint32_t eax, ebx, ecx, edx;
2349
2350 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2351
2352 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2353 EAX = eax;
2354 EBX = ebx;
2355 ECX = ecx;
2356 EDX = edx;
2357}
2358
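/*
 * ENTER with a non-zero nesting level.  The helper skips the stack slot
 * used for the frame-pointer push done outside this helper, copies
 * level-1 frame-pointer entries from the old frame (addressed through EBP)
 * onto the new one, and finally pushes t1, the new frame pointer value
 * supplied by the generated code.  esp_mask confines all stack accesses to
 * the 16- or 32-bit stack size implied by the SS descriptor.
 */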
2359void helper_enter_level(int level, int data32, target_ulong t1)
2360{
2361 target_ulong ssp;
2362 uint32_t esp_mask, esp, ebp;
2363
2364 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2365 ssp = env->segs[R_SS].base;
2366 ebp = EBP;
2367 esp = ESP;
2368 if (data32) {
2369 /* 32 bit */
2370 esp -= 4;
2371 while (--level) {
2372 esp -= 4;
2373 ebp -= 4;
2374 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2375 }
2376 esp -= 4;
2377 stl(ssp + (esp & esp_mask), t1);
2378 } else {
2379 /* 16 bit */
2380 esp -= 2;
2381 while (--level) {
2382 esp -= 2;
2383 ebp -= 2;
2384 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2385 }
2386 esp -= 2;
2387 stw(ssp + (esp & esp_mask), t1);
2388 }
2389}
2390
2391#ifdef TARGET_X86_64
2392void helper_enter64_level(int level, int data64, target_ulong t1)
2393{
2394 target_ulong esp, ebp;
2395 ebp = EBP;
2396 esp = ESP;
2397
2398 if (data64) {
2399 /* 64 bit */
2400 esp -= 8;
2401 while (--level) {
2402 esp -= 8;
2403 ebp -= 8;
2404 stq(esp, ldq(ebp));
2405 }
2406 esp -= 8;
2407 stq(esp, t1);
2408 } else {
2409 /* 16 bit */
2410 esp -= 2;
2411 while (--level) {
2412 esp -= 2;
2413 ebp -= 2;
2414 stw(esp, lduw(ebp));
2415 }
2416 esp -= 2;
2417 stw(esp, t1);
2418 }
2419}
2420#endif
2421
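/*
 * LLDT: a null selector simply disables the LDT; otherwise the selector
 * must reference the GDT (TI clear) and point at a present LDT system
 * descriptor (S clear, type 2).  In long mode the descriptor is 16 bytes,
 * hence entry_limit = 15 and the extra dword carrying base bits 63:32.
 */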
2422void helper_lldt(int selector)
2423{
2424 SegmentCache *dt;
2425 uint32_t e1, e2;
2426#ifndef VBOX
2427 int index, entry_limit;
2428#else
2429 unsigned int index, entry_limit;
2430#endif
2431 target_ulong ptr;
2432
2433#ifdef VBOX
2434 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2435 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2436#endif
2437
2438 selector &= 0xffff;
2439 if ((selector & 0xfffc) == 0) {
2440 /* XXX: NULL selector case: invalid LDT */
2441 env->ldt.base = 0;
2442 env->ldt.limit = 0;
2443 } else {
2444 if (selector & 0x4)
2445 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2446 dt = &env->gdt;
2447 index = selector & ~7;
2448#ifdef TARGET_X86_64
2449 if (env->hflags & HF_LMA_MASK)
2450 entry_limit = 15;
2451 else
2452#endif
2453 entry_limit = 7;
2454 if ((index + entry_limit) > dt->limit)
2455 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2456 ptr = dt->base + index;
2457 e1 = ldl_kernel(ptr);
2458 e2 = ldl_kernel(ptr + 4);
2459 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2460 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2461 if (!(e2 & DESC_P_MASK))
2462 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2463#ifdef TARGET_X86_64
2464 if (env->hflags & HF_LMA_MASK) {
2465 uint32_t e3;
2466 e3 = ldl_kernel(ptr + 8);
2467 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2468 env->ldt.base |= (target_ulong)e3 << 32;
2469 } else
2470#endif
2471 {
2472 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2473 }
2474 }
2475 env->ldt.selector = selector;
2476#ifdef VBOX
2477 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2478 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2479#endif
2480}
2481
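/*
 * LTR performs the same descriptor checks as LLDT but requires an available
 * TSS descriptor (type 1 for a 16-bit TSS, type 9 for a 32/64-bit one) and
 * afterwards marks it busy by writing the busy bit back into the descriptor.
 */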
2482void helper_ltr(int selector)
2483{
2484 SegmentCache *dt;
2485 uint32_t e1, e2;
2486#ifndef VBOX
2487 int index, type, entry_limit;
2488#else
2489 unsigned int index;
2490 int type, entry_limit;
2491#endif
2492 target_ulong ptr;
2493
2494#ifdef VBOX
2495 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2496 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2497 env->tr.flags, (RTSEL)(selector & 0xffff)));
2498#endif
2499 selector &= 0xffff;
2500 if ((selector & 0xfffc) == 0) {
2501 /* NULL selector case: invalid TR */
2502 env->tr.base = 0;
2503 env->tr.limit = 0;
2504 env->tr.flags = 0;
2505 } else {
2506 if (selector & 0x4)
2507 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2508 dt = &env->gdt;
2509 index = selector & ~7;
2510#ifdef TARGET_X86_64
2511 if (env->hflags & HF_LMA_MASK)
2512 entry_limit = 15;
2513 else
2514#endif
2515 entry_limit = 7;
2516 if ((index + entry_limit) > dt->limit)
2517 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2518 ptr = dt->base + index;
2519 e1 = ldl_kernel(ptr);
2520 e2 = ldl_kernel(ptr + 4);
2521 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2522 if ((e2 & DESC_S_MASK) ||
2523 (type != 1 && type != 9))
2524 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2525 if (!(e2 & DESC_P_MASK))
2526 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2527#ifdef TARGET_X86_64
2528 if (env->hflags & HF_LMA_MASK) {
2529 uint32_t e3, e4;
2530 e3 = ldl_kernel(ptr + 8);
2531 e4 = ldl_kernel(ptr + 12);
2532 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2533 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2534 load_seg_cache_raw_dt(&env->tr, e1, e2);
2535 env->tr.base |= (target_ulong)e3 << 32;
2536 } else
2537#endif
2538 {
2539 load_seg_cache_raw_dt(&env->tr, e1, e2);
2540 }
2541 e2 |= DESC_TSS_BUSY_MASK;
2542 stl_kernel(ptr + 4, e2);
2543 }
2544 env->tr.selector = selector;
2545#ifdef VBOX
2546 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2547 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2548 env->tr.flags, (RTSEL)(selector & 0xffff)));
2549#endif
2550}
2551
2552/* only works if protected mode and not VM86. seg_reg must be != R_CS */
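/*
 * For SS the descriptor must be a writable data segment with DPL == RPL ==
 * CPL; the other segment registers accept any readable segment whose DPL is
 * at least max(CPL, RPL) unless it is conforming code.  The accessed bit is
 * set on a successful load.  The VBOX block below strips RPL 1 selectors to
 * RPL 0, apparently to undo the selector remapping used when guest ring-0
 * code runs raw (CPU_RAW_RING0).
 */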
2553void helper_load_seg(int seg_reg, int selector)
2554{
2555 uint32_t e1, e2;
2556 int cpl, dpl, rpl;
2557 SegmentCache *dt;
2558#ifndef VBOX
2559 int index;
2560#else
2561 unsigned int index;
2562#endif
2563 target_ulong ptr;
2564
2565 selector &= 0xffff;
2566 cpl = env->hflags & HF_CPL_MASK;
2567#ifdef VBOX
2568
2569    /* Trying to load a selector with RPL=1? */
2570 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2571 {
2572 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2573 selector = selector & 0xfffc;
2574 }
2575#endif /* VBOX */
2576 if ((selector & 0xfffc) == 0) {
2577 /* null selector case */
2578 if (seg_reg == R_SS
2579#ifdef TARGET_X86_64
2580 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2581#endif
2582 )
2583 raise_exception_err(EXCP0D_GPF, 0);
2584 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2585 } else {
2586
2587 if (selector & 0x4)
2588 dt = &env->ldt;
2589 else
2590 dt = &env->gdt;
2591 index = selector & ~7;
2592 if ((index + 7) > dt->limit)
2593 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2594 ptr = dt->base + index;
2595 e1 = ldl_kernel(ptr);
2596 e2 = ldl_kernel(ptr + 4);
2597
2598 if (!(e2 & DESC_S_MASK))
2599 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2600 rpl = selector & 3;
2601 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2602 if (seg_reg == R_SS) {
2603 /* must be writable segment */
2604 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2605 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2606 if (rpl != cpl || dpl != cpl)
2607 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2608 } else {
2609 /* must be readable segment */
2610 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2611 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2612
2613 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2614 /* if not conforming code, test rights */
2615 if (dpl < cpl || dpl < rpl)
2616 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2617 }
2618 }
2619
2620 if (!(e2 & DESC_P_MASK)) {
2621 if (seg_reg == R_SS)
2622 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2623 else
2624 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2625 }
2626
2627 /* set the access bit if not already set */
2628 if (!(e2 & DESC_A_MASK)) {
2629 e2 |= DESC_A_MASK;
2630 stl_kernel(ptr + 4, e2);
2631 }
2632
2633 cpu_x86_load_seg_cache(env, seg_reg, selector,
2634 get_seg_base(e1, e2),
2635 get_seg_limit(e1, e2),
2636 e2);
2637#if 0
2638 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2639 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2640#endif
2641 }
2642}
2643
2644/* protected mode jump */
2645void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2646 int next_eip_addend)
2647{
2648 int gate_cs, type;
2649 uint32_t e1, e2, cpl, dpl, rpl, limit;
2650 target_ulong next_eip;
2651
2652#ifdef VBOX /** @todo Why do we do this? */
2653 e1 = e2 = 0;
2654#endif
2655 if ((new_cs & 0xfffc) == 0)
2656 raise_exception_err(EXCP0D_GPF, 0);
2657 if (load_segment(&e1, &e2, new_cs) != 0)
2658 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2659 cpl = env->hflags & HF_CPL_MASK;
2660 if (e2 & DESC_S_MASK) {
2661 if (!(e2 & DESC_CS_MASK))
2662 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2663 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2664 if (e2 & DESC_C_MASK) {
2665 /* conforming code segment */
2666 if (dpl > cpl)
2667 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2668 } else {
2669 /* non conforming code segment */
2670 rpl = new_cs & 3;
2671 if (rpl > cpl)
2672 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2673 if (dpl != cpl)
2674 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2675 }
2676 if (!(e2 & DESC_P_MASK))
2677 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2678 limit = get_seg_limit(e1, e2);
2679 if (new_eip > limit &&
2680 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2681 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2682 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2683 get_seg_base(e1, e2), limit, e2);
2684 EIP = new_eip;
2685 } else {
2686 /* jump to call or task gate */
2687 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2688 rpl = new_cs & 3;
2689 cpl = env->hflags & HF_CPL_MASK;
2690 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2691 switch(type) {
2692 case 1: /* 286 TSS */
2693 case 9: /* 386 TSS */
2694 case 5: /* task gate */
2695 if (dpl < cpl || dpl < rpl)
2696 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2697 next_eip = env->eip + next_eip_addend;
2698 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2699 CC_OP = CC_OP_EFLAGS;
2700 break;
2701 case 4: /* 286 call gate */
2702 case 12: /* 386 call gate */
2703 if ((dpl < cpl) || (dpl < rpl))
2704 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2705 if (!(e2 & DESC_P_MASK))
2706 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2707 gate_cs = e1 >> 16;
2708 new_eip = (e1 & 0xffff);
2709 if (type == 12)
2710 new_eip |= (e2 & 0xffff0000);
2711 if (load_segment(&e1, &e2, gate_cs) != 0)
2712 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2713 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2714 /* must be code segment */
2715 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2716 (DESC_S_MASK | DESC_CS_MASK)))
2717 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2718 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2719 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2720 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2721 if (!(e2 & DESC_P_MASK))
2722#ifdef VBOX /* See page 3-514 of 253666.pdf */
2723 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2724#else
2725 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2726#endif
2727 limit = get_seg_limit(e1, e2);
2728 if (new_eip > limit)
2729 raise_exception_err(EXCP0D_GPF, 0);
2730 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2731 get_seg_base(e1, e2), limit, e2);
2732 EIP = new_eip;
2733 break;
2734 default:
2735 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2736 break;
2737 }
2738 }
2739}
2740
2741/* real mode call */
2742void helper_lcall_real(int new_cs, target_ulong new_eip1,
2743 int shift, int next_eip)
2744{
2745 int new_eip;
2746 uint32_t esp, esp_mask;
2747 target_ulong ssp;
2748
2749 new_eip = new_eip1;
2750 esp = ESP;
2751 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2752 ssp = env->segs[R_SS].base;
2753 if (shift) {
2754 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2755 PUSHL(ssp, esp, esp_mask, next_eip);
2756 } else {
2757 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2758 PUSHW(ssp, esp, esp_mask, next_eip);
2759 }
2760
2761 SET_ESP(esp, esp_mask);
2762 env->eip = new_eip;
2763 env->segs[R_CS].selector = new_cs;
2764 env->segs[R_CS].base = (new_cs << 4);
2765}
2766
2767/* protected mode call */
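/*
 * A far CALL through a code segment stays at the current CPL and just
 * pushes the return CS:EIP on the current stack.  A call gate to a more
 * privileged non-conforming segment instead switches to the inner stack
 * taken from the TSS, pushes the old SS:ESP, copies param_count parameters
 * from the old stack, pushes the return CS:EIP and finally raises the CPL
 * to the target DPL.  For gates the push width comes from the gate type
 * (4 = 16-bit, 12 = 32-bit), hence shift = type >> 3.
 */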
2768void helper_lcall_protected(int new_cs, target_ulong new_eip,
2769 int shift, int next_eip_addend)
2770{
2771 int new_stack, i;
2772 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2773 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2774 uint32_t val, limit, old_sp_mask;
2775 target_ulong ssp, old_ssp, next_eip;
2776
2777#ifdef VBOX /** @todo Why do we do this? */
2778 e1 = e2 = 0;
2779#endif
2780 next_eip = env->eip + next_eip_addend;
2781 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2782 LOG_PCALL_STATE(env);
2783 if ((new_cs & 0xfffc) == 0)
2784 raise_exception_err(EXCP0D_GPF, 0);
2785 if (load_segment(&e1, &e2, new_cs) != 0)
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 cpl = env->hflags & HF_CPL_MASK;
2788 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2789 if (e2 & DESC_S_MASK) {
2790 if (!(e2 & DESC_CS_MASK))
2791 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2792 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2793 if (e2 & DESC_C_MASK) {
2794 /* conforming code segment */
2795 if (dpl > cpl)
2796 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2797 } else {
2798 /* non conforming code segment */
2799 rpl = new_cs & 3;
2800 if (rpl > cpl)
2801 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2802 if (dpl != cpl)
2803 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2804 }
2805 if (!(e2 & DESC_P_MASK))
2806 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2807
2808#ifdef TARGET_X86_64
2809 /* XXX: check 16/32 bit cases in long mode */
2810 if (shift == 2) {
2811 target_ulong rsp;
2812 /* 64 bit case */
2813 rsp = ESP;
2814 PUSHQ(rsp, env->segs[R_CS].selector);
2815 PUSHQ(rsp, next_eip);
2816 /* from this point, not restartable */
2817 ESP = rsp;
2818 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2819 get_seg_base(e1, e2),
2820 get_seg_limit(e1, e2), e2);
2821 EIP = new_eip;
2822 } else
2823#endif
2824 {
2825 sp = ESP;
2826 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2827 ssp = env->segs[R_SS].base;
2828 if (shift) {
2829 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2830 PUSHL(ssp, sp, sp_mask, next_eip);
2831 } else {
2832 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2833 PUSHW(ssp, sp, sp_mask, next_eip);
2834 }
2835
2836 limit = get_seg_limit(e1, e2);
2837 if (new_eip > limit)
2838 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2839 /* from this point, not restartable */
2840 SET_ESP(sp, sp_mask);
2841 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2842 get_seg_base(e1, e2), limit, e2);
2843 EIP = new_eip;
2844 }
2845 } else {
2846 /* check gate type */
2847 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2848 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2849 rpl = new_cs & 3;
2850 switch(type) {
2851 case 1: /* available 286 TSS */
2852 case 9: /* available 386 TSS */
2853 case 5: /* task gate */
2854 if (dpl < cpl || dpl < rpl)
2855 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2856 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2857 CC_OP = CC_OP_EFLAGS;
2858 return;
2859 case 4: /* 286 call gate */
2860 case 12: /* 386 call gate */
2861 break;
2862 default:
2863 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2864 break;
2865 }
2866 shift = type >> 3;
2867
2868 if (dpl < cpl || dpl < rpl)
2869 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2870 /* check valid bit */
2871 if (!(e2 & DESC_P_MASK))
2872 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2873 selector = e1 >> 16;
2874 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2875 param_count = e2 & 0x1f;
2876 if ((selector & 0xfffc) == 0)
2877 raise_exception_err(EXCP0D_GPF, 0);
2878
2879 if (load_segment(&e1, &e2, selector) != 0)
2880 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2881 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2882 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2883 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2884 if (dpl > cpl)
2885 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2886 if (!(e2 & DESC_P_MASK))
2887 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2888
2889 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2890 /* to inner privilege */
2891 get_ss_esp_from_tss(&ss, &sp, dpl);
2892 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2893 ss, sp, param_count, ESP);
2894 if ((ss & 0xfffc) == 0)
2895 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2896 if ((ss & 3) != dpl)
2897 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2898 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2899 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2900 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2901 if (ss_dpl != dpl)
2902 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2903 if (!(ss_e2 & DESC_S_MASK) ||
2904 (ss_e2 & DESC_CS_MASK) ||
2905 !(ss_e2 & DESC_W_MASK))
2906 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2907 if (!(ss_e2 & DESC_P_MASK))
2908#ifdef VBOX /* See page 3-99 of 253666.pdf */
2909 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2910#else
2911 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2912#endif
2913
2914 // push_size = ((param_count * 2) + 8) << shift;
2915
2916 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2917 old_ssp = env->segs[R_SS].base;
2918
2919 sp_mask = get_sp_mask(ss_e2);
2920 ssp = get_seg_base(ss_e1, ss_e2);
2921 if (shift) {
2922 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2923 PUSHL(ssp, sp, sp_mask, ESP);
2924 for(i = param_count - 1; i >= 0; i--) {
2925 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2926 PUSHL(ssp, sp, sp_mask, val);
2927 }
2928 } else {
2929 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2930 PUSHW(ssp, sp, sp_mask, ESP);
2931 for(i = param_count - 1; i >= 0; i--) {
2932 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2933 PUSHW(ssp, sp, sp_mask, val);
2934 }
2935 }
2936 new_stack = 1;
2937 } else {
2938 /* to same privilege */
2939 sp = ESP;
2940 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2941 ssp = env->segs[R_SS].base;
2942 // push_size = (4 << shift);
2943 new_stack = 0;
2944 }
2945
2946 if (shift) {
2947 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2948 PUSHL(ssp, sp, sp_mask, next_eip);
2949 } else {
2950 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2951 PUSHW(ssp, sp, sp_mask, next_eip);
2952 }
2953
2954 /* from this point, not restartable */
2955
2956 if (new_stack) {
2957 ss = (ss & ~3) | dpl;
2958 cpu_x86_load_seg_cache(env, R_SS, ss,
2959 ssp,
2960 get_seg_limit(ss_e1, ss_e2),
2961 ss_e2);
2962 }
2963
2964 selector = (selector & ~3) | dpl;
2965 cpu_x86_load_seg_cache(env, R_CS, selector,
2966 get_seg_base(e1, e2),
2967 get_seg_limit(e1, e2),
2968 e2);
2969 cpu_x86_set_cpl(env, dpl);
2970 SET_ESP(sp, sp_mask);
2971 EIP = offset;
2972 }
2973}
2974
2975/* real and vm86 mode iret */
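/*
 * The VBOX-specific fVME handling below implements the virtual-8086 mode
 * extensions: with CR4.VME set and IOPL < 3, IRET must #GP if it would set
 * TF or enable interrupts while a virtual interrupt is pending, and the IF
 * bit popped from the stack is reflected into VIF instead of the real IF.
 */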
2976void helper_iret_real(int shift)
2977{
2978 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2979 target_ulong ssp;
2980 int eflags_mask;
2981#ifdef VBOX
2982 bool fVME = false;
2983
2984 remR3TrapClear(env->pVM);
2985#endif /* VBOX */
2986
2987 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2988 sp = ESP;
2989 ssp = env->segs[R_SS].base;
2990 if (shift == 1) {
2991 /* 32 bits */
2992 POPL(ssp, sp, sp_mask, new_eip);
2993 POPL(ssp, sp, sp_mask, new_cs);
2994 new_cs &= 0xffff;
2995 POPL(ssp, sp, sp_mask, new_eflags);
2996 } else {
2997 /* 16 bits */
2998 POPW(ssp, sp, sp_mask, new_eip);
2999 POPW(ssp, sp, sp_mask, new_cs);
3000 POPW(ssp, sp, sp_mask, new_eflags);
3001 }
3002#ifdef VBOX
3003 if ( (env->eflags & VM_MASK)
3004 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3005 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3006 {
3007 fVME = true;
3008 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3009 /* if TF will be set -> #GP */
3010 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3011 || (new_eflags & TF_MASK))
3012 raise_exception(EXCP0D_GPF);
3013 }
3014#endif /* VBOX */
3015 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3016 env->segs[R_CS].selector = new_cs;
3017 env->segs[R_CS].base = (new_cs << 4);
3018 env->eip = new_eip;
3019#ifdef VBOX
3020 if (fVME)
3021 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3022 else
3023#endif
3024 if (env->eflags & VM_MASK)
3025 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3026 else
3027 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3028 if (shift == 0)
3029 eflags_mask &= 0xffff;
3030 load_eflags(new_eflags, eflags_mask);
3031 env->hflags2 &= ~HF2_NMI_MASK;
3032#ifdef VBOX
3033 if (fVME)
3034 {
3035 if (new_eflags & IF_MASK)
3036 env->eflags |= VIF_MASK;
3037 else
3038 env->eflags &= ~VIF_MASK;
3039 }
3040#endif /* VBOX */
3041}
3042
3043static inline void validate_seg(int seg_reg, int cpl)
3044{
3045 int dpl;
3046 uint32_t e2;
3047
3048 /* XXX: on x86_64, we do not want to nullify FS and GS because
3049 they may still contain a valid base. I would be interested to
3050 know how a real x86_64 CPU behaves */
3051 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3052 (env->segs[seg_reg].selector & 0xfffc) == 0)
3053 return;
3054
3055 e2 = env->segs[seg_reg].flags;
3056 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3057 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3058 /* data or non conforming code segment */
3059 if (dpl < cpl) {
3060 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3061 }
3062 }
3063}
3064
3065/* protected mode iret */
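/*
 * Common worker for LRET and IRET in protected mode.  It pops EIP, CS and
 * (for IRET) EFLAGS with the width selected by shift (0 = 16-bit,
 * 1 = 32-bit, 2 = 64-bit), then either stays at the current privilege level
 * or also pops SS:ESP for a return to an outer ring, in which case data
 * segments whose DPL is below the new CPL are nullified via validate_seg()
 * above.  A popped EFLAGS.VM bit diverts to the return-to-VM86 path at the
 * end of the function.
 */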
3066static inline void helper_ret_protected(int shift, int is_iret, int addend)
3067{
3068 uint32_t new_cs, new_eflags, new_ss;
3069 uint32_t new_es, new_ds, new_fs, new_gs;
3070 uint32_t e1, e2, ss_e1, ss_e2;
3071 int cpl, dpl, rpl, eflags_mask, iopl;
3072 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3073
3074#ifdef VBOX /** @todo Why do we do this? */
3075 ss_e1 = ss_e2 = e1 = e2 = 0;
3076#endif
3077
3078#ifdef TARGET_X86_64
3079 if (shift == 2)
3080 sp_mask = -1;
3081 else
3082#endif
3083 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3084 sp = ESP;
3085 ssp = env->segs[R_SS].base;
3086 new_eflags = 0; /* avoid warning */
3087#ifdef TARGET_X86_64
3088 if (shift == 2) {
3089 POPQ(sp, new_eip);
3090 POPQ(sp, new_cs);
3091 new_cs &= 0xffff;
3092 if (is_iret) {
3093 POPQ(sp, new_eflags);
3094 }
3095 } else
3096#endif
3097 if (shift == 1) {
3098 /* 32 bits */
3099 POPL(ssp, sp, sp_mask, new_eip);
3100 POPL(ssp, sp, sp_mask, new_cs);
3101 new_cs &= 0xffff;
3102 if (is_iret) {
3103 POPL(ssp, sp, sp_mask, new_eflags);
3104#if defined(VBOX) && defined(DEBUG)
3105 printf("iret: new CS %04X\n", new_cs);
3106 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3107 printf("iret: new EFLAGS %08X\n", new_eflags);
3108 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3109#endif
3110 if (new_eflags & VM_MASK)
3111 goto return_to_vm86;
3112 }
3113#ifdef VBOX
3114 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3115 {
3116# ifdef DEBUG
3117 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3118# endif
3119 new_cs = new_cs & 0xfffc;
3120 }
3121#endif
3122 } else {
3123 /* 16 bits */
3124 POPW(ssp, sp, sp_mask, new_eip);
3125 POPW(ssp, sp, sp_mask, new_cs);
3126 if (is_iret)
3127 POPW(ssp, sp, sp_mask, new_eflags);
3128 }
3129 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3130 new_cs, new_eip, shift, addend);
3131 LOG_PCALL_STATE(env);
3132 if ((new_cs & 0xfffc) == 0)
3133 {
3134#if defined(VBOX) && defined(DEBUG)
3135        printf("(new_cs & 0xfffc) == 0\n");
3136#endif
3137 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3138 }
3139 if (load_segment(&e1, &e2, new_cs) != 0)
3140 {
3141#if defined(VBOX) && defined(DEBUG)
3142 printf("load_segment failed\n");
3143#endif
3144 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3145 }
3146 if (!(e2 & DESC_S_MASK) ||
3147 !(e2 & DESC_CS_MASK))
3148 {
3149#if defined(VBOX) && defined(DEBUG)
3150 printf("e2 mask %08x\n", e2);
3151#endif
3152 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3153 }
3154 cpl = env->hflags & HF_CPL_MASK;
3155 rpl = new_cs & 3;
3156 if (rpl < cpl)
3157 {
3158#if defined(VBOX) && defined(DEBUG)
3159 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3160#endif
3161 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3162 }
3163 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3164 if (e2 & DESC_C_MASK) {
3165 if (dpl > rpl)
3166 {
3167#if defined(VBOX) && defined(DEBUG)
3168 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3169#endif
3170 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3171 }
3172 } else {
3173 if (dpl != rpl)
3174 {
3175#if defined(VBOX) && defined(DEBUG)
3176 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3177#endif
3178 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3179 }
3180 }
3181 if (!(e2 & DESC_P_MASK))
3182 {
3183#if defined(VBOX) && defined(DEBUG)
3184 printf("DESC_P_MASK e2=%08x\n", e2);
3185#endif
3186 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3187 }
3188
3189 sp += addend;
3190 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3191 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3192 /* return to same privilege level */
3193 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3194 get_seg_base(e1, e2),
3195 get_seg_limit(e1, e2),
3196 e2);
3197 } else {
3198 /* return to different privilege level */
3199#ifdef TARGET_X86_64
3200 if (shift == 2) {
3201 POPQ(sp, new_esp);
3202 POPQ(sp, new_ss);
3203 new_ss &= 0xffff;
3204 } else
3205#endif
3206 if (shift == 1) {
3207 /* 32 bits */
3208 POPL(ssp, sp, sp_mask, new_esp);
3209 POPL(ssp, sp, sp_mask, new_ss);
3210 new_ss &= 0xffff;
3211 } else {
3212 /* 16 bits */
3213 POPW(ssp, sp, sp_mask, new_esp);
3214 POPW(ssp, sp, sp_mask, new_ss);
3215 }
3216 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3217 new_ss, new_esp);
3218 if ((new_ss & 0xfffc) == 0) {
3219#ifdef TARGET_X86_64
3220            /* NULL ss is allowed in long mode if cpl != 3 */
3221 /* XXX: test CS64 ? */
3222 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3223 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3224 0, 0xffffffff,
3225 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3226 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3227 DESC_W_MASK | DESC_A_MASK);
3228 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3229 } else
3230#endif
3231 {
3232 raise_exception_err(EXCP0D_GPF, 0);
3233 }
3234 } else {
3235 if ((new_ss & 3) != rpl)
3236 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3237 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3238 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3239 if (!(ss_e2 & DESC_S_MASK) ||
3240 (ss_e2 & DESC_CS_MASK) ||
3241 !(ss_e2 & DESC_W_MASK))
3242 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3243 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3244 if (dpl != rpl)
3245 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3246 if (!(ss_e2 & DESC_P_MASK))
3247 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3248 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3249 get_seg_base(ss_e1, ss_e2),
3250 get_seg_limit(ss_e1, ss_e2),
3251 ss_e2);
3252 }
3253
3254 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3255 get_seg_base(e1, e2),
3256 get_seg_limit(e1, e2),
3257 e2);
3258 cpu_x86_set_cpl(env, rpl);
3259 sp = new_esp;
3260#ifdef TARGET_X86_64
3261 if (env->hflags & HF_CS64_MASK)
3262 sp_mask = -1;
3263 else
3264#endif
3265 sp_mask = get_sp_mask(ss_e2);
3266
3267 /* validate data segments */
3268 validate_seg(R_ES, rpl);
3269 validate_seg(R_DS, rpl);
3270 validate_seg(R_FS, rpl);
3271 validate_seg(R_GS, rpl);
3272
3273 sp += addend;
3274 }
3275 SET_ESP(sp, sp_mask);
3276 env->eip = new_eip;
3277 if (is_iret) {
3278 /* NOTE: 'cpl' is the _old_ CPL */
3279 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3280 if (cpl == 0)
3281#ifdef VBOX
3282 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3283#else
3284 eflags_mask |= IOPL_MASK;
3285#endif
3286 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3287 if (cpl <= iopl)
3288 eflags_mask |= IF_MASK;
3289 if (shift == 0)
3290 eflags_mask &= 0xffff;
3291 load_eflags(new_eflags, eflags_mask);
3292 }
3293 return;
3294
3295 return_to_vm86:
3296 POPL(ssp, sp, sp_mask, new_esp);
3297 POPL(ssp, sp, sp_mask, new_ss);
3298 POPL(ssp, sp, sp_mask, new_es);
3299 POPL(ssp, sp, sp_mask, new_ds);
3300 POPL(ssp, sp, sp_mask, new_fs);
3301 POPL(ssp, sp, sp_mask, new_gs);
3302
3303 /* modify processor state */
3304 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3305 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3306 load_seg_vm(R_CS, new_cs & 0xffff);
3307 cpu_x86_set_cpl(env, 3);
3308 load_seg_vm(R_SS, new_ss & 0xffff);
3309 load_seg_vm(R_ES, new_es & 0xffff);
3310 load_seg_vm(R_DS, new_ds & 0xffff);
3311 load_seg_vm(R_FS, new_fs & 0xffff);
3312 load_seg_vm(R_GS, new_gs & 0xffff);
3313
3314 env->eip = new_eip & 0xffff;
3315 ESP = new_esp;
3316}
3317
3318void helper_iret_protected(int shift, int next_eip)
3319{
3320 int tss_selector, type;
3321 uint32_t e1, e2;
3322
3323#ifdef VBOX
3324 e1 = e2 = 0; /** @todo Why do we do this? */
3325 remR3TrapClear(env->pVM);
3326#endif
3327
3328 /* specific case for TSS */
3329 if (env->eflags & NT_MASK) {
3330#ifdef TARGET_X86_64
3331 if (env->hflags & HF_LMA_MASK)
3332 raise_exception_err(EXCP0D_GPF, 0);
3333#endif
3334 tss_selector = lduw_kernel(env->tr.base + 0);
3335 if (tss_selector & 4)
3336 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3337 if (load_segment(&e1, &e2, tss_selector) != 0)
3338 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3339 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3340 /* NOTE: we check both segment and busy TSS */
3341 if (type != 3)
3342 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3343 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3344 } else {
3345 helper_ret_protected(shift, 1, 0);
3346 }
3347 env->hflags2 &= ~HF2_NMI_MASK;
3348}
3349
3350void helper_lret_protected(int shift, int addend)
3351{
3352 helper_ret_protected(shift, 0, addend);
3353}
3354
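/*
 * SYSENTER clears VM/IF/RF and loads flat segments derived from the
 * SYSENTER_CS MSR: CS = sysenter_cs, SS = sysenter_cs + 8, with ESP/EIP
 * taken from the SYSENTER_ESP/SYSENTER_EIP MSRs, all at CPL 0.  SYSEXIT
 * below reverses this with CS at sysenter_cs + 16 (+ 32 for a 64-bit
 * return) and SS 8 bytes above that, returning to CPL 3 with ESP = ECX and
 * EIP = EDX.
 */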
3355void helper_sysenter(void)
3356{
3357 if (env->sysenter_cs == 0) {
3358 raise_exception_err(EXCP0D_GPF, 0);
3359 }
3360 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3361 cpu_x86_set_cpl(env, 0);
3362
3363#ifdef TARGET_X86_64
3364 if (env->hflags & HF_LMA_MASK) {
3365 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3366 0, 0xffffffff,
3367 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3368 DESC_S_MASK |
3369 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3370 } else
3371#endif
3372 {
3373 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3374 0, 0xffffffff,
3375 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3376 DESC_S_MASK |
3377 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3378 }
3379 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3380 0, 0xffffffff,
3381 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3382 DESC_S_MASK |
3383 DESC_W_MASK | DESC_A_MASK);
3384 ESP = env->sysenter_esp;
3385 EIP = env->sysenter_eip;
3386}
3387
3388void helper_sysexit(int dflag)
3389{
3390 int cpl;
3391
3392 cpl = env->hflags & HF_CPL_MASK;
3393 if (env->sysenter_cs == 0 || cpl != 0) {
3394 raise_exception_err(EXCP0D_GPF, 0);
3395 }
3396 cpu_x86_set_cpl(env, 3);
3397#ifdef TARGET_X86_64
3398 if (dflag == 2) {
3399 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3400 0, 0xffffffff,
3401 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3402 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3403 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3404 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3405 0, 0xffffffff,
3406 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3407 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3408 DESC_W_MASK | DESC_A_MASK);
3409 } else
3410#endif
3411 {
3412 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3413 0, 0xffffffff,
3414 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3415 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3416 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3417 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3418 0, 0xffffffff,
3419 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3420 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3421 DESC_W_MASK | DESC_A_MASK);
3422 }
3423 ESP = ECX;
3424 EIP = EDX;
3425}
3426
3427#if defined(CONFIG_USER_ONLY)
3428target_ulong helper_read_crN(int reg)
3429{
3430 return 0;
3431}
3432
3433void helper_write_crN(int reg, target_ulong t0)
3434{
3435}
3436
3437void helper_movl_drN_T0(int reg, target_ulong t0)
3438{
3439}
3440#else
3441target_ulong helper_read_crN(int reg)
3442{
3443 target_ulong val;
3444
3445 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3446 switch(reg) {
3447 default:
3448 val = env->cr[reg];
3449 break;
3450 case 8:
3451 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3452#ifndef VBOX
3453 val = cpu_get_apic_tpr(env->apic_state);
3454#else /* VBOX */
3455 val = cpu_get_apic_tpr(env);
3456#endif /* VBOX */
3457 } else {
3458 val = env->v_tpr;
3459 }
3460 break;
3461 }
3462 return val;
3463}
3464
3465void helper_write_crN(int reg, target_ulong t0)
3466{
3467 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3468 switch(reg) {
3469 case 0:
3470 cpu_x86_update_cr0(env, t0);
3471 break;
3472 case 3:
3473 cpu_x86_update_cr3(env, t0);
3474 break;
3475 case 4:
3476 cpu_x86_update_cr4(env, t0);
3477 break;
3478 case 8:
3479 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3480#ifndef VBOX
3481 cpu_set_apic_tpr(env->apic_state, t0);
3482#else /* VBOX */
3483 cpu_set_apic_tpr(env, t0);
3484#endif /* VBOX */
3485 }
3486 env->v_tpr = t0 & 0x0f;
3487 break;
3488 default:
3489 env->cr[reg] = t0;
3490 break;
3491 }
3492}
3493
3494void helper_movl_drN_T0(int reg, target_ulong t0)
3495{
3496 int i;
3497
3498 if (reg < 4) {
3499 hw_breakpoint_remove(env, reg);
3500 env->dr[reg] = t0;
3501 hw_breakpoint_insert(env, reg);
3502 } else if (reg == 7) {
3503 for (i = 0; i < 4; i++)
3504 hw_breakpoint_remove(env, i);
3505 env->dr[7] = t0;
3506 for (i = 0; i < 4; i++)
3507 hw_breakpoint_insert(env, i);
3508 } else
3509 env->dr[reg] = t0;
3510}
3511#endif
3512
3513void helper_lmsw(target_ulong t0)
3514{
3515 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3516 if already set to one. */
3517 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3518 helper_write_crN(0, t0);
3519}
3520
3521void helper_clts(void)
3522{
3523 env->cr[0] &= ~CR0_TS_MASK;
3524 env->hflags &= ~HF_TS_MASK;
3525}
3526
3527void helper_invlpg(target_ulong addr)
3528{
3529 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3530 tlb_flush_page(env, addr);
3531}
3532
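/*
 * RDTSC faults with #GP only when CR4.TSD is set and the caller is not at
 * CPL 0; the 64-bit counter plus env->tsc_offset is returned split across
 * EDX:EAX.  The VBOX build of RDTSCP below obtains IA32_TSC_AUX through the
 * cpu_rdmsr() callback rather than from a recompiler-local tsc_aux field.
 */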
3533void helper_rdtsc(void)
3534{
3535 uint64_t val;
3536
3537 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3538 raise_exception(EXCP0D_GPF);
3539 }
3540 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3541
3542 val = cpu_get_tsc(env) + env->tsc_offset;
3543 EAX = (uint32_t)(val);
3544 EDX = (uint32_t)(val >> 32);
3545}
3546
3547void helper_rdtscp(void)
3548{
3549 helper_rdtsc();
3550#ifndef VBOX
3551 ECX = (uint32_t)(env->tsc_aux);
3552#else /* VBOX */
3553 uint64_t val;
3554 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3555 ECX = (uint32_t)(val);
3556 else
3557 ECX = 0;
3558#endif /* VBOX */
3559}
3560
3561void helper_rdpmc(void)
3562{
3563#ifdef VBOX
3564 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3565 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3566 raise_exception(EXCP0D_GPF);
3567 }
3568 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3569 EAX = 0;
3570 EDX = 0;
3571#else /* !VBOX */
3572 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3573 raise_exception(EXCP0D_GPF);
3574 }
3575 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3576
3577 /* currently unimplemented */
3578 raise_exception_err(EXCP06_ILLOP, 0);
3579#endif /* !VBOX */
3580}
3581
3582#if defined(CONFIG_USER_ONLY)
3583void helper_wrmsr(void)
3584{
3585}
3586
3587void helper_rdmsr(void)
3588{
3589}
3590#else
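/*
 * WRMSR/RDMSR: ECX selects the MSR and the value travels in EDX:EAX.  In
 * the VBOX build most MSR state is owned by CPUM, so helper_wrmsr()
 * forwards the value to cpu_wrmsr() after updating the recompiler's shadow
 * copies, and helper_rdmsr() satisfies unknown MSRs through cpu_rdmsr()
 * instead of silently returning 0.
 */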
3591void helper_wrmsr(void)
3592{
3593 uint64_t val;
3594
3595 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3596
3597 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3598
3599 switch((uint32_t)ECX) {
3600 case MSR_IA32_SYSENTER_CS:
3601 env->sysenter_cs = val & 0xffff;
3602 break;
3603 case MSR_IA32_SYSENTER_ESP:
3604 env->sysenter_esp = val;
3605 break;
3606 case MSR_IA32_SYSENTER_EIP:
3607 env->sysenter_eip = val;
3608 break;
3609 case MSR_IA32_APICBASE:
3610# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3611 cpu_set_apic_base(env->apic_state, val);
3612# endif
3613 break;
3614 case MSR_EFER:
3615 {
3616 uint64_t update_mask;
3617 update_mask = 0;
3618 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3619 update_mask |= MSR_EFER_SCE;
3620 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3621 update_mask |= MSR_EFER_LME;
3622 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3623 update_mask |= MSR_EFER_FFXSR;
3624 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3625 update_mask |= MSR_EFER_NXE;
3626 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3627 update_mask |= MSR_EFER_SVME;
3628 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3629 update_mask |= MSR_EFER_FFXSR;
3630 cpu_load_efer(env, (env->efer & ~update_mask) |
3631 (val & update_mask));
3632 }
3633 break;
3634 case MSR_STAR:
3635 env->star = val;
3636 break;
3637 case MSR_PAT:
3638 env->pat = val;
3639 break;
3640 case MSR_VM_HSAVE_PA:
3641 env->vm_hsave = val;
3642 break;
3643#ifdef TARGET_X86_64
3644 case MSR_LSTAR:
3645 env->lstar = val;
3646 break;
3647 case MSR_CSTAR:
3648 env->cstar = val;
3649 break;
3650 case MSR_FMASK:
3651 env->fmask = val;
3652 break;
3653 case MSR_FSBASE:
3654 env->segs[R_FS].base = val;
3655 break;
3656 case MSR_GSBASE:
3657 env->segs[R_GS].base = val;
3658 break;
3659 case MSR_KERNELGSBASE:
3660 env->kernelgsbase = val;
3661 break;
3662#endif
3663# ifndef VBOX
3664 case MSR_MTRRphysBase(0):
3665 case MSR_MTRRphysBase(1):
3666 case MSR_MTRRphysBase(2):
3667 case MSR_MTRRphysBase(3):
3668 case MSR_MTRRphysBase(4):
3669 case MSR_MTRRphysBase(5):
3670 case MSR_MTRRphysBase(6):
3671 case MSR_MTRRphysBase(7):
3672 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3673 break;
3674 case MSR_MTRRphysMask(0):
3675 case MSR_MTRRphysMask(1):
3676 case MSR_MTRRphysMask(2):
3677 case MSR_MTRRphysMask(3):
3678 case MSR_MTRRphysMask(4):
3679 case MSR_MTRRphysMask(5):
3680 case MSR_MTRRphysMask(6):
3681 case MSR_MTRRphysMask(7):
3682 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3683 break;
3684 case MSR_MTRRfix64K_00000:
3685 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3686 break;
3687 case MSR_MTRRfix16K_80000:
3688 case MSR_MTRRfix16K_A0000:
3689 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3690 break;
3691 case MSR_MTRRfix4K_C0000:
3692 case MSR_MTRRfix4K_C8000:
3693 case MSR_MTRRfix4K_D0000:
3694 case MSR_MTRRfix4K_D8000:
3695 case MSR_MTRRfix4K_E0000:
3696 case MSR_MTRRfix4K_E8000:
3697 case MSR_MTRRfix4K_F0000:
3698 case MSR_MTRRfix4K_F8000:
3699 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3700 break;
3701 case MSR_MTRRdefType:
3702 env->mtrr_deftype = val;
3703 break;
3704 case MSR_MCG_STATUS:
3705 env->mcg_status = val;
3706 break;
3707 case MSR_MCG_CTL:
3708 if ((env->mcg_cap & MCG_CTL_P)
3709 && (val == 0 || val == ~(uint64_t)0))
3710 env->mcg_ctl = val;
3711 break;
3712 case MSR_TSC_AUX:
3713 env->tsc_aux = val;
3714 break;
3715# endif /* !VBOX */
3716 default:
3717# ifndef VBOX
3718 if ((uint32_t)ECX >= MSR_MC0_CTL
3719 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3720 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3721 if ((offset & 0x3) != 0
3722 || (val == 0 || val == ~(uint64_t)0))
3723 env->mce_banks[offset] = val;
3724 break;
3725 }
3726 /* XXX: exception ? */
3727# endif
3728 break;
3729 }
3730
3731# ifdef VBOX
3732 /* call CPUM. */
3733 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3734 {
3735 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3736 }
3737# endif
3738}
3739
3740void helper_rdmsr(void)
3741{
3742 uint64_t val;
3743
3744 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3745
3746 switch((uint32_t)ECX) {
3747 case MSR_IA32_SYSENTER_CS:
3748 val = env->sysenter_cs;
3749 break;
3750 case MSR_IA32_SYSENTER_ESP:
3751 val = env->sysenter_esp;
3752 break;
3753 case MSR_IA32_SYSENTER_EIP:
3754 val = env->sysenter_eip;
3755 break;
3756 case MSR_IA32_APICBASE:
3757#ifndef VBOX
3758 val = cpu_get_apic_base(env->apic_state);
3759#else /* VBOX */
3760 val = cpu_get_apic_base(env);
3761#endif /* VBOX */
3762 break;
3763 case MSR_EFER:
3764 val = env->efer;
3765 break;
3766 case MSR_STAR:
3767 val = env->star;
3768 break;
3769 case MSR_PAT:
3770 val = env->pat;
3771 break;
3772 case MSR_VM_HSAVE_PA:
3773 val = env->vm_hsave;
3774 break;
3775# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3776 case MSR_IA32_PERF_STATUS:
3777 /* tsc_increment_by_tick */
3778 val = 1000ULL;
3779 /* CPU multiplier */
3780 val |= (((uint64_t)4ULL) << 40);
3781 break;
3782# endif /* !VBOX */
3783#ifdef TARGET_X86_64
3784 case MSR_LSTAR:
3785 val = env->lstar;
3786 break;
3787 case MSR_CSTAR:
3788 val = env->cstar;
3789 break;
3790 case MSR_FMASK:
3791 val = env->fmask;
3792 break;
3793 case MSR_FSBASE:
3794 val = env->segs[R_FS].base;
3795 break;
3796 case MSR_GSBASE:
3797 val = env->segs[R_GS].base;
3798 break;
3799 case MSR_KERNELGSBASE:
3800 val = env->kernelgsbase;
3801 break;
3802# ifndef VBOX
3803 case MSR_TSC_AUX:
3804 val = env->tsc_aux;
3805 break;
3806# endif /*!VBOX*/
3807#endif
3808# ifndef VBOX
3809 case MSR_MTRRphysBase(0):
3810 case MSR_MTRRphysBase(1):
3811 case MSR_MTRRphysBase(2):
3812 case MSR_MTRRphysBase(3):
3813 case MSR_MTRRphysBase(4):
3814 case MSR_MTRRphysBase(5):
3815 case MSR_MTRRphysBase(6):
3816 case MSR_MTRRphysBase(7):
3817 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3818 break;
3819 case MSR_MTRRphysMask(0):
3820 case MSR_MTRRphysMask(1):
3821 case MSR_MTRRphysMask(2):
3822 case MSR_MTRRphysMask(3):
3823 case MSR_MTRRphysMask(4):
3824 case MSR_MTRRphysMask(5):
3825 case MSR_MTRRphysMask(6):
3826 case MSR_MTRRphysMask(7):
3827 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3828 break;
3829 case MSR_MTRRfix64K_00000:
3830 val = env->mtrr_fixed[0];
3831 break;
3832 case MSR_MTRRfix16K_80000:
3833 case MSR_MTRRfix16K_A0000:
3834 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3835 break;
3836 case MSR_MTRRfix4K_C0000:
3837 case MSR_MTRRfix4K_C8000:
3838 case MSR_MTRRfix4K_D0000:
3839 case MSR_MTRRfix4K_D8000:
3840 case MSR_MTRRfix4K_E0000:
3841 case MSR_MTRRfix4K_E8000:
3842 case MSR_MTRRfix4K_F0000:
3843 case MSR_MTRRfix4K_F8000:
3844 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3845 break;
3846 case MSR_MTRRdefType:
3847 val = env->mtrr_deftype;
3848 break;
3849 case MSR_MTRRcap:
3850 if (env->cpuid_features & CPUID_MTRR)
3851 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3852 else
3853 /* XXX: exception ? */
3854 val = 0;
3855 break;
3856 case MSR_MCG_CAP:
3857 val = env->mcg_cap;
3858 break;
3859 case MSR_MCG_CTL:
3860 if (env->mcg_cap & MCG_CTL_P)
3861 val = env->mcg_ctl;
3862 else
3863 val = 0;
3864 break;
3865 case MSR_MCG_STATUS:
3866 val = env->mcg_status;
3867 break;
3868# endif /* !VBOX */
3869 default:
3870# ifndef VBOX
3871 if ((uint32_t)ECX >= MSR_MC0_CTL
3872 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3873 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3874 val = env->mce_banks[offset];
3875 break;
3876 }
3877 /* XXX: exception ? */
3878 val = 0;
3879# else /* VBOX */
3880 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3881 {
3882 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3883 val = 0;
3884 }
3885# endif /* VBOX */
3886 break;
3887 }
3888 EAX = (uint32_t)(val);
3889 EDX = (uint32_t)(val >> 32);
3890
3891# ifdef VBOX_STRICT
3892 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3893 val = 0;
3894 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3895# endif
3896}
3897#endif
3898
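/*
 * LSL/LAR (and VERR/VERW below) never fault on a bad selector; success or
 * failure is reported through ZF alone.  On success LSL returns the
 * expanded segment limit and LAR the access-rights bytes (e2 & 0x00f0ff00);
 * on any descriptor-type or privilege check failure ZF is cleared and 0 is
 * returned.
 */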
3899target_ulong helper_lsl(target_ulong selector1)
3900{
3901 unsigned int limit;
3902 uint32_t e1, e2, eflags, selector;
3903 int rpl, dpl, cpl, type;
3904
3905 selector = selector1 & 0xffff;
3906 eflags = helper_cc_compute_all(CC_OP);
3907 if ((selector & 0xfffc) == 0)
3908 goto fail;
3909 if (load_segment(&e1, &e2, selector) != 0)
3910 goto fail;
3911 rpl = selector & 3;
3912 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3913 cpl = env->hflags & HF_CPL_MASK;
3914 if (e2 & DESC_S_MASK) {
3915 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3916 /* conforming */
3917 } else {
3918 if (dpl < cpl || dpl < rpl)
3919 goto fail;
3920 }
3921 } else {
3922 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3923 switch(type) {
3924 case 1:
3925 case 2:
3926 case 3:
3927 case 9:
3928 case 11:
3929 break;
3930 default:
3931 goto fail;
3932 }
3933 if (dpl < cpl || dpl < rpl) {
3934 fail:
3935 CC_SRC = eflags & ~CC_Z;
3936 return 0;
3937 }
3938 }
3939 limit = get_seg_limit(e1, e2);
3940 CC_SRC = eflags | CC_Z;
3941 return limit;
3942}
3943
3944target_ulong helper_lar(target_ulong selector1)
3945{
3946 uint32_t e1, e2, eflags, selector;
3947 int rpl, dpl, cpl, type;
3948
3949 selector = selector1 & 0xffff;
3950 eflags = helper_cc_compute_all(CC_OP);
3951 if ((selector & 0xfffc) == 0)
3952 goto fail;
3953 if (load_segment(&e1, &e2, selector) != 0)
3954 goto fail;
3955 rpl = selector & 3;
3956 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3957 cpl = env->hflags & HF_CPL_MASK;
3958 if (e2 & DESC_S_MASK) {
3959 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3960 /* conforming */
3961 } else {
3962 if (dpl < cpl || dpl < rpl)
3963 goto fail;
3964 }
3965 } else {
3966 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3967 switch(type) {
3968 case 1:
3969 case 2:
3970 case 3:
3971 case 4:
3972 case 5:
3973 case 9:
3974 case 11:
3975 case 12:
3976 break;
3977 default:
3978 goto fail;
3979 }
3980 if (dpl < cpl || dpl < rpl) {
3981 fail:
3982 CC_SRC = eflags & ~CC_Z;
3983 return 0;
3984 }
3985 }
3986 CC_SRC = eflags | CC_Z;
3987 return e2 & 0x00f0ff00;
3988}
3989
3990void helper_verr(target_ulong selector1)
3991{
3992 uint32_t e1, e2, eflags, selector;
3993 int rpl, dpl, cpl;
3994
3995 selector = selector1 & 0xffff;
3996 eflags = helper_cc_compute_all(CC_OP);
3997 if ((selector & 0xfffc) == 0)
3998 goto fail;
3999 if (load_segment(&e1, &e2, selector) != 0)
4000 goto fail;
4001 if (!(e2 & DESC_S_MASK))
4002 goto fail;
4003 rpl = selector & 3;
4004 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4005 cpl = env->hflags & HF_CPL_MASK;
4006 if (e2 & DESC_CS_MASK) {
4007 if (!(e2 & DESC_R_MASK))
4008 goto fail;
4009 if (!(e2 & DESC_C_MASK)) {
4010 if (dpl < cpl || dpl < rpl)
4011 goto fail;
4012 }
4013 } else {
4014 if (dpl < cpl || dpl < rpl) {
4015 fail:
4016 CC_SRC = eflags & ~CC_Z;
4017 return;
4018 }
4019 }
4020 CC_SRC = eflags | CC_Z;
4021}
4022
4023void helper_verw(target_ulong selector1)
4024{
4025 uint32_t e1, e2, eflags, selector;
4026 int rpl, dpl, cpl;
4027
4028 selector = selector1 & 0xffff;
4029 eflags = helper_cc_compute_all(CC_OP);
4030 if ((selector & 0xfffc) == 0)
4031 goto fail;
4032 if (load_segment(&e1, &e2, selector) != 0)
4033 goto fail;
4034 if (!(e2 & DESC_S_MASK))
4035 goto fail;
4036 rpl = selector & 3;
4037 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4038 cpl = env->hflags & HF_CPL_MASK;
4039 if (e2 & DESC_CS_MASK) {
4040 goto fail;
4041 } else {
4042 if (dpl < cpl || dpl < rpl)
4043 goto fail;
4044 if (!(e2 & DESC_W_MASK)) {
4045 fail:
4046 CC_SRC = eflags & ~CC_Z;
4047 return;
4048 }
4049 }
4050 CC_SRC = eflags | CC_Z;
4051}
4052
4053/* x87 FPU helpers */
4054
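/* Set the given exception flag(s) in the FPU status word; if any pending
   exception is unmasked in the control word, also set the error-summary (ES)
   and busy (B) bits so a later FWAIT or FP instruction raises #MF. */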
4055static void fpu_set_exception(int mask)
4056{
4057 env->fpus |= mask;
4058 if (env->fpus & (~env->fpuc & FPUC_EM))
4059 env->fpus |= FPUS_SE | FPUS_B;
4060}
4061
4062static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4063{
4064 if (b == 0.0)
4065 fpu_set_exception(FPUS_ZE);
4066 return a / b;
4067}
4068
4069static void fpu_raise_exception(void)
4070{
4071 if (env->cr[0] & CR0_NE_MASK) {
4072 raise_exception(EXCP10_COPR);
4073 }
4074#if !defined(CONFIG_USER_ONLY)
4075 else {
4076 cpu_set_ferr(env);
4077 }
4078#endif
4079}
4080
4081void helper_flds_FT0(uint32_t val)
4082{
4083 union {
4084 float32 f;
4085 uint32_t i;
4086 } u;
4087 u.i = val;
4088 FT0 = float32_to_floatx(u.f, &env->fp_status);
4089}
4090
4091void helper_fldl_FT0(uint64_t val)
4092{
4093 union {
4094 float64 f;
4095 uint64_t i;
4096 } u;
4097 u.i = val;
4098 FT0 = float64_to_floatx(u.f, &env->fp_status);
4099}
4100
4101void helper_fildl_FT0(int32_t val)
4102{
4103 FT0 = int32_to_floatx(val, &env->fp_status);
4104}
4105
4106void helper_flds_ST0(uint32_t val)
4107{
4108 int new_fpstt;
4109 union {
4110 float32 f;
4111 uint32_t i;
4112 } u;
4113 new_fpstt = (env->fpstt - 1) & 7;
4114 u.i = val;
4115 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4116 env->fpstt = new_fpstt;
4117 env->fptags[new_fpstt] = 0; /* validate stack entry */
4118}
4119
4120void helper_fldl_ST0(uint64_t val)
4121{
4122 int new_fpstt;
4123 union {
4124 float64 f;
4125 uint64_t i;
4126 } u;
4127 new_fpstt = (env->fpstt - 1) & 7;
4128 u.i = val;
4129 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4130 env->fpstt = new_fpstt;
4131 env->fptags[new_fpstt] = 0; /* validate stack entry */
4132}
4133
4134void helper_fildl_ST0(int32_t val)
4135{
4136 int new_fpstt;
4137 new_fpstt = (env->fpstt - 1) & 7;
4138 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4139 env->fpstt = new_fpstt;
4140 env->fptags[new_fpstt] = 0; /* validate stack entry */
4141}
4142
4143void helper_fildll_ST0(int64_t val)
4144{
4145 int new_fpstt;
4146 new_fpstt = (env->fpstt - 1) & 7;
4147 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4148 env->fpstt = new_fpstt;
4149 env->fptags[new_fpstt] = 0; /* validate stack entry */
4150}
4151
4152#ifndef VBOX
4153uint32_t helper_fsts_ST0(void)
4154#else
4155RTCCUINTREG helper_fsts_ST0(void)
4156#endif
4157{
4158 union {
4159 float32 f;
4160 uint32_t i;
4161 } u;
4162 u.f = floatx_to_float32(ST0, &env->fp_status);
4163 return u.i;
4164}
4165
4166uint64_t helper_fstl_ST0(void)
4167{
4168 union {
4169 float64 f;
4170 uint64_t i;
4171 } u;
4172 u.f = floatx_to_float64(ST0, &env->fp_status);
4173 return u.i;
4174}
4175
4176#ifndef VBOX
4177int32_t helper_fist_ST0(void)
4178#else
4179RTCCINTREG helper_fist_ST0(void)
4180#endif
4181{
4182 int32_t val;
4183 val = floatx_to_int32(ST0, &env->fp_status);
4184 if (val != (int16_t)val)
4185 val = -32768;
4186 return val;
4187}
4188
4189#ifndef VBOX
4190int32_t helper_fistl_ST0(void)
4191#else
4192RTCCINTREG helper_fistl_ST0(void)
4193#endif
4194{
4195 int32_t val;
4196 val = floatx_to_int32(ST0, &env->fp_status);
4197 return val;
4198}
4199
4200int64_t helper_fistll_ST0(void)
4201{
4202 int64_t val;
4203 val = floatx_to_int64(ST0, &env->fp_status);
4204 return val;
4205}
4206
4207#ifndef VBOX
4208int32_t helper_fistt_ST0(void)
4209#else
4210RTCCINTREG helper_fistt_ST0(void)
4211#endif
4212{
4213 int32_t val;
4214 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4215 if (val != (int16_t)val)
4216 val = -32768;
4217 return val;
4218}
4219
4220#ifndef VBOX
4221int32_t helper_fisttl_ST0(void)
4222#else
4223RTCCINTREG helper_fisttl_ST0(void)
4224#endif
4225{
4226 int32_t val;
4227 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4228 return val;
4229}
4230
4231int64_t helper_fisttll_ST0(void)
4232{
4233 int64_t val;
4234 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4235 return val;
4236}
4237
4238void helper_fldt_ST0(target_ulong ptr)
4239{
4240 int new_fpstt;
4241 new_fpstt = (env->fpstt - 1) & 7;
4242 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4243 env->fpstt = new_fpstt;
4244 env->fptags[new_fpstt] = 0; /* validate stack entry */
4245}
4246
4247void helper_fstt_ST0(target_ulong ptr)
4248{
4249 helper_fstt(ST0, ptr);
4250}
4251
4252void helper_fpush(void)
4253{
4254 fpush();
4255}
4256
4257void helper_fpop(void)
4258{
4259 fpop();
4260}
4261
4262void helper_fdecstp(void)
4263{
4264 env->fpstt = (env->fpstt - 1) & 7;
4265 env->fpus &= (~0x4700);
4266}
4267
4268void helper_fincstp(void)
4269{
4270 env->fpstt = (env->fpstt + 1) & 7;
4271 env->fpus &= (~0x4700);
4272}
4273
4274/* FPU move */
4275
4276void helper_ffree_STN(int st_index)
4277{
4278 env->fptags[(env->fpstt + st_index) & 7] = 1;
4279}
4280
4281void helper_fmov_ST0_FT0(void)
4282{
4283 ST0 = FT0;
4284}
4285
4286void helper_fmov_FT0_STN(int st_index)
4287{
4288 FT0 = ST(st_index);
4289}
4290
4291void helper_fmov_ST0_STN(int st_index)
4292{
4293 ST0 = ST(st_index);
4294}
4295
4296void helper_fmov_STN_ST0(int st_index)
4297{
4298 ST(st_index) = ST0;
4299}
4300
4301void helper_fxchg_ST0_STN(int st_index)
4302{
4303 CPU86_LDouble tmp;
4304 tmp = ST(st_index);
4305 ST(st_index) = ST0;
4306 ST0 = tmp;
4307}
4308
4309/* FPU operations */
4310
4311static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4312
4313void helper_fcom_ST0_FT0(void)
4314{
4315 int ret;
4316
4317 ret = floatx_compare(ST0, FT0, &env->fp_status);
4318 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4319}
4320
4321void helper_fucom_ST0_FT0(void)
4322{
4323 int ret;
4324
4325 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4326 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4327}
4328
4329static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4330
4331void helper_fcomi_ST0_FT0(void)
4332{
4333 int eflags;
4334 int ret;
4335
4336 ret = floatx_compare(ST0, FT0, &env->fp_status);
4337 eflags = helper_cc_compute_all(CC_OP);
4338 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4339 CC_SRC = eflags;
4340}
4341
4342void helper_fucomi_ST0_FT0(void)
4343{
4344 int eflags;
4345 int ret;
4346
4347 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4348 eflags = helper_cc_compute_all(CC_OP);
4349 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4350 CC_SRC = eflags;
4351}
4352
4353void helper_fadd_ST0_FT0(void)
4354{
4355 ST0 += FT0;
4356}
4357
4358void helper_fmul_ST0_FT0(void)
4359{
4360 ST0 *= FT0;
4361}
4362
4363void helper_fsub_ST0_FT0(void)
4364{
4365 ST0 -= FT0;
4366}
4367
4368void helper_fsubr_ST0_FT0(void)
4369{
4370 ST0 = FT0 - ST0;
4371}
4372
4373void helper_fdiv_ST0_FT0(void)
4374{
4375 ST0 = helper_fdiv(ST0, FT0);
4376}
4377
4378void helper_fdivr_ST0_FT0(void)
4379{
4380 ST0 = helper_fdiv(FT0, ST0);
4381}
4382
4383/* fp operations between STN and ST0 */
4384
4385void helper_fadd_STN_ST0(int st_index)
4386{
4387 ST(st_index) += ST0;
4388}
4389
4390void helper_fmul_STN_ST0(int st_index)
4391{
4392 ST(st_index) *= ST0;
4393}
4394
4395void helper_fsub_STN_ST0(int st_index)
4396{
4397 ST(st_index) -= ST0;
4398}
4399
4400void helper_fsubr_STN_ST0(int st_index)
4401{
4402 CPU86_LDouble *p;
4403 p = &ST(st_index);
4404 *p = ST0 - *p;
4405}
4406
4407void helper_fdiv_STN_ST0(int st_index)
4408{
4409 CPU86_LDouble *p;
4410 p = &ST(st_index);
4411 *p = helper_fdiv(*p, ST0);
4412}
4413
4414void helper_fdivr_STN_ST0(int st_index)
4415{
4416 CPU86_LDouble *p;
4417 p = &ST(st_index);
4418 *p = helper_fdiv(ST0, *p);
4419}
4420
4421/* misc FPU operations */
4422void helper_fchs_ST0(void)
4423{
4424 ST0 = floatx_chs(ST0);
4425}
4426
4427void helper_fabs_ST0(void)
4428{
4429 ST0 = floatx_abs(ST0);
4430}
4431
4432void helper_fld1_ST0(void)
4433{
4434 ST0 = f15rk[1];
4435}
4436
4437void helper_fldl2t_ST0(void)
4438{
4439 ST0 = f15rk[6];
4440}
4441
4442void helper_fldl2e_ST0(void)
4443{
4444 ST0 = f15rk[5];
4445}
4446
4447void helper_fldpi_ST0(void)
4448{
4449 ST0 = f15rk[2];
4450}
4451
4452void helper_fldlg2_ST0(void)
4453{
4454 ST0 = f15rk[3];
4455}
4456
4457void helper_fldln2_ST0(void)
4458{
4459 ST0 = f15rk[4];
4460}
4461
4462void helper_fldz_ST0(void)
4463{
4464 ST0 = f15rk[0];
4465}
4466
4467void helper_fldz_FT0(void)
4468{
4469 FT0 = f15rk[0];
4470}
4471
4472#ifndef VBOX
4473uint32_t helper_fnstsw(void)
4474#else
4475RTCCUINTREG helper_fnstsw(void)
4476#endif
4477{
4478 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4479}
4480
4481#ifndef VBOX
4482uint32_t helper_fnstcw(void)
4483#else
4484RTCCUINTREG helper_fnstcw(void)
4485#endif
4486{
4487 return env->fpuc;
4488}
4489
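/* Propagate the guest FPU control word into the softfloat status: the RC field
   (bits 10-11) selects the rounding mode and, when 80-bit doubles are in use,
   the PC field (bits 8-9) selects the 32/64/80-bit rounding precision. */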
4490static void update_fp_status(void)
4491{
4492 int rnd_type;
4493
4494 /* set rounding mode */
4495 switch(env->fpuc & RC_MASK) {
4496 default:
4497 case RC_NEAR:
4498 rnd_type = float_round_nearest_even;
4499 break;
4500 case RC_DOWN:
4501 rnd_type = float_round_down;
4502 break;
4503 case RC_UP:
4504 rnd_type = float_round_up;
4505 break;
4506 case RC_CHOP:
4507 rnd_type = float_round_to_zero;
4508 break;
4509 }
4510 set_float_rounding_mode(rnd_type, &env->fp_status);
4511#ifdef FLOATX80
4512 switch((env->fpuc >> 8) & 3) {
4513 case 0:
4514 rnd_type = 32;
4515 break;
4516 case 2:
4517 rnd_type = 64;
4518 break;
4519 case 3:
4520 default:
4521 rnd_type = 80;
4522 break;
4523 }
4524 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4525#endif
4526}
4527
4528void helper_fldcw(uint32_t val)
4529{
4530 env->fpuc = val;
4531 update_fp_status();
4532}
4533
4534void helper_fclex(void)
4535{
4536 env->fpus &= 0x7f00;
4537}
4538
4539void helper_fwait(void)
4540{
4541 if (env->fpus & FPUS_SE)
4542 fpu_raise_exception();
4543}
4544
4545void helper_fninit(void)
4546{
4547 env->fpus = 0;
4548 env->fpstt = 0;
4549 env->fpuc = 0x37f;
4550 env->fptags[0] = 1;
4551 env->fptags[1] = 1;
4552 env->fptags[2] = 1;
4553 env->fptags[3] = 1;
4554 env->fptags[4] = 1;
4555 env->fptags[5] = 1;
4556 env->fptags[6] = 1;
4557 env->fptags[7] = 1;
4558}
4559
4560/* BCD ops */
4561
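/* FBLD/FBSTP operate on a 10-byte packed BCD value: bytes 0..8 hold 18 decimal
   digits, two per byte with the least significant byte first, and bit 7 of
   byte 9 is the sign.  For example, -1234 is stored as 34 12 00 .. 00 80. */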
4562void helper_fbld_ST0(target_ulong ptr)
4563{
4564 CPU86_LDouble tmp;
4565 uint64_t val;
4566 unsigned int v;
4567 int i;
4568
4569 val = 0;
4570 for(i = 8; i >= 0; i--) {
4571 v = ldub(ptr + i);
4572 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4573 }
4574 tmp = val;
4575 if (ldub(ptr + 9) & 0x80)
4576 tmp = -tmp;
4577 fpush();
4578 ST0 = tmp;
4579}
4580
4581void helper_fbst_ST0(target_ulong ptr)
4582{
4583 int v;
4584 target_ulong mem_ref, mem_end;
4585 int64_t val;
4586
4587 val = floatx_to_int64(ST0, &env->fp_status);
4588 mem_ref = ptr;
4589 mem_end = mem_ref + 9;
4590 if (val < 0) {
4591 stb(mem_end, 0x80);
4592 val = -val;
4593 } else {
4594 stb(mem_end, 0x00);
4595 }
4596 while (mem_ref < mem_end) {
4597 if (val == 0)
4598 break;
4599 v = val % 100;
4600 val = val / 100;
4601 v = ((v / 10) << 4) | (v % 10);
4602 stb(mem_ref++, v);
4603 }
4604 while (mem_ref < mem_end) {
4605 stb(mem_ref++, 0);
4606 }
4607}
4608
4609void helper_f2xm1(void)
4610{
4611 ST0 = pow(2.0,ST0) - 1.0;
4612}
4613
4614void helper_fyl2x(void)
4615{
4616 CPU86_LDouble fptemp;
4617
4618 fptemp = ST0;
4619 if (fptemp>0.0){
4620 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4621 ST1 *= fptemp;
4622 fpop();
4623 } else {
4624 env->fpus &= (~0x4700);
4625 env->fpus |= 0x400;
4626 }
4627}
4628
4629void helper_fptan(void)
4630{
4631 CPU86_LDouble fptemp;
4632
4633 fptemp = ST0;
4634 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4635 env->fpus |= 0x400;
4636 } else {
4637 ST0 = tan(fptemp);
4638 fpush();
4639 ST0 = 1.0;
4640 env->fpus &= (~0x400); /* C2 <-- 0 */
4641 /* the above code is for |arg| < 2**52 only */
4642 }
4643}
4644
4645void helper_fpatan(void)
4646{
4647 CPU86_LDouble fptemp, fpsrcop;
4648
4649 fpsrcop = ST1;
4650 fptemp = ST0;
4651 ST1 = atan2(fpsrcop,fptemp);
4652 fpop();
4653}
4654
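/* FXTRACT: replace ST0 with its unbiased exponent and push the significand
   (the original value with its exponent forced to 0) as the new ST0. */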
4655void helper_fxtract(void)
4656{
4657 CPU86_LDoubleU temp;
4658 unsigned int expdif;
4659
4660 temp.d = ST0;
4661 expdif = EXPD(temp) - EXPBIAS;
4662 /*DP exponent bias*/
4663 ST0 = expdif;
4664 fpush();
4665 BIASEXPONENT(temp);
4666 ST0 = temp.d;
4667}
4668
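/* FPREM1/FPREM: partial remainder of ST0 by ST1.  When the exponent difference
   fits in double precision (< 53) the remainder is computed in one step, C2 is
   cleared and the low three quotient bits are reported in C0/C3/C1; otherwise
   only a partial reduction is done and C2 is set, telling the guest to repeat
   the instruction.  FPREM1 rounds the quotient to nearest, FPREM truncates. */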
4669void helper_fprem1(void)
4670{
4671 CPU86_LDouble dblq, fpsrcop, fptemp;
4672 CPU86_LDoubleU fpsrcop1, fptemp1;
4673 int expdif;
4674 signed long long int q;
4675
4676#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4677 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4678#else
4679 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4680#endif
4681 ST0 = 0.0 / 0.0; /* NaN */
4682 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4683 return;
4684 }
4685
4686 fpsrcop = ST0;
4687 fptemp = ST1;
4688 fpsrcop1.d = fpsrcop;
4689 fptemp1.d = fptemp;
4690 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4691
4692 if (expdif < 0) {
4693 /* optimisation? taken from the AMD docs */
4694 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4695 /* ST0 is unchanged */
4696 return;
4697 }
4698
4699 if (expdif < 53) {
4700 dblq = fpsrcop / fptemp;
4701 /* round dblq towards nearest integer */
4702 dblq = rint(dblq);
4703 ST0 = fpsrcop - fptemp * dblq;
4704
4705 /* convert dblq to q by truncating towards zero */
4706 if (dblq < 0.0)
4707 q = (signed long long int)(-dblq);
4708 else
4709 q = (signed long long int)dblq;
4710
4711 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4712 /* (C0,C3,C1) <-- (q2,q1,q0) */
4713 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4714 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4715 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4716 } else {
4717 env->fpus |= 0x400; /* C2 <-- 1 */
4718 fptemp = pow(2.0, expdif - 50);
4719 fpsrcop = (ST0 / ST1) / fptemp;
4720 /* fpsrcop = integer obtained by chopping */
4721 fpsrcop = (fpsrcop < 0.0) ?
4722 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4723 ST0 -= (ST1 * fpsrcop * fptemp);
4724 }
4725}
4726
4727void helper_fprem(void)
4728{
4729 CPU86_LDouble dblq, fpsrcop, fptemp;
4730 CPU86_LDoubleU fpsrcop1, fptemp1;
4731 int expdif;
4732 signed long long int q;
4733
4734#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4735 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4736#else
4737 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4738#endif
4739 ST0 = 0.0 / 0.0; /* NaN */
4740 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4741 return;
4742 }
4743
4744 fpsrcop = (CPU86_LDouble)ST0;
4745 fptemp = (CPU86_LDouble)ST1;
4746 fpsrcop1.d = fpsrcop;
4747 fptemp1.d = fptemp;
4748 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4749
4750 if (expdif < 0) {
4751 /* optimisation? taken from the AMD docs */
4752 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4753 /* ST0 is unchanged */
4754 return;
4755 }
4756
4757 if ( expdif < 53 ) {
4758 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4759 /* round dblq towards zero */
4760 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4761 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4762
4763 /* convert dblq to q by truncating towards zero */
4764 if (dblq < 0.0)
4765 q = (signed long long int)(-dblq);
4766 else
4767 q = (signed long long int)dblq;
4768
4769 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4770 /* (C0,C3,C1) <-- (q2,q1,q0) */
4771 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4772 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4773 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4774 } else {
4775 int N = 32 + (expdif % 32); /* as per AMD docs */
4776 env->fpus |= 0x400; /* C2 <-- 1 */
4777 fptemp = pow(2.0, (double)(expdif - N));
4778 fpsrcop = (ST0 / ST1) / fptemp;
4779 /* fpsrcop = integer obtained by chopping */
4780 fpsrcop = (fpsrcop < 0.0) ?
4781 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4782 ST0 -= (ST1 * fpsrcop * fptemp);
4783 }
4784}
4785
4786void helper_fyl2xp1(void)
4787{
4788 CPU86_LDouble fptemp;
4789
4790 fptemp = ST0;
4791 if ((fptemp+1.0)>0.0) {
4792 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4793 ST1 *= fptemp;
4794 fpop();
4795 } else {
4796 env->fpus &= (~0x4700);
4797 env->fpus |= 0x400;
4798 }
4799}
4800
4801void helper_fsqrt(void)
4802{
4803 CPU86_LDouble fptemp;
4804
4805 fptemp = ST0;
4806 if (fptemp<0.0) {
4807 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4808 env->fpus |= 0x400;
4809 }
4810 ST0 = sqrt(fptemp);
4811}
4812
4813void helper_fsincos(void)
4814{
4815 CPU86_LDouble fptemp;
4816
4817 fptemp = ST0;
4818 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4819 env->fpus |= 0x400;
4820 } else {
4821 ST0 = sin(fptemp);
4822 fpush();
4823 ST0 = cos(fptemp);
4824 env->fpus &= (~0x400); /* C2 <-- 0 */
4825 /* the above code is for |arg| < 2**63 only */
4826 }
4827}
4828
4829void helper_frndint(void)
4830{
4831 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4832}
4833
4834void helper_fscale(void)
4835{
4836 ST0 = ldexp (ST0, (int)(ST1));
4837}
4838
4839void helper_fsin(void)
4840{
4841 CPU86_LDouble fptemp;
4842
4843 fptemp = ST0;
4844 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4845 env->fpus |= 0x400;
4846 } else {
4847 ST0 = sin(fptemp);
4848 env->fpus &= (~0x400); /* C2 <-- 0 */
4849 /* the above code is for |arg| < 2**53 only */
4850 }
4851}
4852
4853void helper_fcos(void)
4854{
4855 CPU86_LDouble fptemp;
4856
4857 fptemp = ST0;
4858 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4859 env->fpus |= 0x400;
4860 } else {
4861 ST0 = cos(fptemp);
4862 env->fpus &= (~0x400); /* C2 <-- 0 */
4863 /* the above code is for |arg| < 2**63 only */
4864 }
4865}
4866
4867void helper_fxam_ST0(void)
4868{
4869 CPU86_LDoubleU temp;
4870 int expdif;
4871
4872 temp.d = ST0;
4873
4874 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4875 if (SIGND(temp))
4876 env->fpus |= 0x200; /* C1 <-- 1 */
4877
4878 /* XXX: test fptags too */
4879 expdif = EXPD(temp);
4880 if (expdif == MAXEXPD) {
4881#ifdef USE_X86LDOUBLE
4882 if (MANTD(temp) == 0x8000000000000000ULL)
4883#else
4884 if (MANTD(temp) == 0)
4885#endif
4886 env->fpus |= 0x500 /*Infinity*/;
4887 else
4888 env->fpus |= 0x100 /*NaN*/;
4889 } else if (expdif == 0) {
4890 if (MANTD(temp) == 0)
4891 env->fpus |= 0x4000 /*Zero*/;
4892 else
4893 env->fpus |= 0x4400 /*Denormal*/;
4894 } else {
4895 env->fpus |= 0x400;
4896 }
4897}
4898
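/* FSTENV/FNSTENV: store the FPU environment (control word, status word with
   TOP merged in, tag word and zeroed instruction/operand pointers) in 32-bit
   or 16-bit format.  The tag word is recomputed per register: 00 valid,
   01 zero, 10 special (NaN/infinity/denormal), 11 empty. */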
4899void helper_fstenv(target_ulong ptr, int data32)
4900{
4901 int fpus, fptag, exp, i;
4902 uint64_t mant;
4903 CPU86_LDoubleU tmp;
4904
4905 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4906 fptag = 0;
4907 for (i=7; i>=0; i--) {
4908 fptag <<= 2;
4909 if (env->fptags[i]) {
4910 fptag |= 3;
4911 } else {
4912 tmp.d = env->fpregs[i].d;
4913 exp = EXPD(tmp);
4914 mant = MANTD(tmp);
4915 if (exp == 0 && mant == 0) {
4916 /* zero */
4917 fptag |= 1;
4918 } else if (exp == 0 || exp == MAXEXPD
4919#ifdef USE_X86LDOUBLE
4920 || (mant & (1LL << 63)) == 0
4921#endif
4922 ) {
4923 /* NaNs, infinity, denormal */
4924 fptag |= 2;
4925 }
4926 }
4927 }
4928 if (data32) {
4929 /* 32 bit */
4930 stl(ptr, env->fpuc);
4931 stl(ptr + 4, fpus);
4932 stl(ptr + 8, fptag);
4933 stl(ptr + 12, 0); /* fpip */
4934 stl(ptr + 16, 0); /* fpcs */
4935 stl(ptr + 20, 0); /* fpoo */
4936 stl(ptr + 24, 0); /* fpos */
4937 } else {
4938 /* 16 bit */
4939 stw(ptr, env->fpuc);
4940 stw(ptr + 2, fpus);
4941 stw(ptr + 4, fptag);
4942 stw(ptr + 6, 0);
4943 stw(ptr + 8, 0);
4944 stw(ptr + 10, 0);
4945 stw(ptr + 12, 0);
4946 }
4947}
4948
4949void helper_fldenv(target_ulong ptr, int data32)
4950{
4951 int i, fpus, fptag;
4952
4953 if (data32) {
4954 env->fpuc = lduw(ptr);
4955 fpus = lduw(ptr + 4);
4956 fptag = lduw(ptr + 8);
4957 }
4958 else {
4959 env->fpuc = lduw(ptr);
4960 fpus = lduw(ptr + 2);
4961 fptag = lduw(ptr + 4);
4962 }
4963 env->fpstt = (fpus >> 11) & 7;
4964 env->fpus = fpus & ~0x3800;
4965 for(i = 0;i < 8; i++) {
4966 env->fptags[i] = ((fptag & 3) == 3);
4967 fptag >>= 2;
4968 }
4969}
4970
4971void helper_fsave(target_ulong ptr, int data32)
4972{
4973 CPU86_LDouble tmp;
4974 int i;
4975
4976 helper_fstenv(ptr, data32);
4977
4978 ptr += (14 << data32);
4979 for(i = 0;i < 8; i++) {
4980 tmp = ST(i);
4981 helper_fstt(tmp, ptr);
4982 ptr += 10;
4983 }
4984
4985 /* fninit */
4986 env->fpus = 0;
4987 env->fpstt = 0;
4988 env->fpuc = 0x37f;
4989 env->fptags[0] = 1;
4990 env->fptags[1] = 1;
4991 env->fptags[2] = 1;
4992 env->fptags[3] = 1;
4993 env->fptags[4] = 1;
4994 env->fptags[5] = 1;
4995 env->fptags[6] = 1;
4996 env->fptags[7] = 1;
4997}
4998
4999void helper_frstor(target_ulong ptr, int data32)
5000{
5001 CPU86_LDouble tmp;
5002 int i;
5003
5004 helper_fldenv(ptr, data32);
5005 ptr += (14 << data32);
5006
5007 for(i = 0;i < 8; i++) {
5008 tmp = helper_fldt(ptr);
5009 ST(i) = tmp;
5010 ptr += 10;
5011 }
5012}
5013
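/* FXSAVE: store the x87/MMX/SSE state in the FXSAVE image: FCW at +0x00, FSW
   at +0x02, abridged tag byte at +0x04 (bit set = register valid), zeroed
   instruction/data pointers, MXCSR at +0x18, the eight ST/MM registers in
   16-byte slots from +0x20 and the XMM registers from +0xa0.  With EFER.FFXSR
   set at CPL 0 in long mode the XMM registers are skipped. */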
5014void helper_fxsave(target_ulong ptr, int data64)
5015{
5016 int fpus, fptag, i, nb_xmm_regs;
5017 CPU86_LDouble tmp;
5018 target_ulong addr;
5019
5020 /* The operand must be 16 byte aligned */
5021 if (ptr & 0xf) {
5022 raise_exception(EXCP0D_GPF);
5023 }
5024
5025 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5026 fptag = 0;
5027 for(i = 0; i < 8; i++) {
5028 fptag |= (env->fptags[i] << i);
5029 }
5030 stw(ptr, env->fpuc);
5031 stw(ptr + 2, fpus);
5032 stw(ptr + 4, fptag ^ 0xff);
5033#ifdef TARGET_X86_64
5034 if (data64) {
5035 stq(ptr + 0x08, 0); /* rip */
5036 stq(ptr + 0x10, 0); /* rdp */
5037 } else
5038#endif
5039 {
5040 stl(ptr + 0x08, 0); /* eip */
5041 stl(ptr + 0x0c, 0); /* sel */
5042 stl(ptr + 0x10, 0); /* dp */
5043 stl(ptr + 0x14, 0); /* sel */
5044 }
5045
5046 addr = ptr + 0x20;
5047 for(i = 0;i < 8; i++) {
5048 tmp = ST(i);
5049 helper_fstt(tmp, addr);
5050 addr += 16;
5051 }
5052
5053 if (env->cr[4] & CR4_OSFXSR_MASK) {
5054 /* XXX: finish it */
5055 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5056 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5057 if (env->hflags & HF_CS64_MASK)
5058 nb_xmm_regs = 16;
5059 else
5060 nb_xmm_regs = 8;
5061 addr = ptr + 0xa0;
5062 /* Fast FXSAVE leaves out the XMM registers */
5063 if (!(env->efer & MSR_EFER_FFXSR)
5064 || (env->hflags & HF_CPL_MASK)
5065 || !(env->hflags & HF_LMA_MASK)) {
5066 for(i = 0; i < nb_xmm_regs; i++) {
5067 stq(addr, env->xmm_regs[i].XMM_Q(0));
5068 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5069 addr += 16;
5070 }
5071 }
5072 }
5073}
5074
5075void helper_fxrstor(target_ulong ptr, int data64)
5076{
5077 int i, fpus, fptag, nb_xmm_regs;
5078 CPU86_LDouble tmp;
5079 target_ulong addr;
5080
5081 /* The operand must be 16 byte aligned */
5082 if (ptr & 0xf) {
5083 raise_exception(EXCP0D_GPF);
5084 }
5085
5086 env->fpuc = lduw(ptr);
5087 fpus = lduw(ptr + 2);
5088 fptag = lduw(ptr + 4);
5089 env->fpstt = (fpus >> 11) & 7;
5090 env->fpus = fpus & ~0x3800;
5091 fptag ^= 0xff;
5092 for(i = 0;i < 8; i++) {
5093 env->fptags[i] = ((fptag >> i) & 1);
5094 }
5095
5096 addr = ptr + 0x20;
5097 for(i = 0;i < 8; i++) {
5098 tmp = helper_fldt(addr);
5099 ST(i) = tmp;
5100 addr += 16;
5101 }
5102
5103 if (env->cr[4] & CR4_OSFXSR_MASK) {
5104 /* XXX: finish it */
5105 env->mxcsr = ldl(ptr + 0x18);
5106 //ldl(ptr + 0x1c);
5107 if (env->hflags & HF_CS64_MASK)
5108 nb_xmm_regs = 16;
5109 else
5110 nb_xmm_regs = 8;
5111 addr = ptr + 0xa0;
5112 /* Fast FXRSTOR leaves out the XMM registers */
5113 if (!(env->efer & MSR_EFER_FFXSR)
5114 || (env->hflags & HF_CPL_MASK)
5115 || !(env->hflags & HF_LMA_MASK)) {
5116 for(i = 0; i < nb_xmm_regs; i++) {
5117#if !defined(VBOX) || __GNUC__ < 4
5118 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5119 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5120#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5121# if 1
5122 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5123 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5124 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5125 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5126# else
5127 /* this works fine on Mac OS X, gcc 4.0.1 */
5128 uint64_t u64 = ldq(addr);
5129 env->xmm_regs[i].XMM_Q(0) = u64;
5130 u64 = ldq(addr + 8);
5131 env->xmm_regs[i].XMM_Q(1) = u64;
5132# endif
5133#endif
5134 addr += 16;
5135 }
5136 }
5137 }
5138}
5139
5140#ifndef USE_X86LDOUBLE
5141
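/* Without USE_X86LDOUBLE, CPU86_LDouble is a plain double; these helpers
   convert between that and the 80-bit extended format used in memory images
   (64-bit significand with an explicit integer bit, 15-bit exponent biased
   by 16383, sign in bit 15 of the exponent word). */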
5142void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5143{
5144 CPU86_LDoubleU temp;
5145 int e;
5146
5147 temp.d = f;
5148 /* mantissa */
5149 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5150 /* exponent + sign */
5151 e = EXPD(temp) - EXPBIAS + 16383;
5152 e |= SIGND(temp) >> 16;
5153 *pexp = e;
5154}
5155
5156CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5157{
5158 CPU86_LDoubleU temp;
5159 int e;
5160 uint64_t ll;
5161
5162 /* XXX: handle overflow ? */
5163 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5164 e |= (upper >> 4) & 0x800; /* sign */
5165 ll = (mant >> 11) & ((1LL << 52) - 1);
5166#ifdef __arm__
5167 temp.l.upper = (e << 20) | (ll >> 32);
5168 temp.l.lower = ll;
5169#else
5170 temp.ll = ll | ((uint64_t)e << 52);
5171#endif
5172 return temp.d;
5173}
5174
5175#else
5176
5177void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5178{
5179 CPU86_LDoubleU temp;
5180
5181 temp.d = f;
5182 *pmant = temp.l.lower;
5183 *pexp = temp.l.upper;
5184}
5185
5186CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5187{
5188 CPU86_LDoubleU temp;
5189
5190 temp.l.upper = upper;
5191 temp.l.lower = mant;
5192 return temp.d;
5193}
5194#endif
5195
5196#ifdef TARGET_X86_64
5197
5198//#define DEBUG_MULDIV
5199
5200static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5201{
5202 *plow += a;
5203 /* carry test */
5204 if (*plow < a)
5205 (*phigh)++;
5206 *phigh += b;
5207}
5208
5209static void neg128(uint64_t *plow, uint64_t *phigh)
5210{
5211 *plow = ~ *plow;
5212 *phigh = ~ *phigh;
5213 add128(plow, phigh, 1, 0);
5214}
5215
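/* Divide the 128-bit value {*phigh:*plow} by b: the quotient is returned in
   *plow and the remainder in *phigh.  When the high half is non-zero a simple
   restoring shift-and-subtract loop is used. */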
5216/* return TRUE if overflow */
5217static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5218{
5219 uint64_t q, r, a1, a0;
5220 int i, qb, ab;
5221
5222 a0 = *plow;
5223 a1 = *phigh;
5224 if (a1 == 0) {
5225 q = a0 / b;
5226 r = a0 % b;
5227 *plow = q;
5228 *phigh = r;
5229 } else {
5230 if (a1 >= b)
5231 return 1;
5232 /* XXX: use a better algorithm */
5233 for(i = 0; i < 64; i++) {
5234 ab = a1 >> 63;
5235 a1 = (a1 << 1) | (a0 >> 63);
5236 if (ab || a1 >= b) {
5237 a1 -= b;
5238 qb = 1;
5239 } else {
5240 qb = 0;
5241 }
5242 a0 = (a0 << 1) | qb;
5243 }
5244#if defined(DEBUG_MULDIV)
5245 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5246 *phigh, *plow, b, a0, a1);
5247#endif
5248 *plow = a0;
5249 *phigh = a1;
5250 }
5251 return 0;
5252}
5253
5254/* return TRUE if overflow */
5255static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5256{
5257 int sa, sb;
5258 sa = ((int64_t)*phigh < 0);
5259 if (sa)
5260 neg128(plow, phigh);
5261 sb = (b < 0);
5262 if (sb)
5263 b = -b;
5264 if (div64(plow, phigh, b) != 0)
5265 return 1;
5266 if (sa ^ sb) {
5267 if (*plow > (1ULL << 63))
5268 return 1;
5269 *plow = - *plow;
5270 } else {
5271 if (*plow >= (1ULL << 63))
5272 return 1;
5273 }
5274 if (sa)
5275 *phigh = - *phigh;
5276 return 0;
5277}
5278
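/* 64-bit MUL/IMUL helpers: the full 128-bit product goes to RDX:RAX, and
   CC_DST/CC_SRC are set so the lazy flag code can derive CF/OF (for IMUL,
   CC_SRC is non-zero iff the high half differs from the sign extension of
   the low half). */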
5279void helper_mulq_EAX_T0(target_ulong t0)
5280{
5281 uint64_t r0, r1;
5282
5283 mulu64(&r0, &r1, EAX, t0);
5284 EAX = r0;
5285 EDX = r1;
5286 CC_DST = r0;
5287 CC_SRC = r1;
5288}
5289
5290void helper_imulq_EAX_T0(target_ulong t0)
5291{
5292 uint64_t r0, r1;
5293
5294 muls64(&r0, &r1, EAX, t0);
5295 EAX = r0;
5296 EDX = r1;
5297 CC_DST = r0;
5298 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5299}
5300
5301target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5302{
5303 uint64_t r0, r1;
5304
5305 muls64(&r0, &r1, t0, t1);
5306 CC_DST = r0;
5307 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5308 return r0;
5309}
5310
5311void helper_divq_EAX(target_ulong t0)
5312{
5313 uint64_t r0, r1;
5314 if (t0 == 0) {
5315 raise_exception(EXCP00_DIVZ);
5316 }
5317 r0 = EAX;
5318 r1 = EDX;
5319 if (div64(&r0, &r1, t0))
5320 raise_exception(EXCP00_DIVZ);
5321 EAX = r0;
5322 EDX = r1;
5323}
5324
5325void helper_idivq_EAX(target_ulong t0)
5326{
5327 uint64_t r0, r1;
5328 if (t0 == 0) {
5329 raise_exception(EXCP00_DIVZ);
5330 }
5331 r0 = EAX;
5332 r1 = EDX;
5333 if (idiv64(&r0, &r1, t0))
5334 raise_exception(EXCP00_DIVZ);
5335 EAX = r0;
5336 EDX = r1;
5337}
5338#endif
5339
5340static void do_hlt(void)
5341{
5342 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5343 env->halted = 1;
5344 env->exception_index = EXCP_HLT;
5345 cpu_loop_exit();
5346}
5347
5348void helper_hlt(int next_eip_addend)
5349{
5350 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5351 EIP += next_eip_addend;
5352
5353 do_hlt();
5354}
5355
5356void helper_monitor(target_ulong ptr)
5357{
5358#ifdef VBOX
5359 if ((uint32_t)ECX > 1)
5360 raise_exception(EXCP0D_GPF);
5361#else /* !VBOX */
5362 if ((uint32_t)ECX != 0)
5363 raise_exception(EXCP0D_GPF);
5364#endif /* !VBOX */
5365 /* XXX: store address ? */
5366 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5367}
5368
5369void helper_mwait(int next_eip_addend)
5370{
5371 if ((uint32_t)ECX != 0)
5372 raise_exception(EXCP0D_GPF);
5373#ifdef VBOX
5374 helper_hlt(next_eip_addend);
5375#else /* !VBOX */
5376 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5377 EIP += next_eip_addend;
5378
5379 /* XXX: not complete but not completely erroneous */
5380 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5381 /* more than one CPU: do not sleep because another CPU may
5382 wake this one */
5383 } else {
5384 do_hlt();
5385 }
5386#endif /* !VBOX */
5387}
5388
5389void helper_debug(void)
5390{
5391 env->exception_index = EXCP_DEBUG;
5392 cpu_loop_exit();
5393}
5394
5395void helper_reset_rf(void)
5396{
5397 env->eflags &= ~RF_MASK;
5398}
5399
5400void helper_raise_interrupt(int intno, int next_eip_addend)
5401{
5402 raise_interrupt(intno, 1, 0, next_eip_addend);
5403}
5404
5405void helper_raise_exception(int exception_index)
5406{
5407 raise_exception(exception_index);
5408}
5409
5410void helper_cli(void)
5411{
5412 env->eflags &= ~IF_MASK;
5413}
5414
5415void helper_sti(void)
5416{
5417 env->eflags |= IF_MASK;
5418}
5419
5420#ifdef VBOX
5421void helper_cli_vme(void)
5422{
5423 env->eflags &= ~VIF_MASK;
5424}
5425
5426void helper_sti_vme(void)
5427{
5428 /* First check, then change eflags according to the AMD manual */
5429 if (env->eflags & VIP_MASK) {
5430 raise_exception(EXCP0D_GPF);
5431 }
5432 env->eflags |= VIF_MASK;
5433}
5434#endif /* VBOX */
5435
5436#if 0
5437/* vm86plus instructions */
5438void helper_cli_vm(void)
5439{
5440 env->eflags &= ~VIF_MASK;
5441}
5442
5443void helper_sti_vm(void)
5444{
5445 env->eflags |= VIF_MASK;
5446 if (env->eflags & VIP_MASK) {
5447 raise_exception(EXCP0D_GPF);
5448 }
5449}
5450#endif
5451
5452void helper_set_inhibit_irq(void)
5453{
5454 env->hflags |= HF_INHIBIT_IRQ_MASK;
5455}
5456
5457void helper_reset_inhibit_irq(void)
5458{
5459 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5460}
5461
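/* BOUND: load the signed lower and upper bounds from the memory operand and
   raise #BR (EXCP05_BOUND) if the index is outside [low, high]. */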
5462void helper_boundw(target_ulong a0, int v)
5463{
5464 int low, high;
5465 low = ldsw(a0);
5466 high = ldsw(a0 + 2);
5467 v = (int16_t)v;
5468 if (v < low || v > high) {
5469 raise_exception(EXCP05_BOUND);
5470 }
5471}
5472
5473void helper_boundl(target_ulong a0, int v)
5474{
5475 int low, high;
5476 low = ldl(a0);
5477 high = ldl(a0 + 4);
5478 if (v < low || v > high) {
5479 raise_exception(EXCP05_BOUND);
5480 }
5481}
5482
5483static float approx_rsqrt(float a)
5484{
5485 return 1.0 / sqrt(a);
5486}
5487
5488static float approx_rcp(float a)
5489{
5490 return 1.0 / a;
5491}
5492
5493#if !defined(CONFIG_USER_ONLY)
5494
5495#define MMUSUFFIX _mmu
5496
5497#define SHIFT 0
5498#include "softmmu_template.h"
5499
5500#define SHIFT 1
5501#include "softmmu_template.h"
5502
5503#define SHIFT 2
5504#include "softmmu_template.h"
5505
5506#define SHIFT 3
5507#include "softmmu_template.h"
5508
5509#endif
5510
5511#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5512/* This code assumes a real physical address always fits into a host CPU register,
5513 which is wrong in general but true for our current use cases. */
5514RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5515{
5516 return remR3PhysReadS8(addr);
5517}
5518RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5519{
5520 return remR3PhysReadU8(addr);
5521}
5522void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5523{
5524 remR3PhysWriteU8(addr, val);
5525}
5526RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5527{
5528 return remR3PhysReadS16(addr);
5529}
5530RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5531{
5532 return remR3PhysReadU16(addr);
5533}
5534void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5535{
5536 remR3PhysWriteU16(addr, val);
5537}
5538RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5539{
5540 return remR3PhysReadS32(addr);
5541}
5542RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5543{
5544 return remR3PhysReadU32(addr);
5545}
5546void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5547{
5548 remR3PhysWriteU32(addr, val);
5549}
5550uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5551{
5552 return remR3PhysReadU64(addr);
5553}
5554void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5555{
5556 remR3PhysWriteU64(addr, val);
5557}
5558#endif /* VBOX */
5559
5560#if !defined(CONFIG_USER_ONLY)
5561 /* try to fill the TLB and raise an exception on error. If retaddr is
5562 NULL, it means that the function was called in C code (i.e. not
5563 from generated code or from helper.c) */
5564/* XXX: fix it to restore all registers */
5565void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5566{
5567 TranslationBlock *tb;
5568 int ret;
5569 unsigned long pc;
5570 CPUX86State *saved_env;
5571
5572 /* XXX: hack to restore env in all cases, even if not called from
5573 generated code */
5574 saved_env = env;
5575 env = cpu_single_env;
5576
5577 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5578 if (ret) {
5579 if (retaddr) {
5580 /* now we have a real cpu fault */
5581 pc = (unsigned long)retaddr;
5582 tb = tb_find_pc(pc);
5583 if (tb) {
5584 /* the PC is inside the translated code. It means that we have
5585 a virtual CPU fault */
5586 cpu_restore_state(tb, env, pc, NULL);
5587 }
5588 }
5589 raise_exception_err(env->exception_index, env->error_code);
5590 }
5591 env = saved_env;
5592}
5593#endif
5594
5595#ifdef VBOX
5596
5597/**
5598 * Correctly computes the eflags.
5599 * @returns eflags.
5600 * @param env1 CPU environment.
5601 */
5602uint32_t raw_compute_eflags(CPUX86State *env1)
5603{
5604 CPUX86State *savedenv = env;
5605 uint32_t efl;
5606 env = env1;
5607 efl = compute_eflags();
5608 env = savedenv;
5609 return efl;
5610}
5611
5612/**
5613 * Reads a byte from a virtual address in the guest memory area.
5614 * XXX: does this work for all addresses? swapped-out pages?
5615 * @returns read data byte.
5616 * @param env1 CPU environment.
5617 * @param pvAddr GC Virtual address.
5618 */
5619uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5620{
5621 CPUX86State *savedenv = env;
5622 uint8_t u8;
5623 env = env1;
5624 u8 = ldub_kernel(addr);
5625 env = savedenv;
5626 return u8;
5627}
5628
5629/**
5630 * Reads a 16-bit word from a virtual address in the guest memory area.
5631 * XXX: does this work for all addresses? swapped-out pages?
5632 * @returns read data word.
5633 * @param env1 CPU environment.
5634 * @param pvAddr GC Virtual address.
5635 */
5636uint16_t read_word(CPUX86State *env1, target_ulong addr)
5637{
5638 CPUX86State *savedenv = env;
5639 uint16_t u16;
5640 env = env1;
5641 u16 = lduw_kernel(addr);
5642 env = savedenv;
5643 return u16;
5644}
5645
5646/**
5647 * Reads a 32-bit dword from a virtual address in the guest memory area.
5648 * XXX: does this work for all addresses? swapped-out pages?
5649 * @returns read data dword.
5650 * @param env1 CPU environment.
5651 * @param pvAddr GC Virtual address.
5652 */
5653uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5654{
5655 CPUX86State *savedenv = env;
5656 uint32_t u32;
5657 env = env1;
5658 u32 = ldl_kernel(addr);
5659 env = savedenv;
5660 return u32;
5661}
5662
5663/**
5664 * Writes a byte to a virtual address in the guest memory area.
5665 * XXX: does this work for all addresses? swapped-out pages?
5666 *
5667 * @param env1 CPU environment.
5668 * @param pvAddr GC Virtual address.
5669 * @param val byte value
5670 */
5671void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5672{
5673 CPUX86State *savedenv = env;
5674 env = env1;
5675 stb(addr, val);
5676 env = savedenv;
5677}
5678
5679void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5680{
5681 CPUX86State *savedenv = env;
5682 env = env1;
5683 stw(addr, val);
5684 env = savedenv;
5685}
5686
5687void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5688{
5689 CPUX86State *savedenv = env;
5690 env = env1;
5691 stl(addr, val);
5692 env = savedenv;
5693}
5694
5695/**
5696 * Correctly loads selector into segment register with updating internal
5697 * qemu data/caches.
5698 * @param env1 CPU environment.
5699 * @param seg_reg Segment register.
5700 * @param selector Selector to load.
5701 */
5702void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5703{
5704 CPUX86State *savedenv = env;
5705#ifdef FORCE_SEGMENT_SYNC
5706 jmp_buf old_buf;
5707#endif
5708
5709 env = env1;
5710
5711 if ( env->eflags & X86_EFL_VM
5712 || !(env->cr[0] & X86_CR0_PE))
5713 {
5714 load_seg_vm(seg_reg, selector);
5715
5716 env = savedenv;
5717
5718 /* Successful sync. */
5719 env1->segs[seg_reg].newselector = 0;
5720 }
5721 else
5722 {
5723 /* For some reason this works even without saving/restoring the jump buffer; since the
5724 code is time critical, let's not do that. */
5725#ifdef FORCE_SEGMENT_SYNC
5726 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5727#endif
5728 if (setjmp(env1->jmp_env) == 0)
5729 {
5730 if (seg_reg == R_CS)
5731 {
5732 uint32_t e1, e2;
5733 e1 = e2 = 0;
5734 load_segment(&e1, &e2, selector);
5735 cpu_x86_load_seg_cache(env, R_CS, selector,
5736 get_seg_base(e1, e2),
5737 get_seg_limit(e1, e2),
5738 e2);
5739 }
5740 else
5741 helper_load_seg(seg_reg, selector);
5742 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5743 loading 0 selectors, which in turn led to subtle problems like #3588 */
5744
5745 env = savedenv;
5746
5747 /* Successful sync. */
5748 env1->segs[seg_reg].newselector = 0;
5749 }
5750 else
5751 {
5752 env = savedenv;
5753
5754 /* Postpone sync until the guest uses the selector. */
5755 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5756 env1->segs[seg_reg].newselector = selector;
5757 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5758 env1->exception_index = -1;
5759 env1->error_code = 0;
5760 env1->old_exception = -1;
5761 }
5762#ifdef FORCE_SEGMENT_SYNC
5763 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5764#endif
5765 }
5766
5767}
5768
5769DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5770{
5771 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5772}
5773
5774
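/**
 * Translates the instruction at the current CS:EIP into a private translation
 * block, executes it via the generated code until EIP advances (REP'ed
 * instructions re-run the block), and then frees the block again.  If the
 * instruction leaves interrupts inhibited (STI, MOV SS), the next instruction
 * is emulated as well so the fused pair stays atomic.
 *
 * @returns 0.
 * @param env1 CPU environment.
 */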
5775int emulate_single_instr(CPUX86State *env1)
5776{
5777 TranslationBlock *tb;
5778 TranslationBlock *current;
5779 int flags;
5780 uint8_t *tc_ptr;
5781 target_ulong old_eip;
5782
5783 /* ensures env is loaded! */
5784 CPUX86State *savedenv = env;
5785 env = env1;
5786
5787 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5788
5789 current = env->current_tb;
5790 env->current_tb = NULL;
5791 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5792
5793 /*
5794 * Translate only one instruction.
5795 */
5796 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5797 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5798 env->segs[R_CS].base, flags, 0);
5799
5800 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5801
5802
5803 /* tb_link_phys: */
5804 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5805 tb->jmp_next[0] = NULL;
5806 tb->jmp_next[1] = NULL;
5807 Assert(tb->jmp_next[0] == NULL);
5808 Assert(tb->jmp_next[1] == NULL);
5809 if (tb->tb_next_offset[0] != 0xffff)
5810 tb_reset_jump(tb, 0);
5811 if (tb->tb_next_offset[1] != 0xffff)
5812 tb_reset_jump(tb, 1);
5813
5814 /*
5815 * Execute it using emulation
5816 */
5817 old_eip = env->eip;
5818 env->current_tb = tb;
5819
5820 /*
5821 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5822 * This is perhaps not a very safe hack.
5823 */
5824 while (old_eip == env->eip)
5825 {
5826 tc_ptr = tb->tc_ptr;
5827
5828#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5829 int fake_ret;
5830 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5831#else
5832 tcg_qemu_tb_exec(tc_ptr);
5833#endif
5834
5835 /*
5836 * Exit once we detect an external interrupt and interrupts are enabled
5837 */
5838 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER))
5839 || ( (env->eflags & IF_MASK)
5840 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5841 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5842 )
5843 {
5844 break;
5845 }
5846 }
5847 env->current_tb = current;
5848
5849 tb_phys_invalidate(tb, -1);
5850 tb_free(tb);
5851/*
5852 Assert(tb->tb_next_offset[0] == 0xffff);
5853 Assert(tb->tb_next_offset[1] == 0xffff);
5854 Assert(tb->tb_next[0] == 0xffff);
5855 Assert(tb->tb_next[1] == 0xffff);
5856 Assert(tb->jmp_next[0] == NULL);
5857 Assert(tb->jmp_next[1] == NULL);
5858 Assert(tb->jmp_first == NULL); */
5859
5860 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5861
5862 /*
5863 * Execute the next instruction when we encounter instruction fusing.
5864 */
5865 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5866 {
5867 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5868 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5869 emulate_single_instr(env);
5870 }
5871
5872 env = savedenv;
5873 return 0;
5874}
5875
5876/**
5877 * Correctly loads a new ldtr selector.
5878 *
5879 * @param env1 CPU environment.
5880 * @param selector Selector to load.
5881 */
5882void sync_ldtr(CPUX86State *env1, int selector)
5883{
5884 CPUX86State *saved_env = env;
5885 if (setjmp(env1->jmp_env) == 0)
5886 {
5887 env = env1;
5888 helper_lldt(selector);
5889 env = saved_env;
5890 }
5891 else
5892 {
5893 env = saved_env;
5894#ifdef VBOX_STRICT
5895 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5896#endif
5897 }
5898}
5899
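/**
 * Reads the SS:ESP pair for the given privilege level from the current TSS,
 * handling both 16-bit and 32-bit TSS formats.
 *
 * @returns 1 on success, 0 if the entry lies outside the TSS limit.
 * @param env1 CPU environment.
 * @param ss_ptr Where to store the stack selector.
 * @param esp_ptr Where to store the stack pointer.
 * @param dpl Privilege level to look up.
 */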
5900int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5901 uint32_t *esp_ptr, int dpl)
5902{
5903 int type, index, shift;
5904
5905 CPUX86State *savedenv = env;
5906 env = env1;
5907
5908 if (!(env->tr.flags & DESC_P_MASK))
5909 cpu_abort(env, "invalid tss");
5910 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5911 if ((type & 7) != 1)
5912 cpu_abort(env, "invalid tss type %d", type);
5913 shift = type >> 3;
5914 index = (dpl * 4 + 2) << shift;
5915 if (index + (4 << shift) - 1 > env->tr.limit)
5916 {
5917 env = savedenv;
5918 return 0;
5919 }
5920 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5921
5922 if (shift == 0) {
5923 *esp_ptr = lduw_kernel(env->tr.base + index);
5924 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5925 } else {
5926 *esp_ptr = ldl_kernel(env->tr.base + index);
5927 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5928 }
5929
5930 env = savedenv;
5931 return 1;
5932}
5933
5934//*****************************************************************************
5935// Needs to be at the bottom of the file (overriding macros)
5936
5937static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5938{
5939 return *(CPU86_LDouble *)ptr;
5940}
5941
5942static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5943{
5944 *(CPU86_LDouble *)ptr = f;
5945}
5946
5947#undef stw
5948#undef stl
5949#undef stq
5950#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5951#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5952#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5953
5954//*****************************************************************************
5955void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5956{
5957 int fpus, fptag, i, nb_xmm_regs;
5958 CPU86_LDouble tmp;
5959 uint8_t *addr;
5960 int data64 = !!(env->hflags & HF_LMA_MASK);
5961
5962 if (env->cpuid_features & CPUID_FXSR)
5963 {
5964 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5965 fptag = 0;
5966 for(i = 0; i < 8; i++) {
5967 fptag |= (env->fptags[i] << i);
5968 }
5969 stw(ptr, env->fpuc);
5970 stw(ptr + 2, fpus);
5971 stw(ptr + 4, fptag ^ 0xff);
5972
5973 addr = ptr + 0x20;
5974 for(i = 0;i < 8; i++) {
5975 tmp = ST(i);
5976 helper_fstt_raw(tmp, addr);
5977 addr += 16;
5978 }
5979
5980 if (env->cr[4] & CR4_OSFXSR_MASK) {
5981 /* XXX: finish it */
5982 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5983 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5984 nb_xmm_regs = 8 << data64;
5985 addr = ptr + 0xa0;
5986 for(i = 0; i < nb_xmm_regs; i++) {
5987#if __GNUC__ < 4
5988 stq(addr, env->xmm_regs[i].XMM_Q(0));
5989 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5990#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5991 stl(addr, env->xmm_regs[i].XMM_L(0));
5992 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5993 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5994 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5995#endif
5996 addr += 16;
5997 }
5998 }
5999 }
6000 else
6001 {
6002 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6003 int fptag;
6004
6005 fp->FCW = env->fpuc;
6006 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6007 fptag = 0;
6008 for (i=7; i>=0; i--) {
6009 fptag <<= 2;
6010 if (env->fptags[i]) {
6011 fptag |= 3;
6012 } else {
6013 /* the FPU automatically computes it */
6014 }
6015 }
6016 fp->FTW = fptag;
6017
6018 for(i = 0;i < 8; i++) {
6019 tmp = ST(i);
6020 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6021 }
6022 }
6023}
6024
6025//*****************************************************************************
6026#undef lduw
6027#undef ldl
6028#undef ldq
6029#define lduw(a) *(uint16_t *)(a)
6030#define ldl(a) *(uint32_t *)(a)
6031#define ldq(a) *(uint64_t *)(a)
6032//*****************************************************************************
6033void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6034{
6035 int i, fpus, fptag, nb_xmm_regs;
6036 CPU86_LDouble tmp;
6037 uint8_t *addr;
6038 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6039
6040 if (env->cpuid_features & CPUID_FXSR)
6041 {
6042 env->fpuc = lduw(ptr);
6043 fpus = lduw(ptr + 2);
6044 fptag = lduw(ptr + 4);
6045 env->fpstt = (fpus >> 11) & 7;
6046 env->fpus = fpus & ~0x3800;
6047 fptag ^= 0xff;
6048 for(i = 0;i < 8; i++) {
6049 env->fptags[i] = ((fptag >> i) & 1);
6050 }
6051
6052 addr = ptr + 0x20;
6053 for(i = 0;i < 8; i++) {
6054 tmp = helper_fldt_raw(addr);
6055 ST(i) = tmp;
6056 addr += 16;
6057 }
6058
6059 if (env->cr[4] & CR4_OSFXSR_MASK) {
6060 /* XXX: finish it, endianness */
6061 env->mxcsr = ldl(ptr + 0x18);
6062 //ldl(ptr + 0x1c);
6063 nb_xmm_regs = 8 << data64;
6064 addr = ptr + 0xa0;
6065 for(i = 0; i < nb_xmm_regs; i++) {
6066#if HC_ARCH_BITS == 32
6067 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6068 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6069 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6070 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6071 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6072#else
6073 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6074 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6075#endif
6076 addr += 16;
6077 }
6078 }
6079 }
6080 else
6081 {
6082 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6083 int fptag, j;
6084
6085 env->fpuc = fp->FCW;
6086 env->fpstt = (fp->FSW >> 11) & 7;
6087 env->fpus = fp->FSW & ~0x3800;
6088 fptag = fp->FTW;
6089 for(i = 0;i < 8; i++) {
6090 env->fptags[i] = ((fptag & 3) == 3);
6091 fptag >>= 2;
6092 }
6093 j = env->fpstt;
6094 for(i = 0;i < 8; i++) {
6095 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6096 ST(i) = tmp;
6097 }
6098 }
6099}
6100//*****************************************************************************
6101//*****************************************************************************
6102
6103#endif /* VBOX */
6104
6105/* Secure Virtual Machine helpers */
6106
6107#if defined(CONFIG_USER_ONLY)
6108
6109void helper_vmrun(int aflag, int next_eip_addend)
6110{
6111}
6112void helper_vmmcall(void)
6113{
6114}
6115void helper_vmload(int aflag)
6116{
6117}
6118void helper_vmsave(int aflag)
6119{
6120}
6121void helper_stgi(void)
6122{
6123}
6124void helper_clgi(void)
6125{
6126}
6127void helper_skinit(void)
6128{
6129}
6130void helper_invlpga(int aflag)
6131{
6132}
6133void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6134{
6135}
6136void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6137{
6138}
6139
6140void helper_svm_check_io(uint32_t port, uint32_t param,
6141 uint32_t next_eip_addend)
6142{
6143}
6144#else
6145
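/* The VMCB keeps segment attributes in a packed 12-bit field: bits 0-7 are
   descriptor bits 40-47 (type, S, DPL, P) and bits 8-11 are descriptor bits
   52-55 (AVL, L, D/B, G).  These helpers convert between that and the flags
   format used in the segment cache. */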
6146static inline void svm_save_seg(target_phys_addr_t addr,
6147 const SegmentCache *sc)
6148{
6149 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6150 sc->selector);
6151 stq_phys(addr + offsetof(struct vmcb_seg, base),
6152 sc->base);
6153 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6154 sc->limit);
6155 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6156 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6157}
6158
6159static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6160{
6161 unsigned int flags;
6162
6163 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6164 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6165 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6166 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6167 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6168}
6169
6170static inline void svm_load_seg_cache(target_phys_addr_t addr,
6171 CPUState *env, int seg_reg)
6172{
6173 SegmentCache sc1, *sc = &sc1;
6174 svm_load_seg(addr, sc);
6175 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6176 sc->base, sc->limit, sc->flags);
6177}
6178
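/* VMRUN: save the host state into the hsave area, load the guest state from
   the VMCB addressed by rAX, enable intercepts and the global interrupt flag,
   and inject the event from EVENTINJ if one is pending. */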
6179void helper_vmrun(int aflag, int next_eip_addend)
6180{
6181 target_ulong addr;
6182 uint32_t event_inj;
6183 uint32_t int_ctl;
6184
6185 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6186
6187 if (aflag == 2)
6188 addr = EAX;
6189 else
6190 addr = (uint32_t)EAX;
6191
6192 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6193
6194 env->vm_vmcb = addr;
6195
6196 /* save the current CPU state in the hsave page */
6197 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6198 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6199
6200 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6201 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6202
6203 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6204 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6205 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6206 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6207 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6208 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6209
6210 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6211 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6212
6213 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6214 &env->segs[R_ES]);
6215 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6216 &env->segs[R_CS]);
6217 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6218 &env->segs[R_SS]);
6219 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6220 &env->segs[R_DS]);
6221
6222 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6223 EIP + next_eip_addend);
6224 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6225 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6226
6227 /* load the interception bitmaps so we do not need to access the
6228 vmcb in svm mode */
6229 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6230 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6231 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6232 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6233 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6234 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6235
6236 /* enable intercepts */
6237 env->hflags |= HF_SVMI_MASK;
6238
6239 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6240
6241 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6242 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6243
6244 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6245 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6246
6247 /* clear exit_info_2 so we behave like the real hardware */
6248 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6249
6250 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6251 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6252 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6253 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6254 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6255 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6256 if (int_ctl & V_INTR_MASKING_MASK) {
6257 env->v_tpr = int_ctl & V_TPR_MASK;
6258 env->hflags2 |= HF2_VINTR_MASK;
6259 if (env->eflags & IF_MASK)
6260 env->hflags2 |= HF2_HIF_MASK;
6261 }
6262
6263 cpu_load_efer(env,
6264 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6265 env->eflags = 0;
6266 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6267 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6268 CC_OP = CC_OP_EFLAGS;
6269
6270 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6271 env, R_ES);
6272 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6273 env, R_CS);
6274 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6275 env, R_SS);
6276 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6277 env, R_DS);
6278
6279 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6280 env->eip = EIP;
6281 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6282 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6283 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6284 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6285 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6286
6287 /* FIXME: guest state consistency checks */
6288
6289 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6290 case TLB_CONTROL_DO_NOTHING:
6291 break;
6292 case TLB_CONTROL_FLUSH_ALL_ASID:
6293 /* FIXME: this is not 100% correct but should work for now */
6294 tlb_flush(env, 1);
6295 break;
6296 }
6297
6298 env->hflags2 |= HF2_GIF_MASK;
6299
6300 if (int_ctl & V_IRQ_MASK) {
6301 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6302 }
6303
6304 /* maybe we need to inject an event */
6305 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6306 if (event_inj & SVM_EVTINJ_VALID) {
6307 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6308 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6309 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6310
6311 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6312 /* FIXME: need to implement valid_err */
6313 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6314 case SVM_EVTINJ_TYPE_INTR:
6315 env->exception_index = vector;
6316 env->error_code = event_inj_err;
6317 env->exception_is_int = 0;
6318 env->exception_next_eip = -1;
6319 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6320            /* XXX: is it always correct? */
6321 do_interrupt(vector, 0, 0, 0, 1);
6322 break;
6323 case SVM_EVTINJ_TYPE_NMI:
6324 env->exception_index = EXCP02_NMI;
6325 env->error_code = event_inj_err;
6326 env->exception_is_int = 0;
6327 env->exception_next_eip = EIP;
6328 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6329 cpu_loop_exit();
6330 break;
6331 case SVM_EVTINJ_TYPE_EXEPT:
6332 env->exception_index = vector;
6333 env->error_code = event_inj_err;
6334 env->exception_is_int = 0;
6335 env->exception_next_eip = -1;
6336 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6337 cpu_loop_exit();
6338 break;
6339 case SVM_EVTINJ_TYPE_SOFT:
6340 env->exception_index = vector;
6341 env->error_code = event_inj_err;
6342 env->exception_is_int = 1;
6343 env->exception_next_eip = EIP;
6344 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6345 cpu_loop_exit();
6346 break;
6347 }
6348 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6349 }
6350}
6351
6352void helper_vmmcall(void)
6353{
6354 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6355 raise_exception(EXCP06_ILLOP);
6356}
6357
6358void helper_vmload(int aflag)
6359{
6360 target_ulong addr;
6361 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6362
6363 if (aflag == 2)
6364 addr = EAX;
6365 else
6366 addr = (uint32_t)EAX;
6367
6368 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6369 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6370 env->segs[R_FS].base);
6371
6372 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6373 env, R_FS);
6374 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6375 env, R_GS);
6376 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6377 &env->tr);
6378 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6379 &env->ldt);
6380
6381#ifdef TARGET_X86_64
6382 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6383 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6384 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6385 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6386#endif
6387 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6388 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6389 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6390 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6391}
6392
6393void helper_vmsave(int aflag)
6394{
6395 target_ulong addr;
6396 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6397
6398 if (aflag == 2)
6399 addr = EAX;
6400 else
6401 addr = (uint32_t)EAX;
6402
6403 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6404 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6405 env->segs[R_FS].base);
6406
6407 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6408 &env->segs[R_FS]);
6409 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6410 &env->segs[R_GS]);
6411 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6412 &env->tr);
6413 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6414 &env->ldt);
6415
6416#ifdef TARGET_X86_64
6417 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6418 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6419 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6420 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6421#endif
6422 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6423 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6424 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6425 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6426}
6427
6428void helper_stgi(void)
6429{
6430 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6431 env->hflags2 |= HF2_GIF_MASK;
6432}
6433
6434void helper_clgi(void)
6435{
6436 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6437 env->hflags2 &= ~HF2_GIF_MASK;
6438}
6439
6440void helper_skinit(void)
6441{
6442 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6443 /* XXX: not implemented */
6444 raise_exception(EXCP06_ILLOP);
6445}
6446
6447void helper_invlpga(int aflag)
6448{
6449 target_ulong addr;
6450 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6451
6452 if (aflag == 2)
6453 addr = EAX;
6454 else
6455 addr = (uint32_t)EAX;
6456
6457    /* XXX: could use the ASID to decide whether the flush is
6458       actually needed */
6459 tlb_flush_page(env, addr);
6460}
6461
6462void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6463{
6464 if (likely(!(env->hflags & HF_SVMI_MASK)))
6465 return;
6466#ifndef VBOX
6467 switch(type) {
6468 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6469 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6470 helper_vmexit(type, param);
6471 }
6472 break;
6473 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6474 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6475 helper_vmexit(type, param);
6476 }
6477 break;
6478 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6479 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6480 helper_vmexit(type, param);
6481 }
6482 break;
6483 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6484 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6485 helper_vmexit(type, param);
6486 }
6487 break;
6488 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6489 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6490 helper_vmexit(type, param);
6491 }
6492 break;
6493 case SVM_EXIT_MSR:
6494 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6495 /* FIXME: this should be read in at vmrun (faster this way?) */
6496 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6497 uint32_t t0, t1;
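            /* The MSR permission map stores two bits per MSR (read and write
               intercept) covering three MSR ranges.  t1 is used as the byte
               offset into the map and t0 as the bit offset within that byte;
               param is 0 for RDMSR and 1 for WRMSR, so (1 << param) selects
               the read or write intercept bit. */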
6498 switch((uint32_t)ECX) {
6499 case 0 ... 0x1fff:
6500 t0 = (ECX * 2) % 8;
6501 t1 = ECX / 8;
6502 break;
6503 case 0xc0000000 ... 0xc0001fff:
6504 t0 = (8192 + ECX - 0xc0000000) * 2;
6505 t1 = (t0 / 8);
6506 t0 %= 8;
6507 break;
6508 case 0xc0010000 ... 0xc0011fff:
6509 t0 = (16384 + ECX - 0xc0010000) * 2;
6510 t1 = (t0 / 8);
6511 t0 %= 8;
6512 break;
6513 default:
6514 helper_vmexit(type, param);
6515 t0 = 0;
6516 t1 = 0;
6517 break;
6518 }
6519 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6520 helper_vmexit(type, param);
6521 }
6522 break;
6523 default:
6524 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6525 helper_vmexit(type, param);
6526 }
6527 break;
6528 }
6529#else /* VBOX */
6530 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6531#endif /* VBOX */
6532}
6533
6534void helper_svm_check_io(uint32_t port, uint32_t param,
6535 uint32_t next_eip_addend)
6536{
6537 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6538 /* FIXME: this should be read in at vmrun (faster this way?) */
6539 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
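        /* Bits 4..6 of param carry the access size in bytes (the SZ8/SZ16/SZ32
           bits of the IOIO exit info encoding); the I/O permission map has one
           bit per port, so the mask covers one bit for each byte accessed. */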
6540 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6541 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6542 /* next EIP */
6543 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6544 env->eip + next_eip_addend);
6545 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6546 }
6547 }
6548}
6549
6550/* Note: currently only 32 bits of exit_code are used */
6551void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6552{
6553 uint32_t int_ctl;
6554
6555 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6556 exit_code, exit_info_1,
6557 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6558 EIP);
6559
6560 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6561 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6562 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6563 } else {
6564 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6565 }
6566
6567 /* Save the VM state in the vmcb */
6568 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6569 &env->segs[R_ES]);
6570 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6571 &env->segs[R_CS]);
6572 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6573 &env->segs[R_SS]);
6574 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6575 &env->segs[R_DS]);
6576
6577 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6578 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6579
6580 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6581 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6582
6583 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6584 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6585 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6586 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6587 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6588
6589 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6590 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6591 int_ctl |= env->v_tpr & V_TPR_MASK;
6592 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6593 int_ctl |= V_IRQ_MASK;
6594 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6595
6596 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6597 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6598 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6599 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6600 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6601 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6602 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6603
6604 /* Reload the host state from vm_hsave */
6605 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6606 env->hflags &= ~HF_SVMI_MASK;
6607 env->intercept = 0;
6608 env->intercept_exceptions = 0;
6609 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6610 env->tsc_offset = 0;
6611
6612 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6613 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6614
6615 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6616 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6617
6618 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6619 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6620 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6621 /* we need to set the efer after the crs so the hidden flags get
6622 set properly */
6623 cpu_load_efer(env,
6624 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6625 env->eflags = 0;
6626 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6627 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6628 CC_OP = CC_OP_EFLAGS;
6629
6630 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6631 env, R_ES);
6632 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6633 env, R_CS);
6634 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6635 env, R_SS);
6636 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6637 env, R_DS);
6638
6639 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6640 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6641 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6642
6643 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6644 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6645
6646 /* other setups */
6647 cpu_x86_set_cpl(env, 0);
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6649 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6650
6651 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6652 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6653 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6654 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6655 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6656
6657 env->hflags2 &= ~HF2_GIF_MASK;
6658 /* FIXME: Resets the current ASID register to zero (host ASID). */
6659
6660 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6661
6662 /* Clears the TSC_OFFSET inside the processor. */
6663
6664 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6665       from the page table indicated by the host's CR3. If the PDPEs contain
6666 illegal state, the processor causes a shutdown. */
6667
6668 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6669 env->cr[0] |= CR0_PE_MASK;
6670 env->eflags &= ~VM_MASK;
6671
6672 /* Disables all breakpoints in the host DR7 register. */
6673
6674 /* Checks the reloaded host state for consistency. */
6675
6676 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6677 host's code segment or non-canonical (in the case of long mode), a
6678       #GP fault is delivered inside the host. */
6679
6680 /* remove any pending exception */
6681 env->exception_index = -1;
6682 env->error_code = 0;
6683 env->old_exception = -1;
6684
6685 cpu_loop_exit();
6686}
6687
6688#endif
6689
6690/* MMX/SSE */
6691/* XXX: optimize by storing fptt and fptags in the static cpu state */
6692void helper_enter_mmx(void)
6693{
6694 env->fpstt = 0;
6695 *(uint32_t *)(env->fptags) = 0;
6696 *(uint32_t *)(env->fptags + 4) = 0;
6697}
6698
6699void helper_emms(void)
6700{
6701 /* set to empty state */
6702 *(uint32_t *)(env->fptags) = 0x01010101;
6703 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6704}
6705
6706/* XXX: suppress */
6707void helper_movq(void *d, void *s)
6708{
6709 *(uint64_t *)d = *(uint64_t *)s;
6710}
6711
6712#define SHIFT 0
6713#include "ops_sse.h"
6714
6715#define SHIFT 1
6716#include "ops_sse.h"
6717
6718#define SHIFT 0
6719#include "helper_template.h"
6720#undef SHIFT
6721
6722#define SHIFT 1
6723#include "helper_template.h"
6724#undef SHIFT
6725
6726#define SHIFT 2
6727#include "helper_template.h"
6728#undef SHIFT
6729
6730#ifdef TARGET_X86_64
6731
6732#define SHIFT 3
6733#include "helper_template.h"
6734#undef SHIFT
6735
6736#endif
6737
6738/* bit operations */
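/* Bit scan forward: returns the index of the least significant set bit.
   The loop assumes a non-zero operand, matching BSF semantics where the
   result is undefined for a zero source. */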
6739target_ulong helper_bsf(target_ulong t0)
6740{
6741 int count;
6742 target_ulong res;
6743
6744 res = t0;
6745 count = 0;
6746 while ((res & 1) == 0) {
6747 count++;
6748 res >>= 1;
6749 }
6750 return count;
6751}
6752
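/* With wordsize > 0 this implements LZCNT on a wordsize-bit operand
   (returning wordsize for a zero input); with wordsize == 0 it returns the
   bit index of the most significant set bit, which is what BSR needs. */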
6753target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6754{
6755 int count;
6756 target_ulong res, mask;
6757
6758 if (wordsize > 0 && t0 == 0) {
6759 return wordsize;
6760 }
6761 res = t0;
6762 count = TARGET_LONG_BITS - 1;
6763 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6764 while ((res & mask) == 0) {
6765 count--;
6766 res <<= 1;
6767 }
6768 if (wordsize > 0) {
6769 return wordsize - 1 - count;
6770 }
6771 return count;
6772}
6773
6774target_ulong helper_bsr(target_ulong t0)
6775{
6776 return helper_lzcnt(t0, 0);
6777}
6778
6779static int compute_all_eflags(void)
6780{
6781 return CC_SRC;
6782}
6783
6784static int compute_c_eflags(void)
6785{
6786 return CC_SRC & CC_C;
6787}
6788
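/* Condition codes are evaluated lazily: CC_OP records the last flag-setting
   operation and CC_SRC/CC_DST its inputs or result, so helper_cc_compute_all()
   rebuilds the full set of arithmetic EFLAGS bits on demand, while
   helper_cc_compute_c() below derives only the carry flag. */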
6789uint32_t helper_cc_compute_all(int op)
6790{
6791 switch (op) {
6792 default: /* should never happen */ return 0;
6793
6794 case CC_OP_EFLAGS: return compute_all_eflags();
6795
6796 case CC_OP_MULB: return compute_all_mulb();
6797 case CC_OP_MULW: return compute_all_mulw();
6798 case CC_OP_MULL: return compute_all_mull();
6799
6800 case CC_OP_ADDB: return compute_all_addb();
6801 case CC_OP_ADDW: return compute_all_addw();
6802 case CC_OP_ADDL: return compute_all_addl();
6803
6804 case CC_OP_ADCB: return compute_all_adcb();
6805 case CC_OP_ADCW: return compute_all_adcw();
6806 case CC_OP_ADCL: return compute_all_adcl();
6807
6808 case CC_OP_SUBB: return compute_all_subb();
6809 case CC_OP_SUBW: return compute_all_subw();
6810 case CC_OP_SUBL: return compute_all_subl();
6811
6812 case CC_OP_SBBB: return compute_all_sbbb();
6813 case CC_OP_SBBW: return compute_all_sbbw();
6814 case CC_OP_SBBL: return compute_all_sbbl();
6815
6816 case CC_OP_LOGICB: return compute_all_logicb();
6817 case CC_OP_LOGICW: return compute_all_logicw();
6818 case CC_OP_LOGICL: return compute_all_logicl();
6819
6820 case CC_OP_INCB: return compute_all_incb();
6821 case CC_OP_INCW: return compute_all_incw();
6822 case CC_OP_INCL: return compute_all_incl();
6823
6824 case CC_OP_DECB: return compute_all_decb();
6825 case CC_OP_DECW: return compute_all_decw();
6826 case CC_OP_DECL: return compute_all_decl();
6827
6828 case CC_OP_SHLB: return compute_all_shlb();
6829 case CC_OP_SHLW: return compute_all_shlw();
6830 case CC_OP_SHLL: return compute_all_shll();
6831
6832 case CC_OP_SARB: return compute_all_sarb();
6833 case CC_OP_SARW: return compute_all_sarw();
6834 case CC_OP_SARL: return compute_all_sarl();
6835
6836#ifdef TARGET_X86_64
6837 case CC_OP_MULQ: return compute_all_mulq();
6838
6839 case CC_OP_ADDQ: return compute_all_addq();
6840
6841 case CC_OP_ADCQ: return compute_all_adcq();
6842
6843 case CC_OP_SUBQ: return compute_all_subq();
6844
6845 case CC_OP_SBBQ: return compute_all_sbbq();
6846
6847 case CC_OP_LOGICQ: return compute_all_logicq();
6848
6849 case CC_OP_INCQ: return compute_all_incq();
6850
6851 case CC_OP_DECQ: return compute_all_decq();
6852
6853 case CC_OP_SHLQ: return compute_all_shlq();
6854
6855 case CC_OP_SARQ: return compute_all_sarq();
6856#endif
6857 }
6858}
6859
6860uint32_t helper_cc_compute_c(int op)
6861{
6862 switch (op) {
6863 default: /* should never happen */ return 0;
6864
6865 case CC_OP_EFLAGS: return compute_c_eflags();
6866
6867 case CC_OP_MULB: return compute_c_mull();
6868 case CC_OP_MULW: return compute_c_mull();
6869 case CC_OP_MULL: return compute_c_mull();
6870
6871 case CC_OP_ADDB: return compute_c_addb();
6872 case CC_OP_ADDW: return compute_c_addw();
6873 case CC_OP_ADDL: return compute_c_addl();
6874
6875 case CC_OP_ADCB: return compute_c_adcb();
6876 case CC_OP_ADCW: return compute_c_adcw();
6877 case CC_OP_ADCL: return compute_c_adcl();
6878
6879 case CC_OP_SUBB: return compute_c_subb();
6880 case CC_OP_SUBW: return compute_c_subw();
6881 case CC_OP_SUBL: return compute_c_subl();
6882
6883 case CC_OP_SBBB: return compute_c_sbbb();
6884 case CC_OP_SBBW: return compute_c_sbbw();
6885 case CC_OP_SBBL: return compute_c_sbbl();
6886
6887 case CC_OP_LOGICB: return compute_c_logicb();
6888 case CC_OP_LOGICW: return compute_c_logicw();
6889 case CC_OP_LOGICL: return compute_c_logicl();
6890
6891 case CC_OP_INCB: return compute_c_incl();
6892 case CC_OP_INCW: return compute_c_incl();
6893 case CC_OP_INCL: return compute_c_incl();
6894
6895 case CC_OP_DECB: return compute_c_incl();
6896 case CC_OP_DECW: return compute_c_incl();
6897 case CC_OP_DECL: return compute_c_incl();
6898
6899 case CC_OP_SHLB: return compute_c_shlb();
6900 case CC_OP_SHLW: return compute_c_shlw();
6901 case CC_OP_SHLL: return compute_c_shll();
6902
6903 case CC_OP_SARB: return compute_c_sarl();
6904 case CC_OP_SARW: return compute_c_sarl();
6905 case CC_OP_SARL: return compute_c_sarl();
6906
6907#ifdef TARGET_X86_64
6908 case CC_OP_MULQ: return compute_c_mull();
6909
6910 case CC_OP_ADDQ: return compute_c_addq();
6911
6912 case CC_OP_ADCQ: return compute_c_adcq();
6913
6914 case CC_OP_SUBQ: return compute_c_subq();
6915
6916 case CC_OP_SBBQ: return compute_c_sbbq();
6917
6918 case CC_OP_LOGICQ: return compute_c_logicq();
6919
6920 case CC_OP_INCQ: return compute_c_incl();
6921
6922 case CC_OP_DECQ: return compute_c_incl();
6923
6924 case CC_OP_SHLQ: return compute_c_shlq();
6925
6926 case CC_OP_SARQ: return compute_c_sarl();
6927#endif
6928 }
6929}