VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@43387

Last change on this file since 43387 was 43387, checked in by vboxsync, 12 years ago

VMM: HM cleanup.

  • Property svn:eol-style set to native
File size: 196.6 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
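/* The table above encodes even parity of its byte index: PF reflects only the
 * low 8 bits of a result, and entry i is CC_P exactly when i has an even
 * number of set bits. An equivalent generator (illustrative sketch only, not
 * part of the build) would be:
 *     for (i = 0; i < 256; i++)
 *         parity_table[i] = (__builtin_popcount(i) & 1) ? 0 : CC_P;
 */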
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
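/* Both tables above fold a masked rotate count (0..31) into the period of a
 * rotate-through-carry: RCL/RCR on a w-bit operand cycle with period w + 1,
 * so the effective count is taken modulo 17 for 16-bit operands and modulo 9
 * for 8-bit operands. */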
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
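/* The seven constants above correspond to the values pushed by FLDZ, FLD1,
 * FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T, in that order; the FPU
 * constant-load helpers later in this file pick individual entries. */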
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state(void)
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non-zero on error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
235 {
236 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
237 selector = selector & 0xfffc;
238 }
239#endif /* VBOX */
240
241 if (selector & 0x4)
242 dt = &env->ldt;
243 else
244 dt = &env->gdt;
245 index = selector & ~7;
246 if ((index + 7) > dt->limit)
247 return -1;
248 ptr = dt->base + index;
249 *e1_ptr = ldl_kernel(ptr);
250 *e2_ptr = ldl_kernel(ptr + 4);
251 return 0;
252}
253
254static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
255{
256 unsigned int limit;
257 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
258 if (e2 & DESC_G_MASK)
259 limit = (limit << 12) | 0xfff;
260 return limit;
261}
262
263static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
264{
265 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
266}
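/* Standard IA-32 descriptor layout assumed by the two helpers above
 * (e1 = low dword, e2 = high dword):
 *   e1[15:0]  = limit[15:0]      e1[31:16] = base[15:0]
 *   e2[7:0]   = base[23:16]      e2[19:16] = limit[19:16]
 *   e2[23]    = G (granularity)  e2[31:24] = base[31:24]
 * With G set, the 20-bit limit counts 4 KiB pages, hence the <<12 | 0xfff. */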
267
268static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
269{
270 sc->base = get_seg_base(e1, e2);
271 sc->limit = get_seg_limit(e1, e2);
272 sc->flags = e2;
273#ifdef VBOX
274 sc->newselector = 0;
275 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
276#endif
277}
278
279/* init the segment cache in vm86 mode. */
280static inline void load_seg_vm(int seg, int selector)
281{
282 selector &= 0xffff;
283#ifdef VBOX
284 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
285 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
286 flags |= (3 << DESC_DPL_SHIFT);
287
288 cpu_x86_load_seg_cache(env, seg, selector,
289 (selector << 4), 0xffff, flags);
290#else /* VBOX */
291 cpu_x86_load_seg_cache(env, seg, selector,
292 (selector << 4), 0xffff, 0);
293#endif /* VBOX */
294}
295
296static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
297 uint32_t *esp_ptr, int dpl)
298{
299#ifndef VBOX
300 int type, index, shift;
301#else
302 unsigned int type, index, shift;
303#endif
304
305#if 0
306 {
307 int i;
308 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
309 for(i=0;i<env->tr.limit;i++) {
310 printf("%02x ", env->tr.base[i]);
311 if ((i & 7) == 7) printf("\n");
312 }
313 printf("\n");
314 }
315#endif
316
317 if (!(env->tr.flags & DESC_P_MASK))
318 cpu_abort(env, "invalid tss");
319 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
320 if ((type & 7) != 1)
321 cpu_abort(env, "invalid tss type");
322 shift = type >> 3;
323 index = (dpl * 4 + 2) << shift;
324 if (index + (4 << shift) - 1 > env->tr.limit)
325 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
326 if (shift == 0) {
327 *esp_ptr = lduw_kernel(env->tr.base + index);
328 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
329 } else {
330 *esp_ptr = ldl_kernel(env->tr.base + index);
331 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
332 }
333}
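/* For a 32-bit TSS (shift == 1) the index above works out to 8 * dpl + 4,
 * i.e. ESP0/SS0 at offsets 4/8, ESP1/SS1 at 12/16 and ESP2/SS2 at 20/24;
 * for a 16-bit TSS (shift == 0) it is 4 * dpl + 2, giving SP0/SS0 at 2/4. */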
334
335/* XXX: merge with load_seg() */
336static void tss_load_seg(int seg_reg, int selector)
337{
338 uint32_t e1, e2;
339 int rpl, dpl, cpl;
340
341#ifdef VBOX
342 e1 = e2 = 0; /* gcc warning? */
343 cpl = env->hflags & HF_CPL_MASK;
344 /* Trying to load a selector with CPL=1? */
345 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
346 {
347 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
348 selector = selector & 0xfffc;
349 }
350#endif /* VBOX */
351
352 if ((selector & 0xfffc) != 0) {
353 if (load_segment(&e1, &e2, selector) != 0)
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if (!(e2 & DESC_S_MASK))
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 rpl = selector & 3;
358 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
359 cpl = env->hflags & HF_CPL_MASK;
360 if (seg_reg == R_CS) {
361 if (!(e2 & DESC_CS_MASK))
362 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
363 /* XXX: is it correct ? */
364 if (dpl != rpl)
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 if ((e2 & DESC_C_MASK) && dpl > rpl)
367 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
368 } else if (seg_reg == R_SS) {
369 /* SS must be writable data */
370 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372 if (dpl != cpl || dpl != rpl)
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 } else {
375 /* not readable code */
376 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
377 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
378 /* if data or non-conforming code, check the rights */
379 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
380 if (dpl < cpl || dpl < rpl)
381 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
382 }
383 }
384 if (!(e2 & DESC_P_MASK))
385 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
386 cpu_x86_load_seg_cache(env, seg_reg, selector,
387 get_seg_base(e1, e2),
388 get_seg_limit(e1, e2),
389 e2);
390 } else {
391 if (seg_reg == R_SS || seg_reg == R_CS)
392 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
393#ifdef VBOX
394# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
395 cpu_x86_load_seg_cache(env, seg_reg, selector,
396 0, 0, 0);
397# endif
398#endif /* VBOX */
399 }
400}
401
402#define SWITCH_TSS_JMP 0
403#define SWITCH_TSS_IRET 1
404#define SWITCH_TSS_CALL 2
405
406/* XXX: restore CPU state in registers (PowerPC case) */
407static void switch_tss(int tss_selector,
408 uint32_t e1, uint32_t e2, int source,
409 uint32_t next_eip)
410{
411 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
412 target_ulong tss_base;
413 uint32_t new_regs[8], new_segs[6];
414 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
415 uint32_t old_eflags, eflags_mask;
416 SegmentCache *dt;
417#ifndef VBOX
418 int index;
419#else
420 unsigned int index;
421#endif
422 target_ulong ptr;
423
424 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
425 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
426
427 /* if task gate, we read the TSS segment and we load it */
428 if (type == 5) {
429 if (!(e2 & DESC_P_MASK))
430 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
431 tss_selector = e1 >> 16;
432 if (tss_selector & 4)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 if (load_segment(&e1, &e2, tss_selector) != 0)
435 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
436 if (e2 & DESC_S_MASK)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
439 if ((type & 7) != 1)
440 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
441 }
442
443 if (!(e2 & DESC_P_MASK))
444 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
445
446 if (type & 8)
447 tss_limit_max = 103;
448 else
449 tss_limit_max = 43;
450 tss_limit = get_seg_limit(e1, e2);
451 tss_base = get_seg_base(e1, e2);
452 if ((tss_selector & 4) != 0 ||
453 tss_limit < tss_limit_max)
454 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
455 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
456 if (old_type & 8)
457 old_tss_limit_max = 103;
458 else
459 old_tss_limit_max = 43;
460
461 /* read all the registers from the new TSS */
462 if (type & 8) {
463 /* 32 bit */
464 new_cr3 = ldl_kernel(tss_base + 0x1c);
465 new_eip = ldl_kernel(tss_base + 0x20);
466 new_eflags = ldl_kernel(tss_base + 0x24);
467 for(i = 0; i < 8; i++)
468 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
469 for(i = 0; i < 6; i++)
470 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
471 new_ldt = lduw_kernel(tss_base + 0x60);
472 new_trap = ldl_kernel(tss_base + 0x64);
473 } else {
474 /* 16 bit */
475 new_cr3 = 0;
476 new_eip = lduw_kernel(tss_base + 0x0e);
477 new_eflags = lduw_kernel(tss_base + 0x10);
478 for(i = 0; i < 8; i++)
479 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
480 for(i = 0; i < 4; i++)
481 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
482 new_ldt = lduw_kernel(tss_base + 0x2a);
483 new_segs[R_FS] = 0;
484 new_segs[R_GS] = 0;
485 new_trap = 0;
486 }
487
488 /* NOTE: we must avoid memory exceptions during the task switch,
489 so we make dummy accesses before */
490 /* XXX: it can still fail in some cases, so a bigger hack is
491 necessary to validate the TLB after having done the accesses */
492
493 v1 = ldub_kernel(env->tr.base);
494 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
495 stb_kernel(env->tr.base, v1);
496 stb_kernel(env->tr.base + old_tss_limit_max, v2);
497
498 /* clear busy bit (it is restartable) */
499 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
500 target_ulong ptr;
501 uint32_t e2;
502 ptr = env->gdt.base + (env->tr.selector & ~7);
503 e2 = ldl_kernel(ptr + 4);
504 e2 &= ~DESC_TSS_BUSY_MASK;
505 stl_kernel(ptr + 4, e2);
506 }
507 old_eflags = compute_eflags();
508 if (source == SWITCH_TSS_IRET)
509 old_eflags &= ~NT_MASK;
510
511 /* save the current state in the old TSS */
512 if (type & 8) {
513 /* 32 bit */
514 stl_kernel(env->tr.base + 0x20, next_eip);
515 stl_kernel(env->tr.base + 0x24, old_eflags);
516 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
517 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
518 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
519 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
520 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
521 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
522 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
523 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
524 for(i = 0; i < 6; i++)
525 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
526#ifdef VBOX
527 /* Must store the ldt as it gets reloaded and might have been changed. */
528 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
529#endif
530#if defined(VBOX) && defined(DEBUG)
531 printf("TSS 32 bits switch\n");
532 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
533#endif
534 } else {
535 /* 16 bit */
536 stw_kernel(env->tr.base + 0x0e, next_eip);
537 stw_kernel(env->tr.base + 0x10, old_eflags);
538 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
539 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
540 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
541 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
542 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
543 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
544 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
545 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
546 for(i = 0; i < 4; i++)
547 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
548#ifdef VBOX
549 /* Must store the ldt as it gets reloaded and might have been changed. */
550 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
551#endif
552 }
553
554 /* now if an exception occurs, it will occur in the next task
555 context */
556
557 if (source == SWITCH_TSS_CALL) {
558 stw_kernel(tss_base, env->tr.selector);
559 new_eflags |= NT_MASK;
560 }
561
562 /* set busy bit */
563 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
564 target_ulong ptr;
565 uint32_t e2;
566 ptr = env->gdt.base + (tss_selector & ~7);
567 e2 = ldl_kernel(ptr + 4);
568 e2 |= DESC_TSS_BUSY_MASK;
569 stl_kernel(ptr + 4, e2);
570 }
571
572 /* set the new CPU state */
573 /* from this point, any exception which occurs can give problems */
574 env->cr[0] |= CR0_TS_MASK;
575 env->hflags |= HF_TS_MASK;
576 env->tr.selector = tss_selector;
577 env->tr.base = tss_base;
578 env->tr.limit = tss_limit;
579 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
580#ifdef VBOX
581 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
582 env->tr.newselector = 0;
583#endif
584
585 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
586 cpu_x86_update_cr3(env, new_cr3);
587 }
588
589 /* load all registers without an exception, then reload them with
590 possible exception */
591 env->eip = new_eip;
592 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
593 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
594 if (!(type & 8))
595 eflags_mask &= 0xffff;
596 load_eflags(new_eflags, eflags_mask);
597 /* XXX: what to do in 16 bit case ? */
598 EAX = new_regs[0];
599 ECX = new_regs[1];
600 EDX = new_regs[2];
601 EBX = new_regs[3];
602 ESP = new_regs[4];
603 EBP = new_regs[5];
604 ESI = new_regs[6];
605 EDI = new_regs[7];
606 if (new_eflags & VM_MASK) {
607 for(i = 0; i < 6; i++)
608 load_seg_vm(i, new_segs[i]);
609 /* in vm86, CPL is always 3 */
610 cpu_x86_set_cpl(env, 3);
611 } else {
612 /* CPL is set to the RPL of CS */
613 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
614 /* first just selectors as the rest may trigger exceptions */
615 for(i = 0; i < 6; i++)
616 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
617 }
618
619 env->ldt.selector = new_ldt & ~4;
620 env->ldt.base = 0;
621 env->ldt.limit = 0;
622 env->ldt.flags = 0;
623#ifdef VBOX
624 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
625 env->ldt.newselector = 0;
626#endif
627
628 /* load the LDT */
629 if (new_ldt & 4)
630 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
631
632 if ((new_ldt & 0xfffc) != 0) {
633 dt = &env->gdt;
634 index = new_ldt & ~7;
635 if ((index + 7) > dt->limit)
636 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
637 ptr = dt->base + index;
638 e1 = ldl_kernel(ptr);
639 e2 = ldl_kernel(ptr + 4);
640 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
641 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
642 if (!(e2 & DESC_P_MASK))
643 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
644 load_seg_cache_raw_dt(&env->ldt, e1, e2);
645 }
646
647 /* load the segments */
648 if (!(new_eflags & VM_MASK)) {
649 tss_load_seg(R_CS, new_segs[R_CS]);
650 tss_load_seg(R_SS, new_segs[R_SS]);
651 tss_load_seg(R_ES, new_segs[R_ES]);
652 tss_load_seg(R_DS, new_segs[R_DS]);
653 tss_load_seg(R_FS, new_segs[R_FS]);
654 tss_load_seg(R_GS, new_segs[R_GS]);
655 }
656
657 /* check that EIP is in the CS segment limits */
658 if (new_eip > env->segs[R_CS].limit) {
659 /* XXX: different exception if CALL ? */
660 raise_exception_err(EXCP0D_GPF, 0);
661 }
662
663#ifndef CONFIG_USER_ONLY
664 /* reset local breakpoints */
665 if (env->dr[7] & 0x55) {
666 for (i = 0; i < 4; i++) {
667 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
668 hw_breakpoint_remove(env, i);
669 }
670 env->dr[7] &= ~0x55;
671 }
672#endif
673}
674
675/* check if Port I/O is allowed in TSS */
676static inline void check_io(int addr, int size)
677{
678#ifndef VBOX
679 int io_offset, val, mask;
680#else
681 int val, mask;
682 unsigned int io_offset;
683#endif /* VBOX */
684
685 /* TSS must be a valid 32 bit one */
686 if (!(env->tr.flags & DESC_P_MASK) ||
687 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
688 env->tr.limit < 103)
689 goto fail;
690 io_offset = lduw_kernel(env->tr.base + 0x66);
691 io_offset += (addr >> 3);
692 /* Note: the check needs two bytes */
693 if ((io_offset + 1) > env->tr.limit)
694 goto fail;
695 val = lduw_kernel(env->tr.base + io_offset);
696 val >>= (addr & 7);
697 mask = (1 << size) - 1;
698 /* all bits must be zero to allow the I/O */
699 if ((val & mask) != 0) {
700 fail:
701 raise_exception_err(EXCP0D_GPF, 0);
702 }
703}
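/* Illustrative example: an OUT to port 0x3f8 with CPL > IOPL reaches
 * check_io() above with addr = 0x3f8 and size = 1; the word at
 * tr.base + io_offset + 0x7f is fetched, and bit 0x3f8 & 7 (== 0) of it must
 * be clear, otherwise #GP(0) is raised. */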
704
705#ifdef VBOX
706
707/* Keep in sync with gen_check_external_event() */
708void helper_check_external_event(void)
709{
710 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
711 | CPU_INTERRUPT_EXTERNAL_EXIT
712 | CPU_INTERRUPT_EXTERNAL_TIMER
713 | CPU_INTERRUPT_EXTERNAL_DMA))
714 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
715 && (env->eflags & IF_MASK)
716 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
717 {
718 helper_external_event();
719 }
720
721}
722
723void helper_sync_seg(uint32_t reg)
724{
725 if (env->segs[reg].newselector)
726 sync_seg(env, reg, env->segs[reg].newselector);
727}
728
729#endif /* VBOX */
730
731void helper_check_iob(uint32_t t0)
732{
733 check_io(t0, 1);
734}
735
736void helper_check_iow(uint32_t t0)
737{
738 check_io(t0, 2);
739}
740
741void helper_check_iol(uint32_t t0)
742{
743 check_io(t0, 4);
744}
745
746void helper_outb(uint32_t port, uint32_t data)
747{
748#ifndef VBOX
749 cpu_outb(port, data & 0xff);
750#else
751 cpu_outb(env, port, data & 0xff);
752#endif
753}
754
755target_ulong helper_inb(uint32_t port)
756{
757#ifndef VBOX
758 return cpu_inb(port);
759#else
760 return cpu_inb(env, port);
761#endif
762}
763
764void helper_outw(uint32_t port, uint32_t data)
765{
766#ifndef VBOX
767 cpu_outw(port, data & 0xffff);
768#else
769 cpu_outw(env, port, data & 0xffff);
770#endif
771}
772
773target_ulong helper_inw(uint32_t port)
774{
775#ifndef VBOX
776 return cpu_inw(port);
777#else
778 return cpu_inw(env, port);
779#endif
780}
781
782void helper_outl(uint32_t port, uint32_t data)
783{
784#ifndef VBOX
785 cpu_outl(port, data);
786#else
787 cpu_outl(env, port, data);
788#endif
789}
790
791target_ulong helper_inl(uint32_t port)
792{
793#ifndef VBOX
794 return cpu_inl(port);
795#else
796 return cpu_inl(env, port);
797#endif
798}
799
800static inline unsigned int get_sp_mask(unsigned int e2)
801{
802 if (e2 & DESC_B_MASK)
803 return 0xffffffff;
804 else
805 return 0xffff;
806}
807
808static int exeption_has_error_code(int intno)
809{
810 switch(intno) {
811 case 8:
812 case 10:
813 case 11:
814 case 12:
815 case 13:
816 case 14:
817 case 17:
818 return 1;
819 }
820 return 0;
821}
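/* The vectors above are #DF (8), #TS (10), #NP (11), #SS (12), #GP (13),
 * #PF (14) and #AC (17); all other exceptions push no error code. */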
822
823#ifdef TARGET_X86_64
824#define SET_ESP(val, sp_mask)\
825do {\
826 if ((sp_mask) == 0xffff)\
827 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
828 else if ((sp_mask) == 0xffffffffLL)\
829 ESP = (uint32_t)(val);\
830 else\
831 ESP = (val);\
832} while (0)
833#else
834#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
835#endif
836
837/* on 64-bit machines this can overflow, so this segment addition macro
838 * can be used to trim the value to 32 bits whenever needed */
839#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
840
841/* XXX: add a is_user flag to have proper security support */
842#define PUSHW(ssp, sp, sp_mask, val)\
843{\
844 sp -= 2;\
845 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
846}
847
848#define PUSHL(ssp, sp, sp_mask, val)\
849{\
850 sp -= 4;\
851 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
852}
853
854#define POPW(ssp, sp, sp_mask, val)\
855{\
856 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
857 sp += 2;\
858}
859
860#define POPL(ssp, sp, sp_mask, val)\
861{\
862 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
863 sp += 4;\
864}
865
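/* Sketch of the stack frame built by do_interrupt_protected() below for
 * trap/interrupt gates (longs for 386 gates, words for 286 gates):
 *   [old SS, old ESP]   only when switching to an inner stack (new_stack)
 *   EFLAGS, CS, EIP
 *   [error code]        only for the vectors in exeption_has_error_code()
 * When entered from VM86 mode, GS/FS/DS/ES are pushed first and then
 * cleared before the new CS is loaded. */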
866/* protected mode interrupt */
867static void do_interrupt_protected(int intno, int is_int, int error_code,
868 unsigned int next_eip, int is_hw)
869{
870 SegmentCache *dt;
871 target_ulong ptr, ssp;
872 int type, dpl, selector, ss_dpl, cpl;
873 int has_error_code, new_stack, shift;
874 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
875 uint32_t old_eip, sp_mask;
876
877#ifdef VBOX
878 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
879 cpu_loop_exit();
880#endif
881
882 has_error_code = 0;
883 if (!is_int && !is_hw)
884 has_error_code = exeption_has_error_code(intno);
885 if (is_int)
886 old_eip = next_eip;
887 else
888 old_eip = env->eip;
889
890 dt = &env->idt;
891#ifndef VBOX
892 if (intno * 8 + 7 > dt->limit)
893#else
894 if ((unsigned)intno * 8 + 7 > dt->limit)
895#endif
896 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
897 ptr = dt->base + intno * 8;
898 e1 = ldl_kernel(ptr);
899 e2 = ldl_kernel(ptr + 4);
900 /* check gate type */
901 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
902 switch(type) {
903 case 5: /* task gate */
904#ifdef VBOX
905 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
906 cpl = env->hflags & HF_CPL_MASK;
907 /* check privilege if software int */
908 if (is_int && dpl < cpl)
909 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
910#endif
911 /* must do that check here to return the correct error code */
912 if (!(e2 & DESC_P_MASK))
913 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
914 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
915 if (has_error_code) {
916 int type;
917 uint32_t mask;
918 /* push the error code */
919 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
920 shift = type >> 3;
921 if (env->segs[R_SS].flags & DESC_B_MASK)
922 mask = 0xffffffff;
923 else
924 mask = 0xffff;
925 esp = (ESP - (2 << shift)) & mask;
926 ssp = env->segs[R_SS].base + esp;
927 if (shift)
928 stl_kernel(ssp, error_code);
929 else
930 stw_kernel(ssp, error_code);
931 SET_ESP(esp, mask);
932 }
933 return;
934 case 6: /* 286 interrupt gate */
935 case 7: /* 286 trap gate */
936 case 14: /* 386 interrupt gate */
937 case 15: /* 386 trap gate */
938 break;
939 default:
940 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
941 break;
942 }
943 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
944 cpl = env->hflags & HF_CPL_MASK;
945 /* check privilege if software int */
946 if (is_int && dpl < cpl)
947 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
948 /* check valid bit */
949 if (!(e2 & DESC_P_MASK))
950 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
951 selector = e1 >> 16;
952 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
953 if ((selector & 0xfffc) == 0)
954 raise_exception_err(EXCP0D_GPF, 0);
955
956 if (load_segment(&e1, &e2, selector) != 0)
957 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
958 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
961 if (dpl > cpl)
962 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
963 if (!(e2 & DESC_P_MASK))
964 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
965 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
966 /* to inner privilege */
967 get_ss_esp_from_tss(&ss, &esp, dpl);
968 if ((ss & 0xfffc) == 0)
969 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
970 if ((ss & 3) != dpl)
971 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
972 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
974 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
975 if (ss_dpl != dpl)
976 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
977 if (!(ss_e2 & DESC_S_MASK) ||
978 (ss_e2 & DESC_CS_MASK) ||
979 !(ss_e2 & DESC_W_MASK))
980 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
981 if (!(ss_e2 & DESC_P_MASK))
982#ifdef VBOX /* See page 3-477 of 253666.pdf */
983 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
984#else
985 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
986#endif
987 new_stack = 1;
988 sp_mask = get_sp_mask(ss_e2);
989 ssp = get_seg_base(ss_e1, ss_e2);
990#if defined(VBOX) && defined(DEBUG)
991 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
992#endif
993 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
994 /* to same privilege */
995 if (env->eflags & VM_MASK)
996 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
997 new_stack = 0;
998 sp_mask = get_sp_mask(env->segs[R_SS].flags);
999 ssp = env->segs[R_SS].base;
1000 esp = ESP;
1001 dpl = cpl;
1002 } else {
1003 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1004 new_stack = 0; /* avoid warning */
1005 sp_mask = 0; /* avoid warning */
1006 ssp = 0; /* avoid warning */
1007 esp = 0; /* avoid warning */
1008 }
1009
1010 shift = type >> 3;
1011
1012#if 0
1013 /* XXX: check that enough room is available */
1014 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1015 if (env->eflags & VM_MASK)
1016 push_size += 8;
1017 push_size <<= shift;
1018#endif
1019 if (shift == 1) {
1020 if (new_stack) {
1021 if (env->eflags & VM_MASK) {
1022 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1023 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1024 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1025 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1026 }
1027 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1028 PUSHL(ssp, esp, sp_mask, ESP);
1029 }
1030 PUSHL(ssp, esp, sp_mask, compute_eflags());
1031 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1032 PUSHL(ssp, esp, sp_mask, old_eip);
1033 if (has_error_code) {
1034 PUSHL(ssp, esp, sp_mask, error_code);
1035 }
1036 } else {
1037 if (new_stack) {
1038 if (env->eflags & VM_MASK) {
1039 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1040 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1041 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1042 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1043 }
1044 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1045 PUSHW(ssp, esp, sp_mask, ESP);
1046 }
1047 PUSHW(ssp, esp, sp_mask, compute_eflags());
1048 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1049 PUSHW(ssp, esp, sp_mask, old_eip);
1050 if (has_error_code) {
1051 PUSHW(ssp, esp, sp_mask, error_code);
1052 }
1053 }
1054
1055 if (new_stack) {
1056 if (env->eflags & VM_MASK) {
1057 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1058 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1059 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1060 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1061 }
1062 ss = (ss & ~3) | dpl;
1063 cpu_x86_load_seg_cache(env, R_SS, ss,
1064 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1065 }
1066 SET_ESP(esp, sp_mask);
1067
1068 selector = (selector & ~3) | dpl;
1069 cpu_x86_load_seg_cache(env, R_CS, selector,
1070 get_seg_base(e1, e2),
1071 get_seg_limit(e1, e2),
1072 e2);
1073 cpu_x86_set_cpl(env, dpl);
1074 env->eip = offset;
1075
1076 /* interrupt gate clear IF mask */
1077 if ((type & 1) == 0) {
1078 env->eflags &= ~IF_MASK;
1079 }
1080#ifndef VBOX
1081 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1082#else
1083 /*
1084 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1085 * gets confused by seemingly changed EFLAGS. See #3491 and
1086 * public bug #2341.
1087 */
1088 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1089#endif
1090}
1091
1092#ifdef VBOX
1093
1094/* check if VME interrupt redirection is enabled in TSS */
1095DECLINLINE(bool) is_vme_irq_redirected(int intno)
1096{
1097 unsigned int io_offset, intredir_offset;
1098 unsigned char val, mask;
1099
1100 /* TSS must be a valid 32 bit one */
1101 if (!(env->tr.flags & DESC_P_MASK) ||
1102 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1103 env->tr.limit < 103)
1104 goto fail;
1105 io_offset = lduw_kernel(env->tr.base + 0x66);
1106 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1107 if (io_offset < 0x68 + 0x20)
1108 io_offset = 0x68 + 0x20;
1109 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1110 intredir_offset = io_offset - 0x20;
1111
1112 intredir_offset += (intno >> 3);
1113 if ((intredir_offset) > env->tr.limit)
1114 goto fail;
1115
1116 val = ldub_kernel(env->tr.base + intredir_offset);
1117 mask = 1 << (unsigned char)(intno & 7);
1118
1119 /* bit set means no redirection. */
1120 if ((val & mask) != 0) {
1121 return false;
1122 }
1123 return true;
1124
1125fail:
1126 raise_exception_err(EXCP0D_GPF, 0);
1127 return true;
1128}
1129
1130/* V86 mode software interrupt with CR4.VME=1 */
1131static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1132{
1133 target_ulong ptr, ssp;
1134 int selector;
1135 uint32_t offset, esp;
1136 uint32_t old_cs, old_eflags;
1137 uint32_t iopl;
1138
1139 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1140
1141 if (!is_vme_irq_redirected(intno))
1142 {
1143 if (iopl == 3)
1144 {
1145 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1146 return;
1147 }
1148 else
1149 raise_exception_err(EXCP0D_GPF, 0);
1150 }
1151
1152 /* virtual mode idt is at linear address 0 */
1153 ptr = 0 + intno * 4;
1154 offset = lduw_kernel(ptr);
1155 selector = lduw_kernel(ptr + 2);
1156 esp = ESP;
1157 ssp = env->segs[R_SS].base;
1158 old_cs = env->segs[R_CS].selector;
1159
1160 old_eflags = compute_eflags();
1161 if (iopl < 3)
1162 {
1163 /* copy VIF into IF and set IOPL to 3 */
1164 if (env->eflags & VIF_MASK)
1165 old_eflags |= IF_MASK;
1166 else
1167 old_eflags &= ~IF_MASK;
1168
1169 old_eflags |= (3 << IOPL_SHIFT);
1170 }
1171
1172 /* XXX: use SS segment size ? */
1173 PUSHW(ssp, esp, 0xffff, old_eflags);
1174 PUSHW(ssp, esp, 0xffff, old_cs);
1175 PUSHW(ssp, esp, 0xffff, next_eip);
1176
1177 /* update processor state */
1178 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1179 env->eip = offset;
1180 env->segs[R_CS].selector = selector;
1181 env->segs[R_CS].base = (selector << 4);
1182 env->eflags &= ~(TF_MASK | RF_MASK);
1183
1184 if (iopl < 3)
1185 env->eflags &= ~VIF_MASK;
1186 else
1187 env->eflags &= ~IF_MASK;
1188}
1189
1190#endif /* VBOX */
1191
1192#ifdef TARGET_X86_64
1193
1194#define PUSHQ(sp, val)\
1195{\
1196 sp -= 8;\
1197 stq_kernel(sp, (val));\
1198}
1199
1200#define POPQ(sp, val)\
1201{\
1202 val = ldq_kernel(sp);\
1203 sp += 8;\
1204}
1205
1206static inline target_ulong get_rsp_from_tss(int level)
1207{
1208 int index;
1209
1210#if 0
1211 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1212 env->tr.base, env->tr.limit);
1213#endif
1214
1215 if (!(env->tr.flags & DESC_P_MASK))
1216 cpu_abort(env, "invalid tss");
1217 index = 8 * level + 4;
1218 if ((index + 7) > env->tr.limit)
1219 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1220 return ldq_kernel(env->tr.base + index);
1221}
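/* In the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and IST1..IST7
 * at offsets 36..84, which is why the IST case below passes level = ist + 3:
 * 8 * (ist + 3) + 4 == 28 + 8 * ist. */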
1222
1223/* 64 bit interrupt */
1224static void do_interrupt64(int intno, int is_int, int error_code,
1225 target_ulong next_eip, int is_hw)
1226{
1227 SegmentCache *dt;
1228 target_ulong ptr;
1229 int type, dpl, selector, cpl, ist;
1230 int has_error_code, new_stack;
1231 uint32_t e1, e2, e3, ss;
1232 target_ulong old_eip, esp, offset;
1233
1234#ifdef VBOX
1235 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1236 cpu_loop_exit();
1237#endif
1238
1239 has_error_code = 0;
1240 if (!is_int && !is_hw)
1241 has_error_code = exeption_has_error_code(intno);
1242 if (is_int)
1243 old_eip = next_eip;
1244 else
1245 old_eip = env->eip;
1246
1247 dt = &env->idt;
1248 if (intno * 16 + 15 > dt->limit)
1249 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1250 ptr = dt->base + intno * 16;
1251 e1 = ldl_kernel(ptr);
1252 e2 = ldl_kernel(ptr + 4);
1253 e3 = ldl_kernel(ptr + 8);
1254 /* check gate type */
1255 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1256 switch(type) {
1257 case 14: /* 386 interrupt gate */
1258 case 15: /* 386 trap gate */
1259 break;
1260 default:
1261 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1262 break;
1263 }
1264 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1265 cpl = env->hflags & HF_CPL_MASK;
1266 /* check privilege if software int */
1267 if (is_int && dpl < cpl)
1268 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1269 /* check valid bit */
1270 if (!(e2 & DESC_P_MASK))
1271 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1272 selector = e1 >> 16;
1273 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1274 ist = e2 & 7;
1275 if ((selector & 0xfffc) == 0)
1276 raise_exception_err(EXCP0D_GPF, 0);
1277
1278 if (load_segment(&e1, &e2, selector) != 0)
1279 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1280 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1281 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1282 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1283 if (dpl > cpl)
1284 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1285 if (!(e2 & DESC_P_MASK))
1286 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1287 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1288 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1289 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1290 /* to inner privilege */
1291 if (ist != 0)
1292 esp = get_rsp_from_tss(ist + 3);
1293 else
1294 esp = get_rsp_from_tss(dpl);
1295 esp &= ~0xfLL; /* align stack */
1296 ss = 0;
1297 new_stack = 1;
1298 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1299 /* to same privilege */
1300 if (env->eflags & VM_MASK)
1301 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1302 new_stack = 0;
1303 if (ist != 0)
1304 esp = get_rsp_from_tss(ist + 3);
1305 else
1306 esp = ESP;
1307 esp &= ~0xfLL; /* align stack */
1308 dpl = cpl;
1309 } else {
1310 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1311 new_stack = 0; /* avoid warning */
1312 esp = 0; /* avoid warning */
1313 }
1314
1315 PUSHQ(esp, env->segs[R_SS].selector);
1316 PUSHQ(esp, ESP);
1317 PUSHQ(esp, compute_eflags());
1318 PUSHQ(esp, env->segs[R_CS].selector);
1319 PUSHQ(esp, old_eip);
1320 if (has_error_code) {
1321 PUSHQ(esp, error_code);
1322 }
1323
1324 if (new_stack) {
1325 ss = 0 | dpl;
1326 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1327 }
1328 ESP = esp;
1329
1330 selector = (selector & ~3) | dpl;
1331 cpu_x86_load_seg_cache(env, R_CS, selector,
1332 get_seg_base(e1, e2),
1333 get_seg_limit(e1, e2),
1334 e2);
1335 cpu_x86_set_cpl(env, dpl);
1336 env->eip = offset;
1337
1338 /* interrupt gate clear IF mask */
1339 if ((type & 1) == 0) {
1340 env->eflags &= ~IF_MASK;
1341 }
1342#ifndef VBOX
1343 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1344#else /* VBOX */
1345 /*
1346 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1347 * gets confused by seemingly changed EFLAGS. See #3491 and
1348 * public bug #2341.
1349 */
1350 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1351#endif /* VBOX */
1352}
1353#endif
1354
1355#ifdef TARGET_X86_64
1356#if defined(CONFIG_USER_ONLY)
1357void helper_syscall(int next_eip_addend)
1358{
1359 env->exception_index = EXCP_SYSCALL;
1360 env->exception_next_eip = env->eip + next_eip_addend;
1361 cpu_loop_exit();
1362}
1363#else
1364void helper_syscall(int next_eip_addend)
1365{
1366 int selector;
1367
1368 if (!(env->efer & MSR_EFER_SCE)) {
1369 raise_exception_err(EXCP06_ILLOP, 0);
1370 }
1371 selector = (env->star >> 32) & 0xffff;
1372 if (env->hflags & HF_LMA_MASK) {
1373 int code64;
1374
1375 ECX = env->eip + next_eip_addend;
1376 env->regs[11] = compute_eflags();
1377
1378 code64 = env->hflags & HF_CS64_MASK;
1379
1380 cpu_x86_set_cpl(env, 0);
1381 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1382 0, 0xffffffff,
1383 DESC_G_MASK | DESC_P_MASK |
1384 DESC_S_MASK |
1385 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1386 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1387 0, 0xffffffff,
1388 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1389 DESC_S_MASK |
1390 DESC_W_MASK | DESC_A_MASK);
1391 env->eflags &= ~env->fmask;
1392 load_eflags(env->eflags, 0);
1393 if (code64)
1394 env->eip = env->lstar;
1395 else
1396 env->eip = env->cstar;
1397 } else {
1398 ECX = (uint32_t)(env->eip + next_eip_addend);
1399
1400 cpu_x86_set_cpl(env, 0);
1401 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1402 0, 0xffffffff,
1403 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1404 DESC_S_MASK |
1405 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1406 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1407 0, 0xffffffff,
1408 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1409 DESC_S_MASK |
1410 DESC_W_MASK | DESC_A_MASK);
1411 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1412 env->eip = (uint32_t)env->star;
1413 }
1414}
1415#endif
1416#endif
1417
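/* SYSCALL/SYSRET selector bases come from the STAR MSR: bits 47:32 supply the
 * SYSCALL CS (with SS = CS + 8, as in helper_syscall() above), and bits 63:48
 * the SYSRET base used below (CS = base, or base + 16 for 64-bit mode;
 * SS = base + 8). */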
1418#ifdef TARGET_X86_64
1419void helper_sysret(int dflag)
1420{
1421 int cpl, selector;
1422
1423 if (!(env->efer & MSR_EFER_SCE)) {
1424 raise_exception_err(EXCP06_ILLOP, 0);
1425 }
1426 cpl = env->hflags & HF_CPL_MASK;
1427 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1428 raise_exception_err(EXCP0D_GPF, 0);
1429 }
1430 selector = (env->star >> 48) & 0xffff;
1431 if (env->hflags & HF_LMA_MASK) {
1432 if (dflag == 2) {
1433 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1434 0, 0xffffffff,
1435 DESC_G_MASK | DESC_P_MASK |
1436 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1437 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1438 DESC_L_MASK);
1439 env->eip = ECX;
1440 } else {
1441 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1442 0, 0xffffffff,
1443 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1444 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1445 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1446 env->eip = (uint32_t)ECX;
1447 }
1448 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1449 0, 0xffffffff,
1450 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1451 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1452 DESC_W_MASK | DESC_A_MASK);
1453 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1454 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1455 cpu_x86_set_cpl(env, 3);
1456 } else {
1457 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1458 0, 0xffffffff,
1459 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1460 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1461 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1462 env->eip = (uint32_t)ECX;
1463 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1464 0, 0xffffffff,
1465 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1466 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1467 DESC_W_MASK | DESC_A_MASK);
1468 env->eflags |= IF_MASK;
1469 cpu_x86_set_cpl(env, 3);
1470 }
1471}
1472#endif
1473
1474#ifdef VBOX
1475
1476/**
1477 * Checks and processes external VMM events.
1478 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1479 */
1480void helper_external_event(void)
1481{
1482# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1483 uintptr_t uSP;
1484# ifdef RT_ARCH_AMD64
1485 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1486# else
1487 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1488# endif
1489 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1490# endif
1491 /* Keep in sync with flags checked by gen_check_external_event() */
1492 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1493 {
1494 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1495 ~CPU_INTERRUPT_EXTERNAL_HARD);
1496 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1497 }
1498 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1499 {
1500 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1501 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1502 cpu_exit(env);
1503 }
1504 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1505 {
1506 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1507 ~CPU_INTERRUPT_EXTERNAL_DMA);
1508 remR3DmaRun(env);
1509 }
1510 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1511 {
1512 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1513 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1514 remR3TimersRun(env);
1515 }
1516 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1517 {
1518 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1519 ~CPU_INTERRUPT_EXTERNAL_FLUSH_TLB);
1520 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1521 }
1522}
1523
1524/* helper for recording call instruction addresses for later scanning */
1525void helper_record_call(void)
1526{
1527 if ( !(env->state & CPU_RAW_RING0)
1528 && (env->cr[0] & CR0_PG_MASK)
1529 && !(env->eflags & X86_EFL_IF))
1530 remR3RecordCall(env);
1531}
1532
1533#endif /* VBOX */
1534
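/* In real mode each IDT entry is only 4 bytes (IP word then CS word) and the
 * frame pushed is FLAGS, CS, IP; e.g. INT 10h fetches its vector from
 * idt.base + 0x40 (the base is 0 after reset). */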
1535/* real mode interrupt */
1536static void do_interrupt_real(int intno, int is_int, int error_code,
1537 unsigned int next_eip)
1538{
1539 SegmentCache *dt;
1540 target_ulong ptr, ssp;
1541 int selector;
1542 uint32_t offset, esp;
1543 uint32_t old_cs, old_eip;
1544
1545 /* real mode (simpler !) */
1546 dt = &env->idt;
1547#ifndef VBOX
1548 if (intno * 4 + 3 > dt->limit)
1549#else
1550 if ((unsigned)intno * 4 + 3 > dt->limit)
1551#endif
1552 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1553 ptr = dt->base + intno * 4;
1554 offset = lduw_kernel(ptr);
1555 selector = lduw_kernel(ptr + 2);
1556 esp = ESP;
1557 ssp = env->segs[R_SS].base;
1558 if (is_int)
1559 old_eip = next_eip;
1560 else
1561 old_eip = env->eip;
1562 old_cs = env->segs[R_CS].selector;
1563 /* XXX: use SS segment size ? */
1564 PUSHW(ssp, esp, 0xffff, compute_eflags());
1565 PUSHW(ssp, esp, 0xffff, old_cs);
1566 PUSHW(ssp, esp, 0xffff, old_eip);
1567
1568 /* update processor state */
1569 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1570 env->eip = offset;
1571 env->segs[R_CS].selector = selector;
1572 env->segs[R_CS].base = (selector << 4);
1573 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1574}
1575
1576/* fake user mode interrupt */
1577void do_interrupt_user(int intno, int is_int, int error_code,
1578 target_ulong next_eip)
1579{
1580 SegmentCache *dt;
1581 target_ulong ptr;
1582 int dpl, cpl, shift;
1583 uint32_t e2;
1584
1585 dt = &env->idt;
1586 if (env->hflags & HF_LMA_MASK) {
1587 shift = 4;
1588 } else {
1589 shift = 3;
1590 }
1591 ptr = dt->base + (intno << shift);
1592 e2 = ldl_kernel(ptr + 4);
1593
1594 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1595 cpl = env->hflags & HF_CPL_MASK;
1596 /* check privilege if software int */
1597 if (is_int && dpl < cpl)
1598 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1599
1600 /* Since we emulate only user space, we cannot do more than
1601 exit the emulation with a suitable exception and error
1602 code */
1603 if (is_int)
1604 EIP = next_eip;
1605}
1606
1607#if !defined(CONFIG_USER_ONLY)
1608static void handle_even_inj(int intno, int is_int, int error_code,
1609 int is_hw, int rm)
1610{
1611 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1612 if (!(event_inj & SVM_EVTINJ_VALID)) {
1613 int type;
1614 if (is_int)
1615 type = SVM_EVTINJ_TYPE_SOFT;
1616 else
1617 type = SVM_EVTINJ_TYPE_EXEPT;
1618 event_inj = intno | type | SVM_EVTINJ_VALID;
1619 if (!rm && exeption_has_error_code(intno)) {
1620 event_inj |= SVM_EVTINJ_VALID_ERR;
1621 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1622 }
1623 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1624 }
1625}
1626#endif
1627
1628/*
1629 * Begin execution of an interrupt. is_int is TRUE if coming from
1630 * the int instruction. next_eip is the EIP value AFTER the interrupt
1631 * instruction. It is only relevant if is_int is TRUE.
1632 */
1633void do_interrupt(int intno, int is_int, int error_code,
1634 target_ulong next_eip, int is_hw)
1635{
1636 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1637 if ((env->cr[0] & CR0_PE_MASK)) {
1638 static int count;
1639 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1640 count, intno, error_code, is_int,
1641 env->hflags & HF_CPL_MASK,
1642 env->segs[R_CS].selector, EIP,
1643 (int)env->segs[R_CS].base + EIP,
1644 env->segs[R_SS].selector, ESP);
1645 if (intno == 0x0e) {
1646 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1647 } else {
1648 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1649 }
1650 qemu_log("\n");
1651 log_cpu_state(env, X86_DUMP_CCOP);
1652#if 0
1653 {
1654 int i;
1655 uint8_t *ptr;
1656 qemu_log(" code=");
1657 ptr = env->segs[R_CS].base + env->eip;
1658 for(i = 0; i < 16; i++) {
1659 qemu_log(" %02x", ldub(ptr + i));
1660 }
1661 qemu_log("\n");
1662 }
1663#endif
1664 count++;
1665 }
1666 }
1667#ifdef VBOX
1668 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1669 if (is_int) {
1670 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1671 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1672 } else {
1673 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1674 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1675 }
1676 }
1677#endif
1678 if (env->cr[0] & CR0_PE_MASK) {
1679#if !defined(CONFIG_USER_ONLY)
1680 if (env->hflags & HF_SVMI_MASK)
1681 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1682#endif
1683#ifdef TARGET_X86_64
1684 if (env->hflags & HF_LMA_MASK) {
1685 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1686 } else
1687#endif
1688 {
1689#ifdef VBOX
1690 /* int xx *, v86 code and VME enabled? */
1691 if ( (env->eflags & VM_MASK)
1692 && (env->cr[4] & CR4_VME_MASK)
1693 && is_int
1694 && !is_hw
1695 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1696 )
1697 do_soft_interrupt_vme(intno, error_code, next_eip);
1698 else
1699#endif /* VBOX */
1700 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1701 }
1702 } else {
1703#if !defined(CONFIG_USER_ONLY)
1704 if (env->hflags & HF_SVMI_MASK)
1705 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1706#endif
1707 do_interrupt_real(intno, is_int, error_code, next_eip);
1708 }
1709
1710#if !defined(CONFIG_USER_ONLY)
1711 if (env->hflags & HF_SVMI_MASK) {
1712 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1713 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1714 }
1715#endif
1716}
1717
1718/* This should come from sysemu.h - if we could include it here... */
1719void qemu_system_reset_request(void);
1720
1721/*
1722 * Check nested exceptions and change to double or triple fault if
1723 * needed. It should only be called if this is not an interrupt.
1724 * Returns the new exception number.
1725 */
1726static int check_exception(int intno, int *error_code)
1727{
1728 int first_contributory = env->old_exception == 0 ||
1729 (env->old_exception >= 10 &&
1730 env->old_exception <= 13);
1731 int second_contributory = intno == 0 ||
1732 (intno >= 10 && intno <= 13);
1733
1734 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1735 env->old_exception, intno);
1736
1737#if !defined(CONFIG_USER_ONLY)
1738 if (env->old_exception == EXCP08_DBLE) {
1739 if (env->hflags & HF_SVMI_MASK)
1740 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1741
1742 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1743
1744# ifndef VBOX
1745 qemu_system_reset_request();
1746# else
1747 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1748# endif
1749 return EXCP_HLT;
1750 }
1751#endif
1752
1753 if ((first_contributory && second_contributory)
1754 || (env->old_exception == EXCP0E_PAGE &&
1755 (second_contributory || (intno == EXCP0E_PAGE)))) {
1756 intno = EXCP08_DBLE;
1757 *error_code = 0;
1758 }
1759
1760 if (second_contributory || (intno == EXCP0E_PAGE) ||
1761 (intno == EXCP08_DBLE))
1762 env->old_exception = intno;
1763
1764 return intno;
1765}
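/* Example of the rules above: a #GP (13) raised while delivering a #NP (11)
 * is promoted to #DF (8), and any further exception while delivering that #DF
 * takes the EXCP08_DBLE branch, i.e. a triple fault / system reset. */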
1766
1767/*
1768 * Signal an interrupt. It is executed in the main CPU loop.
1769 * is_int is TRUE if coming from the int instruction. next_eip is the
1770 * EIP value AFTER the interrupt instruction. It is only relevant if
1771 * is_int is TRUE.
1772 */
1773static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1774 int next_eip_addend)
1775{
1776#if defined(VBOX) && defined(DEBUG)
1777 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1778#endif
1779 if (!is_int) {
1780 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1781 intno = check_exception(intno, &error_code);
1782 } else {
1783 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1784 }
1785
1786 env->exception_index = intno;
1787 env->error_code = error_code;
1788 env->exception_is_int = is_int;
1789 env->exception_next_eip = env->eip + next_eip_addend;
1790 cpu_loop_exit();
1791}
1792
1793/* shortcuts to generate exceptions */
1794
1795void raise_exception_err(int exception_index, int error_code)
1796{
1797 raise_interrupt(exception_index, 0, error_code, 0);
1798}
1799
1800void raise_exception(int exception_index)
1801{
1802 raise_interrupt(exception_index, 0, 0, 0);
1803}
1804
1805void raise_exception_env(int exception_index, CPUState *nenv)
1806{
1807 env = nenv;
1808 raise_exception(exception_index);
1809}
1810/* SMM support */
1811
1812#if defined(CONFIG_USER_ONLY)
1813
1814void do_smm_enter(void)
1815{
1816}
1817
1818void helper_rsm(void)
1819{
1820}
1821
1822#else
1823
1824#ifdef TARGET_X86_64
1825#define SMM_REVISION_ID 0x00020064
1826#else
1827#define SMM_REVISION_ID 0x00020000
1828#endif
1829
1830void do_smm_enter(void)
1831{
1832 target_ulong sm_state;
1833 SegmentCache *dt;
1834 int i, offset;
1835
1836 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1837 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1838
1839 env->hflags |= HF_SMM_MASK;
1840 cpu_smm_update(env);
1841
1842 sm_state = env->smbase + 0x8000;
1843
1844#ifdef TARGET_X86_64
1845 for(i = 0; i < 6; i++) {
1846 dt = &env->segs[i];
1847 offset = 0x7e00 + i * 16;
1848 stw_phys(sm_state + offset, dt->selector);
1849 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1850 stl_phys(sm_state + offset + 4, dt->limit);
1851 stq_phys(sm_state + offset + 8, dt->base);
1852 }
1853
1854 stq_phys(sm_state + 0x7e68, env->gdt.base);
1855 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1856
1857 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1858 stq_phys(sm_state + 0x7e78, env->ldt.base);
1859 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1860 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1861
1862 stq_phys(sm_state + 0x7e88, env->idt.base);
1863 stl_phys(sm_state + 0x7e84, env->idt.limit);
1864
1865 stw_phys(sm_state + 0x7e90, env->tr.selector);
1866 stq_phys(sm_state + 0x7e98, env->tr.base);
1867 stl_phys(sm_state + 0x7e94, env->tr.limit);
1868 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1869
1870 stq_phys(sm_state + 0x7ed0, env->efer);
1871
1872 stq_phys(sm_state + 0x7ff8, EAX);
1873 stq_phys(sm_state + 0x7ff0, ECX);
1874 stq_phys(sm_state + 0x7fe8, EDX);
1875 stq_phys(sm_state + 0x7fe0, EBX);
1876 stq_phys(sm_state + 0x7fd8, ESP);
1877 stq_phys(sm_state + 0x7fd0, EBP);
1878 stq_phys(sm_state + 0x7fc8, ESI);
1879 stq_phys(sm_state + 0x7fc0, EDI);
1880 for(i = 8; i < 16; i++)
1881 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1882 stq_phys(sm_state + 0x7f78, env->eip);
1883 stl_phys(sm_state + 0x7f70, compute_eflags());
1884 stl_phys(sm_state + 0x7f68, env->dr[6]);
1885 stl_phys(sm_state + 0x7f60, env->dr[7]);
1886
1887 stl_phys(sm_state + 0x7f48, env->cr[4]);
1888 stl_phys(sm_state + 0x7f50, env->cr[3]);
1889 stl_phys(sm_state + 0x7f58, env->cr[0]);
1890
1891 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1892 stl_phys(sm_state + 0x7f00, env->smbase);
1893#else
1894 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1895 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1896 stl_phys(sm_state + 0x7ff4, compute_eflags());
1897 stl_phys(sm_state + 0x7ff0, env->eip);
1898 stl_phys(sm_state + 0x7fec, EDI);
1899 stl_phys(sm_state + 0x7fe8, ESI);
1900 stl_phys(sm_state + 0x7fe4, EBP);
1901 stl_phys(sm_state + 0x7fe0, ESP);
1902 stl_phys(sm_state + 0x7fdc, EBX);
1903 stl_phys(sm_state + 0x7fd8, EDX);
1904 stl_phys(sm_state + 0x7fd4, ECX);
1905 stl_phys(sm_state + 0x7fd0, EAX);
1906 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1907 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1908
1909 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1910 stl_phys(sm_state + 0x7f64, env->tr.base);
1911 stl_phys(sm_state + 0x7f60, env->tr.limit);
1912 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1913
1914 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1915 stl_phys(sm_state + 0x7f80, env->ldt.base);
1916 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1917 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1918
1919 stl_phys(sm_state + 0x7f74, env->gdt.base);
1920 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1921
1922 stl_phys(sm_state + 0x7f58, env->idt.base);
1923 stl_phys(sm_state + 0x7f54, env->idt.limit);
1924
1925 for(i = 0; i < 6; i++) {
1926 dt = &env->segs[i];
1927 if (i < 3)
1928 offset = 0x7f84 + i * 12;
1929 else
1930 offset = 0x7f2c + (i - 3) * 12;
1931 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1932 stl_phys(sm_state + offset + 8, dt->base);
1933 stl_phys(sm_state + offset + 4, dt->limit);
1934 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1935 }
1936 stl_phys(sm_state + 0x7f14, env->cr[4]);
1937
1938 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1939 stl_phys(sm_state + 0x7ef8, env->smbase);
1940#endif
1941 /* init SMM cpu state */
1942
1943#ifdef TARGET_X86_64
1944 cpu_load_efer(env, 0);
1945#endif
1946 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1947 env->eip = 0x00008000;
1948 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1949 0xffffffff, 0);
1950 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1951 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1952 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1953 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1954 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1955
1956 cpu_x86_update_cr0(env,
1957 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1958 cpu_x86_update_cr4(env, 0);
1959 env->dr[7] = 0x00000400;
1960 CC_OP = CC_OP_EFLAGS;
1961}
1962
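/* RSM: leave System Management Mode by reloading the CPU state from the
   SMRAM save area. Not implemented for VBOX builds, which abort instead. */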
1963void helper_rsm(void)
1964{
1965#ifdef VBOX
1966 cpu_abort(env, "helper_rsm");
1967#else /* !VBOX */
1968 target_ulong sm_state;
1969 int i, offset;
1970 uint32_t val;
1971
1972 sm_state = env->smbase + 0x8000;
1973#ifdef TARGET_X86_64
1974 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1975
1976 for(i = 0; i < 6; i++) {
1977 offset = 0x7e00 + i * 16;
1978 cpu_x86_load_seg_cache(env, i,
1979 lduw_phys(sm_state + offset),
1980 ldq_phys(sm_state + offset + 8),
1981 ldl_phys(sm_state + offset + 4),
1982 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1983 }
1984
1985 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1986 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1987
1988 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1989 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1990 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1991 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1992#ifdef VBOX
1993 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1994 env->ldt.newselector = 0;
1995#endif
1996
1997 env->idt.base = ldq_phys(sm_state + 0x7e88);
1998 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1999
2000 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2001 env->tr.base = ldq_phys(sm_state + 0x7e98);
2002 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2003 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2004#ifdef VBOX
2005 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2006 env->tr.newselector = 0;
2007#endif
2008
2009 EAX = ldq_phys(sm_state + 0x7ff8);
2010 ECX = ldq_phys(sm_state + 0x7ff0);
2011 EDX = ldq_phys(sm_state + 0x7fe8);
2012 EBX = ldq_phys(sm_state + 0x7fe0);
2013 ESP = ldq_phys(sm_state + 0x7fd8);
2014 EBP = ldq_phys(sm_state + 0x7fd0);
2015 ESI = ldq_phys(sm_state + 0x7fc8);
2016 EDI = ldq_phys(sm_state + 0x7fc0);
2017 for(i = 8; i < 16; i++)
2018 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2019 env->eip = ldq_phys(sm_state + 0x7f78);
2020 load_eflags(ldl_phys(sm_state + 0x7f70),
2021 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2022 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2023 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2024
2025 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2026 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2027 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2028
2029 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2030 if (val & 0x20000) {
2031 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2032 }
2033#else
2034 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2035 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2036 load_eflags(ldl_phys(sm_state + 0x7ff4),
2037 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2038 env->eip = ldl_phys(sm_state + 0x7ff0);
2039 EDI = ldl_phys(sm_state + 0x7fec);
2040 ESI = ldl_phys(sm_state + 0x7fe8);
2041 EBP = ldl_phys(sm_state + 0x7fe4);
2042 ESP = ldl_phys(sm_state + 0x7fe0);
2043 EBX = ldl_phys(sm_state + 0x7fdc);
2044 EDX = ldl_phys(sm_state + 0x7fd8);
2045 ECX = ldl_phys(sm_state + 0x7fd4);
2046 EAX = ldl_phys(sm_state + 0x7fd0);
2047 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2048 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2049
2050 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2051 env->tr.base = ldl_phys(sm_state + 0x7f64);
2052 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2053 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2054#ifdef VBOX
2055 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2056 env->tr.newselector = 0;
2057#endif
2058
2059 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2060 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2061 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2062 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2063#ifdef VBOX
2064 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2065 env->ldt.newselector = 0;
2066#endif
2067
2068 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2069 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2070
2071 env->idt.base = ldl_phys(sm_state + 0x7f58);
2072 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2073
2074 for(i = 0; i < 6; i++) {
2075 if (i < 3)
2076 offset = 0x7f84 + i * 12;
2077 else
2078 offset = 0x7f2c + (i - 3) * 12;
2079 cpu_x86_load_seg_cache(env, i,
2080 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2081 ldl_phys(sm_state + offset + 8),
2082 ldl_phys(sm_state + offset + 4),
2083 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2084 }
2085 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2086
2087 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2088 if (val & 0x20000) {
2089 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2090 }
2091#endif
2092 CC_OP = CC_OP_EFLAGS;
2093 env->hflags &= ~HF_SMM_MASK;
2094 cpu_smm_update(env);
2095
2096 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2097 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2098#endif /* !VBOX */
2099}
2100
2101#endif /* !CONFIG_USER_ONLY */
2102
2103
2104/* division, flags are undefined */
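/* Each divide helper raises #DE (EXCP00_DIVZ) both for a zero divisor and
   for a quotient that does not fit in the destination register, matching
   the DIV/IDIV architectural behaviour. */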
2105
2106void helper_divb_AL(target_ulong t0)
2107{
2108 unsigned int num, den, q, r;
2109
2110 num = (EAX & 0xffff);
2111 den = (t0 & 0xff);
2112 if (den == 0) {
2113 raise_exception(EXCP00_DIVZ);
2114 }
2115 q = (num / den);
2116 if (q > 0xff)
2117 raise_exception(EXCP00_DIVZ);
2118 q &= 0xff;
2119 r = (num % den) & 0xff;
2120 EAX = (EAX & ~0xffff) | (r << 8) | q;
2121}
2122
2123void helper_idivb_AL(target_ulong t0)
2124{
2125 int num, den, q, r;
2126
2127 num = (int16_t)EAX;
2128 den = (int8_t)t0;
2129 if (den == 0) {
2130 raise_exception(EXCP00_DIVZ);
2131 }
2132 q = (num / den);
2133 if (q != (int8_t)q)
2134 raise_exception(EXCP00_DIVZ);
2135 q &= 0xff;
2136 r = (num % den) & 0xff;
2137 EAX = (EAX & ~0xffff) | (r << 8) | q;
2138}
2139
2140void helper_divw_AX(target_ulong t0)
2141{
2142 unsigned int num, den, q, r;
2143
2144 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2145 den = (t0 & 0xffff);
2146 if (den == 0) {
2147 raise_exception(EXCP00_DIVZ);
2148 }
2149 q = (num / den);
2150 if (q > 0xffff)
2151 raise_exception(EXCP00_DIVZ);
2152 q &= 0xffff;
2153 r = (num % den) & 0xffff;
2154 EAX = (EAX & ~0xffff) | q;
2155 EDX = (EDX & ~0xffff) | r;
2156}
2157
2158void helper_idivw_AX(target_ulong t0)
2159{
2160 int num, den, q, r;
2161
2162 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2163 den = (int16_t)t0;
2164 if (den == 0) {
2165 raise_exception(EXCP00_DIVZ);
2166 }
2167 q = (num / den);
2168 if (q != (int16_t)q)
2169 raise_exception(EXCP00_DIVZ);
2170 q &= 0xffff;
2171 r = (num % den) & 0xffff;
2172 EAX = (EAX & ~0xffff) | q;
2173 EDX = (EDX & ~0xffff) | r;
2174}
2175
2176void helper_divl_EAX(target_ulong t0)
2177{
2178 unsigned int den, r;
2179 uint64_t num, q;
2180
2181 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2182 den = t0;
2183 if (den == 0) {
2184 raise_exception(EXCP00_DIVZ);
2185 }
2186 q = (num / den);
2187 r = (num % den);
2188 if (q > 0xffffffff)
2189 raise_exception(EXCP00_DIVZ);
2190 EAX = (uint32_t)q;
2191 EDX = (uint32_t)r;
2192}
2193
2194void helper_idivl_EAX(target_ulong t0)
2195{
2196 int den, r;
2197 int64_t num, q;
2198
2199 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2200 den = t0;
2201 if (den == 0) {
2202 raise_exception(EXCP00_DIVZ);
2203 }
2204 q = (num / den);
2205 r = (num % den);
2206 if (q != (int32_t)q)
2207 raise_exception(EXCP00_DIVZ);
2208 EAX = (uint32_t)q;
2209 EDX = (uint32_t)r;
2210}
2211
2212/* bcd */
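/* ASCII/decimal adjust helpers (AAM, AAD, AAA, AAS, DAA, DAS). They operate
   on AL/AH and leave the resulting flag state in CC_SRC/CC_DST for the lazy
   flag evaluation. */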
2213
/* XXX: AAM must raise #DE when its immediate operand (the divisor) is 0; this case is not checked here. */
2215void helper_aam(int base)
2216{
2217 int al, ah;
2218 al = EAX & 0xff;
2219 ah = al / base;
2220 al = al % base;
2221 EAX = (EAX & ~0xffff) | al | (ah << 8);
2222 CC_DST = al;
2223}
2224
2225void helper_aad(int base)
2226{
2227 int al, ah;
2228 al = EAX & 0xff;
2229 ah = (EAX >> 8) & 0xff;
2230 al = ((ah * base) + al) & 0xff;
2231 EAX = (EAX & ~0xffff) | al;
2232 CC_DST = al;
2233}
2234
2235void helper_aaa(void)
2236{
2237 int icarry;
2238 int al, ah, af;
2239 int eflags;
2240
2241 eflags = helper_cc_compute_all(CC_OP);
2242 af = eflags & CC_A;
2243 al = EAX & 0xff;
2244 ah = (EAX >> 8) & 0xff;
2245
2246 icarry = (al > 0xf9);
2247 if (((al & 0x0f) > 9 ) || af) {
2248 al = (al + 6) & 0x0f;
2249 ah = (ah + 1 + icarry) & 0xff;
2250 eflags |= CC_C | CC_A;
2251 } else {
2252 eflags &= ~(CC_C | CC_A);
2253 al &= 0x0f;
2254 }
2255 EAX = (EAX & ~0xffff) | al | (ah << 8);
2256 CC_SRC = eflags;
2257}
2258
2259void helper_aas(void)
2260{
2261 int icarry;
2262 int al, ah, af;
2263 int eflags;
2264
2265 eflags = helper_cc_compute_all(CC_OP);
2266 af = eflags & CC_A;
2267 al = EAX & 0xff;
2268 ah = (EAX >> 8) & 0xff;
2269
2270 icarry = (al < 6);
2271 if (((al & 0x0f) > 9 ) || af) {
2272 al = (al - 6) & 0x0f;
2273 ah = (ah - 1 - icarry) & 0xff;
2274 eflags |= CC_C | CC_A;
2275 } else {
2276 eflags &= ~(CC_C | CC_A);
2277 al &= 0x0f;
2278 }
2279 EAX = (EAX & ~0xffff) | al | (ah << 8);
2280 CC_SRC = eflags;
2281}
2282
2283void helper_daa(void)
2284{
2285 int al, af, cf;
2286 int eflags;
2287
2288 eflags = helper_cc_compute_all(CC_OP);
2289 cf = eflags & CC_C;
2290 af = eflags & CC_A;
2291 al = EAX & 0xff;
2292
2293 eflags = 0;
2294 if (((al & 0x0f) > 9 ) || af) {
2295 al = (al + 6) & 0xff;
2296 eflags |= CC_A;
2297 }
2298 if ((al > 0x9f) || cf) {
2299 al = (al + 0x60) & 0xff;
2300 eflags |= CC_C;
2301 }
2302 EAX = (EAX & ~0xff) | al;
2303 /* well, speed is not an issue here, so we compute the flags by hand */
2304 eflags |= (al == 0) << 6; /* zf */
2305 eflags |= parity_table[al]; /* pf */
2306 eflags |= (al & 0x80); /* sf */
2307 CC_SRC = eflags;
2308}
2309
2310void helper_das(void)
2311{
2312 int al, al1, af, cf;
2313 int eflags;
2314
2315 eflags = helper_cc_compute_all(CC_OP);
2316 cf = eflags & CC_C;
2317 af = eflags & CC_A;
2318 al = EAX & 0xff;
2319
2320 eflags = 0;
2321 al1 = al;
2322 if (((al & 0x0f) > 9 ) || af) {
2323 eflags |= CC_A;
2324 if (al < 6 || cf)
2325 eflags |= CC_C;
2326 al = (al - 6) & 0xff;
2327 }
2328 if ((al1 > 0x99) || cf) {
2329 al = (al - 0x60) & 0xff;
2330 eflags |= CC_C;
2331 }
2332 EAX = (EAX & ~0xff) | al;
2333 /* well, speed is not an issue here, so we compute the flags by hand */
2334 eflags |= (al == 0) << 6; /* zf */
2335 eflags |= parity_table[al]; /* pf */
2336 eflags |= (al & 0x80); /* sf */
2337 CC_SRC = eflags;
2338}
2339
2340void helper_into(int next_eip_addend)
2341{
2342 int eflags;
2343 eflags = helper_cc_compute_all(CC_OP);
2344 if (eflags & CC_O) {
2345 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2346 }
2347}
2348
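/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0; on a match store
   ECX:EBX and set ZF, otherwise load the operand into EDX:EAX and clear ZF.
   The memory write is performed in both cases, as on real hardware. */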
2349void helper_cmpxchg8b(target_ulong a0)
2350{
2351 uint64_t d;
2352 int eflags;
2353
2354 eflags = helper_cc_compute_all(CC_OP);
2355 d = ldq(a0);
2356 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2357 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2358 eflags |= CC_Z;
2359 } else {
2360 /* always do the store */
2361 stq(a0, d);
2362 EDX = (uint32_t)(d >> 32);
2363 EAX = (uint32_t)d;
2364 eflags &= ~CC_Z;
2365 }
2366 CC_SRC = eflags;
2367}
2368
2369#ifdef TARGET_X86_64
2370void helper_cmpxchg16b(target_ulong a0)
2371{
2372 uint64_t d0, d1;
2373 int eflags;
2374
2375 if ((a0 & 0xf) != 0)
2376 raise_exception(EXCP0D_GPF);
2377 eflags = helper_cc_compute_all(CC_OP);
2378 d0 = ldq(a0);
2379 d1 = ldq(a0 + 8);
2380 if (d0 == EAX && d1 == EDX) {
2381 stq(a0, EBX);
2382 stq(a0 + 8, ECX);
2383 eflags |= CC_Z;
2384 } else {
2385 /* always do the store */
2386 stq(a0, d0);
2387 stq(a0 + 8, d1);
2388 EDX = d1;
2389 EAX = d0;
2390 eflags &= ~CC_Z;
2391 }
2392 CC_SRC = eflags;
2393}
2394#endif
2395
2396void helper_single_step(void)
2397{
2398#ifndef CONFIG_USER_ONLY
2399 check_hw_breakpoints(env, 1);
2400 env->dr[6] |= DR6_BS;
2401#endif
2402 raise_exception(EXCP01_DB);
2403}
2404
2405void helper_cpuid(void)
2406{
2407 uint32_t eax, ebx, ecx, edx;
2408
2409 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2410
2411 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2412 EAX = eax;
2413 EBX = ebx;
2414 ECX = ecx;
2415 EDX = edx;
2416}
2417
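/* Helper for ENTER with a non-zero nesting level: copies the outer frame
   pointers from the previous frame and finally pushes the new frame pointer
   value (t1), applying the SS stack mask to every access. */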
2418void helper_enter_level(int level, int data32, target_ulong t1)
2419{
2420 target_ulong ssp;
2421 uint32_t esp_mask, esp, ebp;
2422
2423 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2424 ssp = env->segs[R_SS].base;
2425 ebp = EBP;
2426 esp = ESP;
2427 if (data32) {
2428 /* 32 bit */
2429 esp -= 4;
2430 while (--level) {
2431 esp -= 4;
2432 ebp -= 4;
2433 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2434 }
2435 esp -= 4;
2436 stl(ssp + (esp & esp_mask), t1);
2437 } else {
2438 /* 16 bit */
2439 esp -= 2;
2440 while (--level) {
2441 esp -= 2;
2442 ebp -= 2;
2443 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2444 }
2445 esp -= 2;
2446 stw(ssp + (esp & esp_mask), t1);
2447 }
2448}
2449
2450#ifdef TARGET_X86_64
2451void helper_enter64_level(int level, int data64, target_ulong t1)
2452{
2453 target_ulong esp, ebp;
2454 ebp = EBP;
2455 esp = ESP;
2456
2457 if (data64) {
2458 /* 64 bit */
2459 esp -= 8;
2460 while (--level) {
2461 esp -= 8;
2462 ebp -= 8;
2463 stq(esp, ldq(ebp));
2464 }
2465 esp -= 8;
2466 stq(esp, t1);
2467 } else {
2468 /* 16 bit */
2469 esp -= 2;
2470 while (--level) {
2471 esp -= 2;
2472 ebp -= 2;
2473 stw(esp, lduw(ebp));
2474 }
2475 esp -= 2;
2476 stw(esp, t1);
2477 }
2478}
2479#endif
2480
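/* LLDT: a null selector loads an empty LDT; otherwise the selector must
   reference a present LDT descriptor (type 2) within the GDT limit, which is
   a 16-byte entry in long mode. */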
2481void helper_lldt(int selector)
2482{
2483 SegmentCache *dt;
2484 uint32_t e1, e2;
2485#ifndef VBOX
2486 int index, entry_limit;
2487#else
2488 unsigned int index, entry_limit;
2489#endif
2490 target_ulong ptr;
2491
2492#ifdef VBOX
2493 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2494 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2495#endif
2496
2497 selector &= 0xffff;
2498 if ((selector & 0xfffc) == 0) {
2499 /* XXX: NULL selector case: invalid LDT */
2500 env->ldt.base = 0;
2501 env->ldt.limit = 0;
2502#ifdef VBOX
2503 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2504 env->ldt.newselector = 0;
2505#endif
2506 } else {
2507 if (selector & 0x4)
2508 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2509 dt = &env->gdt;
2510 index = selector & ~7;
2511#ifdef TARGET_X86_64
2512 if (env->hflags & HF_LMA_MASK)
2513 entry_limit = 15;
2514 else
2515#endif
2516 entry_limit = 7;
2517 if ((index + entry_limit) > dt->limit)
2518 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2519 ptr = dt->base + index;
2520 e1 = ldl_kernel(ptr);
2521 e2 = ldl_kernel(ptr + 4);
2522 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2523 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2524 if (!(e2 & DESC_P_MASK))
2525 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2526#ifdef TARGET_X86_64
2527 if (env->hflags & HF_LMA_MASK) {
2528 uint32_t e3;
2529 e3 = ldl_kernel(ptr + 8);
2530 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2531 env->ldt.base |= (target_ulong)e3 << 32;
2532 } else
2533#endif
2534 {
2535 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2536 }
2537 }
2538 env->ldt.selector = selector;
2539#ifdef VBOX
2540 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2541 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2542#endif
2543}
2544
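/* LTR: the selector must reference a present, available TSS descriptor
   (type 1 or 9) in the GDT; the descriptor is marked busy once loaded. */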
2545void helper_ltr(int selector)
2546{
2547 SegmentCache *dt;
2548 uint32_t e1, e2;
2549#ifndef VBOX
2550 int index, type, entry_limit;
2551#else
2552 unsigned int index;
2553 int type, entry_limit;
2554#endif
2555 target_ulong ptr;
2556
2557#ifdef VBOX
2558 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2559 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2560 env->tr.flags, (RTSEL)(selector & 0xffff)));
2561#endif
2562 selector &= 0xffff;
2563 if ((selector & 0xfffc) == 0) {
2564 /* NULL selector case: invalid TR */
2565 env->tr.base = 0;
2566 env->tr.limit = 0;
2567 env->tr.flags = 0;
2568#ifdef VBOX
2569 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2570 env->tr.newselector = 0;
2571#endif
2572 } else {
2573 if (selector & 0x4)
2574 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2575 dt = &env->gdt;
2576 index = selector & ~7;
2577#ifdef TARGET_X86_64
2578 if (env->hflags & HF_LMA_MASK)
2579 entry_limit = 15;
2580 else
2581#endif
2582 entry_limit = 7;
2583 if ((index + entry_limit) > dt->limit)
2584 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2585 ptr = dt->base + index;
2586 e1 = ldl_kernel(ptr);
2587 e2 = ldl_kernel(ptr + 4);
2588 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2589 if ((e2 & DESC_S_MASK) ||
2590 (type != 1 && type != 9))
2591 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2592 if (!(e2 & DESC_P_MASK))
2593 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2594#ifdef TARGET_X86_64
2595 if (env->hflags & HF_LMA_MASK) {
2596 uint32_t e3, e4;
2597 e3 = ldl_kernel(ptr + 8);
2598 e4 = ldl_kernel(ptr + 12);
2599 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2600 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2601 load_seg_cache_raw_dt(&env->tr, e1, e2);
2602 env->tr.base |= (target_ulong)e3 << 32;
2603 } else
2604#endif
2605 {
2606 load_seg_cache_raw_dt(&env->tr, e1, e2);
2607 }
2608 e2 |= DESC_TSS_BUSY_MASK;
2609 stl_kernel(ptr + 4, e2);
2610 }
2611 env->tr.selector = selector;
2612#ifdef VBOX
2613 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2614 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2615 env->tr.flags, (RTSEL)(selector & 0xffff)));
2616#endif
2617}
2618
2619/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2620void helper_load_seg(int seg_reg, int selector)
2621{
2622 uint32_t e1, e2;
2623 int cpl, dpl, rpl;
2624 SegmentCache *dt;
2625#ifndef VBOX
2626 int index;
2627#else
2628 unsigned int index;
2629#endif
2630 target_ulong ptr;
2631
2632 selector &= 0xffff;
2633 cpl = env->hflags & HF_CPL_MASK;
2634#ifdef VBOX
2635
    /* Trying to load a selector with RPL=1? */
2637 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2638 {
2639 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2640 selector = selector & 0xfffc;
2641 }
2642#endif /* VBOX */
2643 if ((selector & 0xfffc) == 0) {
2644 /* null selector case */
2645 if (seg_reg == R_SS
2646#ifdef TARGET_X86_64
2647 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2648#endif
2649 )
2650 raise_exception_err(EXCP0D_GPF, 0);
2651 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2652 } else {
2653
2654 if (selector & 0x4)
2655 dt = &env->ldt;
2656 else
2657 dt = &env->gdt;
2658 index = selector & ~7;
2659 if ((index + 7) > dt->limit)
2660 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2661 ptr = dt->base + index;
2662 e1 = ldl_kernel(ptr);
2663 e2 = ldl_kernel(ptr + 4);
2664
2665 if (!(e2 & DESC_S_MASK))
2666 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2667 rpl = selector & 3;
2668 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2669 if (seg_reg == R_SS) {
2670 /* must be writable segment */
2671 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2672 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2673 if (rpl != cpl || dpl != cpl)
2674 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2675 } else {
2676 /* must be readable segment */
2677 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2678 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2679
2680 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2681 /* if not conforming code, test rights */
2682 if (dpl < cpl || dpl < rpl)
2683 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2684 }
2685 }
2686
2687 if (!(e2 & DESC_P_MASK)) {
2688 if (seg_reg == R_SS)
2689 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2690 else
2691 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2692 }
2693
2694 /* set the access bit if not already set */
2695 if (!(e2 & DESC_A_MASK)) {
2696 e2 |= DESC_A_MASK;
2697 stl_kernel(ptr + 4, e2);
2698 }
2699
2700 cpu_x86_load_seg_cache(env, seg_reg, selector,
2701 get_seg_base(e1, e2),
2702 get_seg_limit(e1, e2),
2703 e2);
2704#if 0
2705 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2706 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2707#endif
2708 }
2709}
2710
2711/* protected mode jump */
2712void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2713 int next_eip_addend)
2714{
2715 int gate_cs, type;
2716 uint32_t e1, e2, cpl, dpl, rpl, limit;
2717 target_ulong next_eip;
2718
2719#ifdef VBOX /** @todo Why do we do this? */
2720 e1 = e2 = 0;
2721#endif
2722 if ((new_cs & 0xfffc) == 0)
2723 raise_exception_err(EXCP0D_GPF, 0);
2724 if (load_segment(&e1, &e2, new_cs) != 0)
2725 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2726 cpl = env->hflags & HF_CPL_MASK;
2727 if (e2 & DESC_S_MASK) {
2728 if (!(e2 & DESC_CS_MASK))
2729 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2730 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2731 if (e2 & DESC_C_MASK) {
2732 /* conforming code segment */
2733 if (dpl > cpl)
2734 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2735 } else {
2736 /* non conforming code segment */
2737 rpl = new_cs & 3;
2738 if (rpl > cpl)
2739 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2740 if (dpl != cpl)
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 }
2743 if (!(e2 & DESC_P_MASK))
2744 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2745 limit = get_seg_limit(e1, e2);
2746 if (new_eip > limit &&
2747 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2748 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2749 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2750 get_seg_base(e1, e2), limit, e2);
2751 EIP = new_eip;
2752 } else {
2753 /* jump to call or task gate */
2754 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2755 rpl = new_cs & 3;
2756 cpl = env->hflags & HF_CPL_MASK;
2757 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2758 switch(type) {
2759 case 1: /* 286 TSS */
2760 case 9: /* 386 TSS */
2761 case 5: /* task gate */
2762 if (dpl < cpl || dpl < rpl)
2763 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2764 next_eip = env->eip + next_eip_addend;
2765 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2766 CC_OP = CC_OP_EFLAGS;
2767 break;
2768 case 4: /* 286 call gate */
2769 case 12: /* 386 call gate */
2770 if ((dpl < cpl) || (dpl < rpl))
2771 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2772 if (!(e2 & DESC_P_MASK))
2773 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2774 gate_cs = e1 >> 16;
2775 new_eip = (e1 & 0xffff);
2776 if (type == 12)
2777 new_eip |= (e2 & 0xffff0000);
2778 if (load_segment(&e1, &e2, gate_cs) != 0)
2779 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2780 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2781 /* must be code segment */
2782 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2783 (DESC_S_MASK | DESC_CS_MASK)))
2784 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2785 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2786 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2787 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2788 if (!(e2 & DESC_P_MASK))
2789#ifdef VBOX /* See page 3-514 of 253666.pdf */
2790 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2791#else
2792 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2793#endif
2794 limit = get_seg_limit(e1, e2);
2795 if (new_eip > limit)
2796 raise_exception_err(EXCP0D_GPF, 0);
2797 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2798 get_seg_base(e1, e2), limit, e2);
2799 EIP = new_eip;
2800 break;
2801 default:
2802 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2803 break;
2804 }
2805 }
2806}
2807
2808/* real mode call */
2809void helper_lcall_real(int new_cs, target_ulong new_eip1,
2810 int shift, int next_eip)
2811{
2812 int new_eip;
2813 uint32_t esp, esp_mask;
2814 target_ulong ssp;
2815
2816 new_eip = new_eip1;
2817 esp = ESP;
2818 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2819 ssp = env->segs[R_SS].base;
2820 if (shift) {
2821 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2822 PUSHL(ssp, esp, esp_mask, next_eip);
2823 } else {
2824 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2825 PUSHW(ssp, esp, esp_mask, next_eip);
2826 }
2827
2828 SET_ESP(esp, esp_mask);
2829 env->eip = new_eip;
2830 env->segs[R_CS].selector = new_cs;
2831 env->segs[R_CS].base = (new_cs << 4);
2832}
2833
2834/* protected mode call */
2835void helper_lcall_protected(int new_cs, target_ulong new_eip,
2836 int shift, int next_eip_addend)
2837{
2838 int new_stack, i;
2839 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2840 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2841 uint32_t val, limit, old_sp_mask;
2842 target_ulong ssp, old_ssp, next_eip;
2843
2844#ifdef VBOX /** @todo Why do we do this? */
2845 e1 = e2 = 0;
2846#endif
2847 next_eip = env->eip + next_eip_addend;
2848 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2849 LOG_PCALL_STATE(env);
2850 if ((new_cs & 0xfffc) == 0)
2851 raise_exception_err(EXCP0D_GPF, 0);
2852 if (load_segment(&e1, &e2, new_cs) != 0)
2853 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2854 cpl = env->hflags & HF_CPL_MASK;
2855 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2856 if (e2 & DESC_S_MASK) {
2857 if (!(e2 & DESC_CS_MASK))
2858 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2859 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2860 if (e2 & DESC_C_MASK) {
2861 /* conforming code segment */
2862 if (dpl > cpl)
2863 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2864 } else {
2865 /* non conforming code segment */
2866 rpl = new_cs & 3;
2867 if (rpl > cpl)
2868 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2869 if (dpl != cpl)
2870 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2871 }
2872 if (!(e2 & DESC_P_MASK))
2873 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2874
2875#ifdef TARGET_X86_64
2876 /* XXX: check 16/32 bit cases in long mode */
2877 if (shift == 2) {
2878 target_ulong rsp;
2879 /* 64 bit case */
2880 rsp = ESP;
2881 PUSHQ(rsp, env->segs[R_CS].selector);
2882 PUSHQ(rsp, next_eip);
2883 /* from this point, not restartable */
2884 ESP = rsp;
2885 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2886 get_seg_base(e1, e2),
2887 get_seg_limit(e1, e2), e2);
2888 EIP = new_eip;
2889 } else
2890#endif
2891 {
2892 sp = ESP;
2893 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2894 ssp = env->segs[R_SS].base;
2895 if (shift) {
2896 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2897 PUSHL(ssp, sp, sp_mask, next_eip);
2898 } else {
2899 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2900 PUSHW(ssp, sp, sp_mask, next_eip);
2901 }
2902
2903 limit = get_seg_limit(e1, e2);
2904 if (new_eip > limit)
2905 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2906 /* from this point, not restartable */
2907 SET_ESP(sp, sp_mask);
2908 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2909 get_seg_base(e1, e2), limit, e2);
2910 EIP = new_eip;
2911 }
2912 } else {
2913 /* check gate type */
2914 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2915 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2916 rpl = new_cs & 3;
2917 switch(type) {
2918 case 1: /* available 286 TSS */
2919 case 9: /* available 386 TSS */
2920 case 5: /* task gate */
2921 if (dpl < cpl || dpl < rpl)
2922 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2923 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2924 CC_OP = CC_OP_EFLAGS;
2925 return;
2926 case 4: /* 286 call gate */
2927 case 12: /* 386 call gate */
2928 break;
2929 default:
2930 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2931 break;
2932 }
2933 shift = type >> 3;
2934
2935 if (dpl < cpl || dpl < rpl)
2936 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2937 /* check valid bit */
2938 if (!(e2 & DESC_P_MASK))
2939 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2940 selector = e1 >> 16;
2941 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2942 param_count = e2 & 0x1f;
2943 if ((selector & 0xfffc) == 0)
2944 raise_exception_err(EXCP0D_GPF, 0);
2945
2946 if (load_segment(&e1, &e2, selector) != 0)
2947 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2948 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2949 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2950 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2951 if (dpl > cpl)
2952 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2953 if (!(e2 & DESC_P_MASK))
2954 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2955
2956 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2957 /* to inner privilege */
2958 get_ss_esp_from_tss(&ss, &sp, dpl);
2959 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2960 ss, sp, param_count, ESP);
2961 if ((ss & 0xfffc) == 0)
2962 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2963 if ((ss & 3) != dpl)
2964 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2965 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2966 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2967 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2968 if (ss_dpl != dpl)
2969 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2970 if (!(ss_e2 & DESC_S_MASK) ||
2971 (ss_e2 & DESC_CS_MASK) ||
2972 !(ss_e2 & DESC_W_MASK))
2973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2974 if (!(ss_e2 & DESC_P_MASK))
2975#ifdef VBOX /* See page 3-99 of 253666.pdf */
2976 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2977#else
2978 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2979#endif
2980
2981 // push_size = ((param_count * 2) + 8) << shift;
2982
2983 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2984 old_ssp = env->segs[R_SS].base;
2985
2986 sp_mask = get_sp_mask(ss_e2);
2987 ssp = get_seg_base(ss_e1, ss_e2);
2988 if (shift) {
2989 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2990 PUSHL(ssp, sp, sp_mask, ESP);
2991 for(i = param_count - 1; i >= 0; i--) {
2992 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2993 PUSHL(ssp, sp, sp_mask, val);
2994 }
2995 } else {
2996 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2997 PUSHW(ssp, sp, sp_mask, ESP);
2998 for(i = param_count - 1; i >= 0; i--) {
2999 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3000 PUSHW(ssp, sp, sp_mask, val);
3001 }
3002 }
3003 new_stack = 1;
3004 } else {
3005 /* to same privilege */
3006 sp = ESP;
3007 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3008 ssp = env->segs[R_SS].base;
3009 // push_size = (4 << shift);
3010 new_stack = 0;
3011 }
3012
3013 if (shift) {
3014 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3015 PUSHL(ssp, sp, sp_mask, next_eip);
3016 } else {
3017 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3018 PUSHW(ssp, sp, sp_mask, next_eip);
3019 }
3020
3021 /* from this point, not restartable */
3022
3023 if (new_stack) {
3024 ss = (ss & ~3) | dpl;
3025 cpu_x86_load_seg_cache(env, R_SS, ss,
3026 ssp,
3027 get_seg_limit(ss_e1, ss_e2),
3028 ss_e2);
3029 }
3030
3031 selector = (selector & ~3) | dpl;
3032 cpu_x86_load_seg_cache(env, R_CS, selector,
3033 get_seg_base(e1, e2),
3034 get_seg_limit(e1, e2),
3035 e2);
3036 cpu_x86_set_cpl(env, dpl);
3037 SET_ESP(sp, sp_mask);
3038 EIP = offset;
3039 }
3040}
3041
3042/* real and vm86 mode iret */
3043void helper_iret_real(int shift)
3044{
3045 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3046 target_ulong ssp;
3047 int eflags_mask;
3048#ifdef VBOX
3049 bool fVME = false;
3050
3051 remR3TrapClear(env->pVM);
3052#endif /* VBOX */
3053
    sp_mask = 0xffff; /* XXX: use SS segment size? */
3055 sp = ESP;
3056 ssp = env->segs[R_SS].base;
3057 if (shift == 1) {
3058 /* 32 bits */
3059 POPL(ssp, sp, sp_mask, new_eip);
3060 POPL(ssp, sp, sp_mask, new_cs);
3061 new_cs &= 0xffff;
3062 POPL(ssp, sp, sp_mask, new_eflags);
3063 } else {
3064 /* 16 bits */
3065 POPW(ssp, sp, sp_mask, new_eip);
3066 POPW(ssp, sp, sp_mask, new_cs);
3067 POPW(ssp, sp, sp_mask, new_eflags);
3068 }
3069#ifdef VBOX
3070 if ( (env->eflags & VM_MASK)
3071 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3072 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3073 {
3074 fVME = true;
3075 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3076 /* if TF will be set -> #GP */
3077 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3078 || (new_eflags & TF_MASK))
3079 raise_exception(EXCP0D_GPF);
3080 }
3081#endif /* VBOX */
3082 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3083 env->segs[R_CS].selector = new_cs;
3084 env->segs[R_CS].base = (new_cs << 4);
3085 env->eip = new_eip;
3086#ifdef VBOX
3087 if (fVME)
3088 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3089 else
3090#endif
3091 if (env->eflags & VM_MASK)
3092 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3093 else
3094 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3095 if (shift == 0)
3096 eflags_mask &= 0xffff;
3097 load_eflags(new_eflags, eflags_mask);
3098 env->hflags2 &= ~HF2_NMI_MASK;
3099#ifdef VBOX
3100 if (fVME)
3101 {
3102 if (new_eflags & IF_MASK)
3103 env->eflags |= VIF_MASK;
3104 else
3105 env->eflags &= ~VIF_MASK;
3106 }
3107#endif /* VBOX */
3108}
3109
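/* Used when returning to an outer privilege level: a data or non-conforming
   code segment whose DPL is below the new CPL is no longer accessible and is
   therefore cleared. */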
3110static inline void validate_seg(int seg_reg, int cpl)
3111{
3112 int dpl;
3113 uint32_t e2;
3114
3115 /* XXX: on x86_64, we do not want to nullify FS and GS because
3116 they may still contain a valid base. I would be interested to
3117 know how a real x86_64 CPU behaves */
3118 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3119 (env->segs[seg_reg].selector & 0xfffc) == 0)
3120 return;
3121
3122 e2 = env->segs[seg_reg].flags;
3123 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3124 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3125 /* data or non conforming code segment */
3126 if (dpl < cpl) {
3127 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3128 }
3129 }
3130}
3131
/* protected mode return: shared by RETF (is_iret = 0) and IRET (is_iret = 1) */
3133static inline void helper_ret_protected(int shift, int is_iret, int addend)
3134{
3135 uint32_t new_cs, new_eflags, new_ss;
3136 uint32_t new_es, new_ds, new_fs, new_gs;
3137 uint32_t e1, e2, ss_e1, ss_e2;
3138 int cpl, dpl, rpl, eflags_mask, iopl;
3139 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3140
3141#ifdef VBOX /** @todo Why do we do this? */
3142 ss_e1 = ss_e2 = e1 = e2 = 0;
3143#endif
3144
3145#ifdef TARGET_X86_64
3146 if (shift == 2)
3147 sp_mask = -1;
3148 else
3149#endif
3150 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3151 sp = ESP;
3152 ssp = env->segs[R_SS].base;
3153 new_eflags = 0; /* avoid warning */
3154#ifdef TARGET_X86_64
3155 if (shift == 2) {
3156 POPQ(sp, new_eip);
3157 POPQ(sp, new_cs);
3158 new_cs &= 0xffff;
3159 if (is_iret) {
3160 POPQ(sp, new_eflags);
3161 }
3162 } else
3163#endif
3164 if (shift == 1) {
3165 /* 32 bits */
3166 POPL(ssp, sp, sp_mask, new_eip);
3167 POPL(ssp, sp, sp_mask, new_cs);
3168 new_cs &= 0xffff;
3169 if (is_iret) {
3170 POPL(ssp, sp, sp_mask, new_eflags);
3171#if defined(VBOX) && defined(DEBUG)
3172 printf("iret: new CS %04X\n", new_cs);
3173 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3174 printf("iret: new EFLAGS %08X\n", new_eflags);
3175 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3176#endif
3177 if (new_eflags & VM_MASK)
3178 goto return_to_vm86;
3179 }
3180#ifdef VBOX
3181 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3182 {
3183# ifdef DEBUG
3184 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3185# endif
3186 new_cs = new_cs & 0xfffc;
3187 }
3188#endif
3189 } else {
3190 /* 16 bits */
3191 POPW(ssp, sp, sp_mask, new_eip);
3192 POPW(ssp, sp, sp_mask, new_cs);
3193 if (is_iret)
3194 POPW(ssp, sp, sp_mask, new_eflags);
3195 }
3196 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3197 new_cs, new_eip, shift, addend);
3198 LOG_PCALL_STATE(env);
3199 if ((new_cs & 0xfffc) == 0)
3200 {
3201#if defined(VBOX) && defined(DEBUG)
3202 printf("new_cs & 0xfffc) == 0\n");
3203#endif
3204 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3205 }
3206 if (load_segment(&e1, &e2, new_cs) != 0)
3207 {
3208#if defined(VBOX) && defined(DEBUG)
3209 printf("load_segment failed\n");
3210#endif
3211 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3212 }
3213 if (!(e2 & DESC_S_MASK) ||
3214 !(e2 & DESC_CS_MASK))
3215 {
3216#if defined(VBOX) && defined(DEBUG)
3217 printf("e2 mask %08x\n", e2);
3218#endif
3219 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3220 }
3221 cpl = env->hflags & HF_CPL_MASK;
3222 rpl = new_cs & 3;
3223 if (rpl < cpl)
3224 {
3225#if defined(VBOX) && defined(DEBUG)
3226 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3227#endif
3228 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3229 }
3230 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3231 if (e2 & DESC_C_MASK) {
3232 if (dpl > rpl)
3233 {
3234#if defined(VBOX) && defined(DEBUG)
3235 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3236#endif
3237 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3238 }
3239 } else {
3240 if (dpl != rpl)
3241 {
3242#if defined(VBOX) && defined(DEBUG)
3243 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3244#endif
3245 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3246 }
3247 }
3248 if (!(e2 & DESC_P_MASK))
3249 {
3250#if defined(VBOX) && defined(DEBUG)
3251 printf("DESC_P_MASK e2=%08x\n", e2);
3252#endif
3253 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3254 }
3255
3256 sp += addend;
3257 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3258 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3259 /* return to same privilege level */
3260#ifdef VBOX
3261 if (!(e2 & DESC_A_MASK))
3262 e2 = set_segment_accessed(new_cs, e2);
3263#endif
3264 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3265 get_seg_base(e1, e2),
3266 get_seg_limit(e1, e2),
3267 e2);
3268 } else {
3269 /* return to different privilege level */
3270#ifdef TARGET_X86_64
3271 if (shift == 2) {
3272 POPQ(sp, new_esp);
3273 POPQ(sp, new_ss);
3274 new_ss &= 0xffff;
3275 } else
3276#endif
3277 if (shift == 1) {
3278 /* 32 bits */
3279 POPL(ssp, sp, sp_mask, new_esp);
3280 POPL(ssp, sp, sp_mask, new_ss);
3281 new_ss &= 0xffff;
3282 } else {
3283 /* 16 bits */
3284 POPW(ssp, sp, sp_mask, new_esp);
3285 POPW(ssp, sp, sp_mask, new_ss);
3286 }
3287 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3288 new_ss, new_esp);
3289 if ((new_ss & 0xfffc) == 0) {
3290#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
3292 /* XXX: test CS64 ? */
3293 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3294# ifdef VBOX
3295 if (!(e2 & DESC_A_MASK))
3296 e2 = set_segment_accessed(new_cs, e2);
3297# endif
3298 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3299 0, 0xffffffff,
3300 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3301 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3302 DESC_W_MASK | DESC_A_MASK);
3303 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3304 } else
3305#endif
3306 {
3307 raise_exception_err(EXCP0D_GPF, 0);
3308 }
3309 } else {
3310 if ((new_ss & 3) != rpl)
3311 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3312 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3313 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3314 if (!(ss_e2 & DESC_S_MASK) ||
3315 (ss_e2 & DESC_CS_MASK) ||
3316 !(ss_e2 & DESC_W_MASK))
3317 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3318 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3319 if (dpl != rpl)
3320 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3321 if (!(ss_e2 & DESC_P_MASK))
3322 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3323#ifdef VBOX
3324 if (!(e2 & DESC_A_MASK))
3325 e2 = set_segment_accessed(new_cs, e2);
3326 if (!(ss_e2 & DESC_A_MASK))
3327 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3328#endif
3329 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3330 get_seg_base(ss_e1, ss_e2),
3331 get_seg_limit(ss_e1, ss_e2),
3332 ss_e2);
3333 }
3334
3335 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3336 get_seg_base(e1, e2),
3337 get_seg_limit(e1, e2),
3338 e2);
3339 cpu_x86_set_cpl(env, rpl);
3340 sp = new_esp;
3341#ifdef TARGET_X86_64
3342 if (env->hflags & HF_CS64_MASK)
3343 sp_mask = -1;
3344 else
3345#endif
3346 sp_mask = get_sp_mask(ss_e2);
3347
3348 /* validate data segments */
3349 validate_seg(R_ES, rpl);
3350 validate_seg(R_DS, rpl);
3351 validate_seg(R_FS, rpl);
3352 validate_seg(R_GS, rpl);
3353
3354 sp += addend;
3355 }
3356 SET_ESP(sp, sp_mask);
3357 env->eip = new_eip;
3358 if (is_iret) {
3359 /* NOTE: 'cpl' is the _old_ CPL */
3360 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3361 if (cpl == 0)
3362#ifdef VBOX
3363 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3364#else
3365 eflags_mask |= IOPL_MASK;
3366#endif
3367 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3368 if (cpl <= iopl)
3369 eflags_mask |= IF_MASK;
3370 if (shift == 0)
3371 eflags_mask &= 0xffff;
3372 load_eflags(new_eflags, eflags_mask);
3373 }
3374 return;
3375
3376 return_to_vm86:
3377 POPL(ssp, sp, sp_mask, new_esp);
3378 POPL(ssp, sp, sp_mask, new_ss);
3379 POPL(ssp, sp, sp_mask, new_es);
3380 POPL(ssp, sp, sp_mask, new_ds);
3381 POPL(ssp, sp, sp_mask, new_fs);
3382 POPL(ssp, sp, sp_mask, new_gs);
3383
3384 /* modify processor state */
3385 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3386 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3387 load_seg_vm(R_CS, new_cs & 0xffff);
3388 cpu_x86_set_cpl(env, 3);
3389 load_seg_vm(R_SS, new_ss & 0xffff);
3390 load_seg_vm(R_ES, new_es & 0xffff);
3391 load_seg_vm(R_DS, new_ds & 0xffff);
3392 load_seg_vm(R_FS, new_fs & 0xffff);
3393 load_seg_vm(R_GS, new_gs & 0xffff);
3394
3395 env->eip = new_eip & 0xffff;
3396 ESP = new_esp;
3397}
3398
3399void helper_iret_protected(int shift, int next_eip)
3400{
3401 int tss_selector, type;
3402 uint32_t e1, e2;
3403
3404#ifdef VBOX
3405 e1 = e2 = 0; /** @todo Why do we do this? */
3406 remR3TrapClear(env->pVM);
3407#endif
3408
3409 /* specific case for TSS */
3410 if (env->eflags & NT_MASK) {
3411#ifdef TARGET_X86_64
3412 if (env->hflags & HF_LMA_MASK)
3413 raise_exception_err(EXCP0D_GPF, 0);
3414#endif
3415 tss_selector = lduw_kernel(env->tr.base + 0);
3416 if (tss_selector & 4)
3417 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3418 if (load_segment(&e1, &e2, tss_selector) != 0)
3419 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3420 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3421 /* NOTE: we check both segment and busy TSS */
3422 if (type != 3)
3423 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3424 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3425 } else {
3426 helper_ret_protected(shift, 1, 0);
3427 }
3428 env->hflags2 &= ~HF2_NMI_MASK;
3429}
3430
3431void helper_lret_protected(int shift, int addend)
3432{
3433 helper_ret_protected(shift, 0, addend);
3434}
3435
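/* SYSENTER: switch to CPL 0 with flat CS/SS derived from the SYSENTER_CS MSR
   (SS = CS + 8) and load ESP/EIP from the SYSENTER_ESP/EIP MSRs; a null
   SYSENTER_CS yields #GP(0). */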
3436void helper_sysenter(void)
3437{
3438 if (env->sysenter_cs == 0) {
3439 raise_exception_err(EXCP0D_GPF, 0);
3440 }
3441 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3442 cpu_x86_set_cpl(env, 0);
3443
3444#ifdef TARGET_X86_64
3445 if (env->hflags & HF_LMA_MASK) {
3446 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3447 0, 0xffffffff,
3448 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3449 DESC_S_MASK |
3450 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3451 } else
3452#endif
3453 {
3454 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3455 0, 0xffffffff,
3456 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3457 DESC_S_MASK |
3458 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3459 }
3460 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3461 0, 0xffffffff,
3462 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3463 DESC_S_MASK |
3464 DESC_W_MASK | DESC_A_MASK);
3465 ESP = env->sysenter_esp;
3466 EIP = env->sysenter_eip;
3467}
3468
3469void helper_sysexit(int dflag)
3470{
3471 int cpl;
3472
3473 cpl = env->hflags & HF_CPL_MASK;
3474 if (env->sysenter_cs == 0 || cpl != 0) {
3475 raise_exception_err(EXCP0D_GPF, 0);
3476 }
3477 cpu_x86_set_cpl(env, 3);
3478#ifdef TARGET_X86_64
3479 if (dflag == 2) {
3480 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3481 0, 0xffffffff,
3482 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3483 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3484 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3485 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3486 0, 0xffffffff,
3487 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3488 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3489 DESC_W_MASK | DESC_A_MASK);
3490 } else
3491#endif
3492 {
3493 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3494 0, 0xffffffff,
3495 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3496 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3497 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3498 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3499 0, 0xffffffff,
3500 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3501 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3502 DESC_W_MASK | DESC_A_MASK);
3503 }
3504 ESP = ECX;
3505 EIP = EDX;
3506}
3507
3508#if defined(CONFIG_USER_ONLY)
3509target_ulong helper_read_crN(int reg)
3510{
3511 return 0;
3512}
3513
3514void helper_write_crN(int reg, target_ulong t0)
3515{
3516}
3517
3518void helper_movl_drN_T0(int reg, target_ulong t0)
3519{
3520}
3521#else
3522target_ulong helper_read_crN(int reg)
3523{
3524 target_ulong val;
3525
3526 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3527 switch(reg) {
3528 default:
3529 val = env->cr[reg];
3530 break;
3531 case 8:
3532 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3533#ifndef VBOX
3534 val = cpu_get_apic_tpr(env->apic_state);
3535#else /* VBOX */
3536 val = cpu_get_apic_tpr(env);
3537#endif /* VBOX */
3538 } else {
3539 val = env->v_tpr;
3540 }
3541 break;
3542 }
3543 return val;
3544}
3545
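/* CR0/CR3/CR4 writes go through the cpu_x86_update_cr* helpers so that
   hflags and the TLB stay consistent; CR8 is forwarded to the APIC TPR
   unless virtual interrupt masking (HF2_VINTR) is active, and env->v_tpr is
   updated in both cases. */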
3546void helper_write_crN(int reg, target_ulong t0)
3547{
3548 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3549 switch(reg) {
3550 case 0:
3551 cpu_x86_update_cr0(env, t0);
3552 break;
3553 case 3:
3554 cpu_x86_update_cr3(env, t0);
3555 break;
3556 case 4:
3557 cpu_x86_update_cr4(env, t0);
3558 break;
3559 case 8:
3560 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3561#ifndef VBOX
3562 cpu_set_apic_tpr(env->apic_state, t0);
3563#else /* VBOX */
3564 cpu_set_apic_tpr(env, t0);
3565#endif /* VBOX */
3566 }
3567 env->v_tpr = t0 & 0x0f;
3568 break;
3569 default:
3570 env->cr[reg] = t0;
3571 break;
3572 }
3573}
3574
3575void helper_movl_drN_T0(int reg, target_ulong t0)
3576{
3577 int i;
3578
3579 if (reg < 4) {
3580 hw_breakpoint_remove(env, reg);
3581 env->dr[reg] = t0;
3582 hw_breakpoint_insert(env, reg);
3583 } else if (reg == 7) {
3584 for (i = 0; i < 4; i++)
3585 hw_breakpoint_remove(env, i);
3586 env->dr[7] = t0;
3587 for (i = 0; i < 4; i++)
3588 hw_breakpoint_insert(env, i);
3589 } else
3590 env->dr[reg] = t0;
3591}
3592#endif
3593
3594void helper_lmsw(target_ulong t0)
3595{
    /* Only the 4 low bits of CR0 are modified; LMSW can set PE but never
       clear it once it has been set. */
3598 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3599 helper_write_crN(0, t0);
3600}
3601
3602void helper_clts(void)
3603{
3604 env->cr[0] &= ~CR0_TS_MASK;
3605 env->hflags &= ~HF_TS_MASK;
3606}
3607
3608void helper_invlpg(target_ulong addr)
3609{
3610 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3611 tlb_flush_page(env, addr);
3612}
3613
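/* RDTSC: raises #GP(0) when CR4.TSD is set and CPL != 0; otherwise returns
   the time stamp counter plus env->tsc_offset in EDX:EAX. */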
3614void helper_rdtsc(void)
3615{
3616 uint64_t val;
3617
3618 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3619 raise_exception(EXCP0D_GPF);
3620 }
3621 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3622
3623 val = cpu_get_tsc(env) + env->tsc_offset;
3624 EAX = (uint32_t)(val);
3625 EDX = (uint32_t)(val >> 32);
3626}
3627
3628void helper_rdtscp(void)
3629{
3630 helper_rdtsc();
3631#ifndef VBOX
3632 ECX = (uint32_t)(env->tsc_aux);
3633#else /* VBOX */
3634 uint64_t val;
3635 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3636 ECX = (uint32_t)(val);
3637 else
3638 ECX = 0;
3639#endif /* VBOX */
3640}
3641
3642void helper_rdpmc(void)
3643{
3644#ifdef VBOX
3645 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3646 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3647 raise_exception(EXCP0D_GPF);
3648 }
3649 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3650 EAX = 0;
3651 EDX = 0;
3652#else /* !VBOX */
3653 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3654 raise_exception(EXCP0D_GPF);
3655 }
3656 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3657
3658 /* currently unimplemented */
3659 raise_exception_err(EXCP06_ILLOP, 0);
3660#endif /* !VBOX */
3661}
3662
3663#if defined(CONFIG_USER_ONLY)
3664void helper_wrmsr(void)
3665{
3666}
3667
3668void helper_rdmsr(void)
3669{
3670}
3671#else
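/* WRMSR: write EDX:EAX to the MSR selected by ECX; in VBOX builds the value
   is additionally forwarded to CPUM via cpu_wrmsr(). */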
3672void helper_wrmsr(void)
3673{
3674 uint64_t val;
3675
3676 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3677
3678 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3679
3680 switch((uint32_t)ECX) {
3681 case MSR_IA32_SYSENTER_CS:
3682 env->sysenter_cs = val & 0xffff;
3683 break;
3684 case MSR_IA32_SYSENTER_ESP:
3685 env->sysenter_esp = val;
3686 break;
3687 case MSR_IA32_SYSENTER_EIP:
3688 env->sysenter_eip = val;
3689 break;
3690 case MSR_IA32_APICBASE:
3691# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3692 cpu_set_apic_base(env->apic_state, val);
3693# endif
3694 break;
3695 case MSR_EFER:
3696 {
3697 uint64_t update_mask;
3698 update_mask = 0;
3699 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3700 update_mask |= MSR_EFER_SCE;
3701 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3702 update_mask |= MSR_EFER_LME;
3703 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3704 update_mask |= MSR_EFER_FFXSR;
3705 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3706 update_mask |= MSR_EFER_NXE;
3707 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3708 update_mask |= MSR_EFER_SVME;
3709 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3710 update_mask |= MSR_EFER_FFXSR;
3711 cpu_load_efer(env, (env->efer & ~update_mask) |
3712 (val & update_mask));
3713 }
3714 break;
3715 case MSR_STAR:
3716 env->star = val;
3717 break;
3718 case MSR_PAT:
3719 env->pat = val;
3720 break;
3721 case MSR_VM_HSAVE_PA:
3722 env->vm_hsave = val;
3723 break;
3724#ifdef TARGET_X86_64
3725 case MSR_LSTAR:
3726 env->lstar = val;
3727 break;
3728 case MSR_CSTAR:
3729 env->cstar = val;
3730 break;
3731 case MSR_FMASK:
3732 env->fmask = val;
3733 break;
3734 case MSR_FSBASE:
3735 env->segs[R_FS].base = val;
3736 break;
3737 case MSR_GSBASE:
3738 env->segs[R_GS].base = val;
3739 break;
3740 case MSR_KERNELGSBASE:
3741 env->kernelgsbase = val;
3742 break;
3743#endif
3744# ifndef VBOX
3745 case MSR_MTRRphysBase(0):
3746 case MSR_MTRRphysBase(1):
3747 case MSR_MTRRphysBase(2):
3748 case MSR_MTRRphysBase(3):
3749 case MSR_MTRRphysBase(4):
3750 case MSR_MTRRphysBase(5):
3751 case MSR_MTRRphysBase(6):
3752 case MSR_MTRRphysBase(7):
3753 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3754 break;
3755 case MSR_MTRRphysMask(0):
3756 case MSR_MTRRphysMask(1):
3757 case MSR_MTRRphysMask(2):
3758 case MSR_MTRRphysMask(3):
3759 case MSR_MTRRphysMask(4):
3760 case MSR_MTRRphysMask(5):
3761 case MSR_MTRRphysMask(6):
3762 case MSR_MTRRphysMask(7):
3763 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3764 break;
3765 case MSR_MTRRfix64K_00000:
3766 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3767 break;
3768 case MSR_MTRRfix16K_80000:
3769 case MSR_MTRRfix16K_A0000:
3770 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3771 break;
3772 case MSR_MTRRfix4K_C0000:
3773 case MSR_MTRRfix4K_C8000:
3774 case MSR_MTRRfix4K_D0000:
3775 case MSR_MTRRfix4K_D8000:
3776 case MSR_MTRRfix4K_E0000:
3777 case MSR_MTRRfix4K_E8000:
3778 case MSR_MTRRfix4K_F0000:
3779 case MSR_MTRRfix4K_F8000:
3780 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3781 break;
3782 case MSR_MTRRdefType:
3783 env->mtrr_deftype = val;
3784 break;
3785 case MSR_MCG_STATUS:
3786 env->mcg_status = val;
3787 break;
3788 case MSR_MCG_CTL:
3789 if ((env->mcg_cap & MCG_CTL_P)
3790 && (val == 0 || val == ~(uint64_t)0))
3791 env->mcg_ctl = val;
3792 break;
3793 case MSR_TSC_AUX:
3794 env->tsc_aux = val;
3795 break;
3796# endif /* !VBOX */
3797 default:
3798# ifndef VBOX
3799 if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3801 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3802 if ((offset & 0x3) != 0
3803 || (val == 0 || val == ~(uint64_t)0))
3804 env->mce_banks[offset] = val;
3805 break;
3806 }
3807 /* XXX: exception ? */
3808# endif
3809 break;
3810 }
3811
3812# ifdef VBOX
3813 /* call CPUM. */
3814 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3815 {
3816 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3817 }
3818# endif
3819}
3820
3821void helper_rdmsr(void)
3822{
3823 uint64_t val;
3824
3825 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3826
3827 switch((uint32_t)ECX) {
3828 case MSR_IA32_SYSENTER_CS:
3829 val = env->sysenter_cs;
3830 break;
3831 case MSR_IA32_SYSENTER_ESP:
3832 val = env->sysenter_esp;
3833 break;
3834 case MSR_IA32_SYSENTER_EIP:
3835 val = env->sysenter_eip;
3836 break;
3837 case MSR_IA32_APICBASE:
3838#ifndef VBOX
3839 val = cpu_get_apic_base(env->apic_state);
3840#else /* VBOX */
3841 val = cpu_get_apic_base(env);
3842#endif /* VBOX */
3843 break;
3844 case MSR_EFER:
3845 val = env->efer;
3846 break;
3847 case MSR_STAR:
3848 val = env->star;
3849 break;
3850 case MSR_PAT:
3851 val = env->pat;
3852 break;
3853 case MSR_VM_HSAVE_PA:
3854 val = env->vm_hsave;
3855 break;
3856# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3857 case MSR_IA32_PERF_STATUS:
3858 /* tsc_increment_by_tick */
3859 val = 1000ULL;
3860 /* CPU multiplier */
3861 val |= (((uint64_t)4ULL) << 40);
3862 break;
3863# endif /* !VBOX */
3864#ifdef TARGET_X86_64
3865 case MSR_LSTAR:
3866 val = env->lstar;
3867 break;
3868 case MSR_CSTAR:
3869 val = env->cstar;
3870 break;
3871 case MSR_FMASK:
3872 val = env->fmask;
3873 break;
3874 case MSR_FSBASE:
3875 val = env->segs[R_FS].base;
3876 break;
3877 case MSR_GSBASE:
3878 val = env->segs[R_GS].base;
3879 break;
3880 case MSR_KERNELGSBASE:
3881 val = env->kernelgsbase;
3882 break;
3883# ifndef VBOX
3884 case MSR_TSC_AUX:
3885 val = env->tsc_aux;
3886 break;
3887# endif /*!VBOX*/
3888#endif
3889# ifndef VBOX
3890 case MSR_MTRRphysBase(0):
3891 case MSR_MTRRphysBase(1):
3892 case MSR_MTRRphysBase(2):
3893 case MSR_MTRRphysBase(3):
3894 case MSR_MTRRphysBase(4):
3895 case MSR_MTRRphysBase(5):
3896 case MSR_MTRRphysBase(6):
3897 case MSR_MTRRphysBase(7):
3898 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3899 break;
3900 case MSR_MTRRphysMask(0):
3901 case MSR_MTRRphysMask(1):
3902 case MSR_MTRRphysMask(2):
3903 case MSR_MTRRphysMask(3):
3904 case MSR_MTRRphysMask(4):
3905 case MSR_MTRRphysMask(5):
3906 case MSR_MTRRphysMask(6):
3907 case MSR_MTRRphysMask(7):
3908 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3909 break;
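 /* env->mtrr_fixed[] holds the eleven fixed-range MTRRs in MSR order:
    index 0 is the 64K range, 1..2 the two 16K ranges and 3..10 the eight
    4K ranges - hence the +1 and +3 offsets in the cases below (and in
    helper_wrmsr above). */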
3910 case MSR_MTRRfix64K_00000:
3911 val = env->mtrr_fixed[0];
3912 break;
3913 case MSR_MTRRfix16K_80000:
3914 case MSR_MTRRfix16K_A0000:
3915 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3916 break;
3917 case MSR_MTRRfix4K_C0000:
3918 case MSR_MTRRfix4K_C8000:
3919 case MSR_MTRRfix4K_D0000:
3920 case MSR_MTRRfix4K_D8000:
3921 case MSR_MTRRfix4K_E0000:
3922 case MSR_MTRRfix4K_E8000:
3923 case MSR_MTRRfix4K_F0000:
3924 case MSR_MTRRfix4K_F8000:
3925 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3926 break;
3927 case MSR_MTRRdefType:
3928 val = env->mtrr_deftype;
3929 break;
3930 case MSR_MTRRcap:
3931 if (env->cpuid_features & CPUID_MTRR)
3932 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3933 else
3934 /* XXX: exception ? */
3935 val = 0;
3936 break;
3937 case MSR_MCG_CAP:
3938 val = env->mcg_cap;
3939 break;
3940 case MSR_MCG_CTL:
3941 if (env->mcg_cap & MCG_CTL_P)
3942 val = env->mcg_ctl;
3943 else
3944 val = 0;
3945 break;
3946 case MSR_MCG_STATUS:
3947 val = env->mcg_status;
3948 break;
3949# endif /* !VBOX */
3950 default:
3951# ifndef VBOX
3952 if ((uint32_t)ECX >= MSR_MC0_CTL
3953 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3954 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3955 val = env->mce_banks[offset];
3956 break;
3957 }
3958 /* XXX: exception ? */
3959 val = 0;
3960# else /* VBOX */
3961 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3962 {
3963 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3964 val = 0;
3965 }
3966# endif /* VBOX */
3967 break;
3968 }
3969 EAX = (uint32_t)(val);
3970 EDX = (uint32_t)(val >> 32);
3971
3972# ifdef VBOX_STRICT
3973 if ((uint32_t)ECX != MSR_IA32_TSC) {
3974 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3975 val = 0;
3976 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3977 }
3978# endif
3979}
3980#endif
3981
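/* LSL: loads the segment limit of the selector into the destination and sets ZF
   on success.  Of the system descriptors only TSS (types 1, 3, 9, 11) and LDT
   (type 2) descriptors have a limit; everything else - gates in particular -
   just clears ZF.  Non-conforming code and data segments additionally require
   DPL >= max(CPL, RPL). */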
3982target_ulong helper_lsl(target_ulong selector1)
3983{
3984 unsigned int limit;
3985 uint32_t e1, e2, eflags, selector;
3986 int rpl, dpl, cpl, type;
3987
3988 selector = selector1 & 0xffff;
3989 eflags = helper_cc_compute_all(CC_OP);
3990 if ((selector & 0xfffc) == 0)
3991 goto fail;
3992 if (load_segment(&e1, &e2, selector) != 0)
3993 goto fail;
3994 rpl = selector & 3;
3995 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3996 cpl = env->hflags & HF_CPL_MASK;
3997 if (e2 & DESC_S_MASK) {
3998 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3999 /* conforming */
4000 } else {
4001 if (dpl < cpl || dpl < rpl)
4002 goto fail;
4003 }
4004 } else {
4005 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4006 switch(type) {
4007 case 1:
4008 case 2:
4009 case 3:
4010 case 9:
4011 case 11:
4012 break;
4013 default:
4014 goto fail;
4015 }
4016 if (dpl < cpl || dpl < rpl) {
4017 fail:
4018 CC_SRC = eflags & ~CC_Z;
4019 return 0;
4020 }
4021 }
4022 limit = get_seg_limit(e1, e2);
4023 CC_SRC = eflags | CC_Z;
4024 return limit;
4025}
4026
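/* LAR: like LSL but returns the access rights bytes (e2 & 0x00f0ff00) instead of
   the limit, and additionally accepts call gates (types 4 and 12) and task
   gates (type 5), which have access rights but no limit. */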
4027target_ulong helper_lar(target_ulong selector1)
4028{
4029 uint32_t e1, e2, eflags, selector;
4030 int rpl, dpl, cpl, type;
4031
4032 selector = selector1 & 0xffff;
4033 eflags = helper_cc_compute_all(CC_OP);
4034 if ((selector & 0xfffc) == 0)
4035 goto fail;
4036 if (load_segment(&e1, &e2, selector) != 0)
4037 goto fail;
4038 rpl = selector & 3;
4039 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4040 cpl = env->hflags & HF_CPL_MASK;
4041 if (e2 & DESC_S_MASK) {
4042 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4043 /* conforming */
4044 } else {
4045 if (dpl < cpl || dpl < rpl)
4046 goto fail;
4047 }
4048 } else {
4049 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4050 switch(type) {
4051 case 1:
4052 case 2:
4053 case 3:
4054 case 4:
4055 case 5:
4056 case 9:
4057 case 11:
4058 case 12:
4059 break;
4060 default:
4061 goto fail;
4062 }
4063 if (dpl < cpl || dpl < rpl) {
4064 fail:
4065 CC_SRC = eflags & ~CC_Z;
4066 return 0;
4067 }
4068 }
4069 CC_SRC = eflags | CC_Z;
4070 return e2 & 0x00f0ff00;
4071}
4072
4073void helper_verr(target_ulong selector1)
4074{
4075 uint32_t e1, e2, eflags, selector;
4076 int rpl, dpl, cpl;
4077
4078 selector = selector1 & 0xffff;
4079 eflags = helper_cc_compute_all(CC_OP);
4080 if ((selector & 0xfffc) == 0)
4081 goto fail;
4082 if (load_segment(&e1, &e2, selector) != 0)
4083 goto fail;
4084 if (!(e2 & DESC_S_MASK))
4085 goto fail;
4086 rpl = selector & 3;
4087 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4088 cpl = env->hflags & HF_CPL_MASK;
4089 if (e2 & DESC_CS_MASK) {
4090 if (!(e2 & DESC_R_MASK))
4091 goto fail;
4092 if (!(e2 & DESC_C_MASK)) {
4093 if (dpl < cpl || dpl < rpl)
4094 goto fail;
4095 }
4096 } else {
4097 if (dpl < cpl || dpl < rpl) {
4098 fail:
4099 CC_SRC = eflags & ~CC_Z;
4100 return;
4101 }
4102 }
4103 CC_SRC = eflags | CC_Z;
4104}
4105
4106void helper_verw(target_ulong selector1)
4107{
4108 uint32_t e1, e2, eflags, selector;
4109 int rpl, dpl, cpl;
4110
4111 selector = selector1 & 0xffff;
4112 eflags = helper_cc_compute_all(CC_OP);
4113 if ((selector & 0xfffc) == 0)
4114 goto fail;
4115 if (load_segment(&e1, &e2, selector) != 0)
4116 goto fail;
4117 if (!(e2 & DESC_S_MASK))
4118 goto fail;
4119 rpl = selector & 3;
4120 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4121 cpl = env->hflags & HF_CPL_MASK;
4122 if (e2 & DESC_CS_MASK) {
4123 goto fail;
4124 } else {
4125 if (dpl < cpl || dpl < rpl)
4126 goto fail;
4127 if (!(e2 & DESC_W_MASK)) {
4128 fail:
4129 CC_SRC = eflags & ~CC_Z;
4130 return;
4131 }
4132 }
4133 CC_SRC = eflags | CC_Z;
4134}
4135
4136/* x87 FPU helpers */
4137
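/* Sets the given exception bit(s) in the FPU status word; if the exception is
   unmasked in the control word (the corresponding FPUC_EM bit is clear), the
   summary and busy flags are raised as well so that a pending #MF can be
   delivered by fpu_raise_exception() on the next FP instruction or FWAIT. */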
4138static void fpu_set_exception(int mask)
4139{
4140 env->fpus |= mask;
4141 if (env->fpus & (~env->fpuc & FPUC_EM))
4142 env->fpus |= FPUS_SE | FPUS_B;
4143}
4144
4145static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4146{
4147 if (b == 0.0)
4148 fpu_set_exception(FPUS_ZE);
4149 return a / b;
4150}
4151
4152static void fpu_raise_exception(void)
4153{
4154 if (env->cr[0] & CR0_NE_MASK) {
4155 raise_exception(EXCP10_COPR);
4156 }
4157#if !defined(CONFIG_USER_ONLY)
4158 else {
4159 cpu_set_ferr(env);
4160 }
4161#endif
4162}
4163
4164void helper_flds_FT0(uint32_t val)
4165{
4166 union {
4167 float32 f;
4168 uint32_t i;
4169 } u;
4170 u.i = val;
4171 FT0 = float32_to_floatx(u.f, &env->fp_status);
4172}
4173
4174void helper_fldl_FT0(uint64_t val)
4175{
4176 union {
4177 float64 f;
4178 uint64_t i;
4179 } u;
4180 u.i = val;
4181 FT0 = float64_to_floatx(u.f, &env->fp_status);
4182}
4183
4184void helper_fildl_FT0(int32_t val)
4185{
4186 FT0 = int32_to_floatx(val, &env->fp_status);
4187}
4188
4189void helper_flds_ST0(uint32_t val)
4190{
4191 int new_fpstt;
4192 union {
4193 float32 f;
4194 uint32_t i;
4195 } u;
4196 new_fpstt = (env->fpstt - 1) & 7;
4197 u.i = val;
4198 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4199 env->fpstt = new_fpstt;
4200 env->fptags[new_fpstt] = 0; /* validate stack entry */
4201}
4202
4203void helper_fldl_ST0(uint64_t val)
4204{
4205 int new_fpstt;
4206 union {
4207 float64 f;
4208 uint64_t i;
4209 } u;
4210 new_fpstt = (env->fpstt - 1) & 7;
4211 u.i = val;
4212 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4213 env->fpstt = new_fpstt;
4214 env->fptags[new_fpstt] = 0; /* validate stack entry */
4215}
4216
4217void helper_fildl_ST0(int32_t val)
4218{
4219 int new_fpstt;
4220 new_fpstt = (env->fpstt - 1) & 7;
4221 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4222 env->fpstt = new_fpstt;
4223 env->fptags[new_fpstt] = 0; /* validate stack entry */
4224}
4225
4226void helper_fildll_ST0(int64_t val)
4227{
4228 int new_fpstt;
4229 new_fpstt = (env->fpstt - 1) & 7;
4230 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4231 env->fpstt = new_fpstt;
4232 env->fptags[new_fpstt] = 0; /* validate stack entry */
4233}
4234
4235#ifndef VBOX
4236uint32_t helper_fsts_ST0(void)
4237#else
4238RTCCUINTREG helper_fsts_ST0(void)
4239#endif
4240{
4241 union {
4242 float32 f;
4243 uint32_t i;
4244 } u;
4245 u.f = floatx_to_float32(ST0, &env->fp_status);
4246 return u.i;
4247}
4248
4249uint64_t helper_fstl_ST0(void)
4250{
4251 union {
4252 float64 f;
4253 uint64_t i;
4254 } u;
4255 u.f = floatx_to_float64(ST0, &env->fp_status);
4256 return u.i;
4257}
4258
4259#ifndef VBOX
4260int32_t helper_fist_ST0(void)
4261#else
4262RTCCINTREG helper_fist_ST0(void)
4263#endif
4264{
4265 int32_t val;
4266 val = floatx_to_int32(ST0, &env->fp_status);
4267 if (val != (int16_t)val)
4268 val = -32768;
4269 return val;
4270}
4271
4272#ifndef VBOX
4273int32_t helper_fistl_ST0(void)
4274#else
4275RTCCINTREG helper_fistl_ST0(void)
4276#endif
4277{
4278 int32_t val;
4279 val = floatx_to_int32(ST0, &env->fp_status);
4280 return val;
4281}
4282
4283int64_t helper_fistll_ST0(void)
4284{
4285 int64_t val;
4286 val = floatx_to_int64(ST0, &env->fp_status);
4287 return val;
4288}
4289
4290#ifndef VBOX
4291int32_t helper_fistt_ST0(void)
4292#else
4293RTCCINTREG helper_fistt_ST0(void)
4294#endif
4295{
4296 int32_t val;
4297 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4298 if (val != (int16_t)val)
4299 val = -32768;
4300 return val;
4301}
4302
4303#ifndef VBOX
4304int32_t helper_fisttl_ST0(void)
4305#else
4306RTCCINTREG helper_fisttl_ST0(void)
4307#endif
4308{
4309 int32_t val;
4310 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4311 return val;
4312}
4313
4314int64_t helper_fisttll_ST0(void)
4315{
4316 int64_t val;
4317 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4318 return val;
4319}
4320
4321void helper_fldt_ST0(target_ulong ptr)
4322{
4323 int new_fpstt;
4324 new_fpstt = (env->fpstt - 1) & 7;
4325 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4326 env->fpstt = new_fpstt;
4327 env->fptags[new_fpstt] = 0; /* validate stack entry */
4328}
4329
4330void helper_fstt_ST0(target_ulong ptr)
4331{
4332 helper_fstt(ST0, ptr);
4333}
4334
4335void helper_fpush(void)
4336{
4337 fpush();
4338}
4339
4340void helper_fpop(void)
4341{
4342 fpop();
4343}
4344
4345void helper_fdecstp(void)
4346{
4347 env->fpstt = (env->fpstt - 1) & 7;
4348 env->fpus &= (~0x4700);
4349}
4350
4351void helper_fincstp(void)
4352{
4353 env->fpstt = (env->fpstt + 1) & 7;
4354 env->fpus &= (~0x4700);
4355}
4356
4357/* FPU move */
4358
4359void helper_ffree_STN(int st_index)
4360{
4361 env->fptags[(env->fpstt + st_index) & 7] = 1;
4362}
4363
4364void helper_fmov_ST0_FT0(void)
4365{
4366 ST0 = FT0;
4367}
4368
4369void helper_fmov_FT0_STN(int st_index)
4370{
4371 FT0 = ST(st_index);
4372}
4373
4374void helper_fmov_ST0_STN(int st_index)
4375{
4376 ST0 = ST(st_index);
4377}
4378
4379void helper_fmov_STN_ST0(int st_index)
4380{
4381 ST(st_index) = ST0;
4382}
4383
4384void helper_fxchg_ST0_STN(int st_index)
4385{
4386 CPU86_LDouble tmp;
4387 tmp = ST(st_index);
4388 ST(st_index) = ST0;
4389 ST0 = tmp;
4390}
4391
4392/* FPU operations */
4393
4394static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4395
4396void helper_fcom_ST0_FT0(void)
4397{
4398 int ret;
4399
4400 ret = floatx_compare(ST0, FT0, &env->fp_status);
4401 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4402}
4403
4404void helper_fucom_ST0_FT0(void)
4405{
4406 int ret;
4407
4408 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4409 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4410}
4411
4412static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4413
4414void helper_fcomi_ST0_FT0(void)
4415{
4416 int eflags;
4417 int ret;
4418
4419 ret = floatx_compare(ST0, FT0, &env->fp_status);
4420 eflags = helper_cc_compute_all(CC_OP);
4421 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4422 CC_SRC = eflags;
4423}
4424
4425void helper_fucomi_ST0_FT0(void)
4426{
4427 int eflags;
4428 int ret;
4429
4430 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4431 eflags = helper_cc_compute_all(CC_OP);
4432 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4433 CC_SRC = eflags;
4434}
4435
4436void helper_fadd_ST0_FT0(void)
4437{
4438 ST0 += FT0;
4439}
4440
4441void helper_fmul_ST0_FT0(void)
4442{
4443 ST0 *= FT0;
4444}
4445
4446void helper_fsub_ST0_FT0(void)
4447{
4448 ST0 -= FT0;
4449}
4450
4451void helper_fsubr_ST0_FT0(void)
4452{
4453 ST0 = FT0 - ST0;
4454}
4455
4456void helper_fdiv_ST0_FT0(void)
4457{
4458 ST0 = helper_fdiv(ST0, FT0);
4459}
4460
4461void helper_fdivr_ST0_FT0(void)
4462{
4463 ST0 = helper_fdiv(FT0, ST0);
4464}
4465
4466/* fp operations between STN and ST0 */
4467
4468void helper_fadd_STN_ST0(int st_index)
4469{
4470 ST(st_index) += ST0;
4471}
4472
4473void helper_fmul_STN_ST0(int st_index)
4474{
4475 ST(st_index) *= ST0;
4476}
4477
4478void helper_fsub_STN_ST0(int st_index)
4479{
4480 ST(st_index) -= ST0;
4481}
4482
4483void helper_fsubr_STN_ST0(int st_index)
4484{
4485 CPU86_LDouble *p;
4486 p = &ST(st_index);
4487 *p = ST0 - *p;
4488}
4489
4490void helper_fdiv_STN_ST0(int st_index)
4491{
4492 CPU86_LDouble *p;
4493 p = &ST(st_index);
4494 *p = helper_fdiv(*p, ST0);
4495}
4496
4497void helper_fdivr_STN_ST0(int st_index)
4498{
4499 CPU86_LDouble *p;
4500 p = &ST(st_index);
4501 *p = helper_fdiv(ST0, *p);
4502}
4503
4504/* misc FPU operations */
4505void helper_fchs_ST0(void)
4506{
4507 ST0 = floatx_chs(ST0);
4508}
4509
4510void helper_fabs_ST0(void)
4511{
4512 ST0 = floatx_abs(ST0);
4513}
4514
4515void helper_fld1_ST0(void)
4516{
4517 ST0 = f15rk[1];
4518}
4519
4520void helper_fldl2t_ST0(void)
4521{
4522 ST0 = f15rk[6];
4523}
4524
4525void helper_fldl2e_ST0(void)
4526{
4527 ST0 = f15rk[5];
4528}
4529
4530void helper_fldpi_ST0(void)
4531{
4532 ST0 = f15rk[2];
4533}
4534
4535void helper_fldlg2_ST0(void)
4536{
4537 ST0 = f15rk[3];
4538}
4539
4540void helper_fldln2_ST0(void)
4541{
4542 ST0 = f15rk[4];
4543}
4544
4545void helper_fldz_ST0(void)
4546{
4547 ST0 = f15rk[0];
4548}
4549
4550void helper_fldz_FT0(void)
4551{
4552 FT0 = f15rk[0];
4553}
4554
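/* FNSTSW: the status word is assembled from env->fpus with the top-of-stack
   field (bits 11-13) taken from env->fpstt; the condition code bits C0-C3
   live at bits 8, 9, 10 and 14 respectively. */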
4555#ifndef VBOX
4556uint32_t helper_fnstsw(void)
4557#else
4558RTCCUINTREG helper_fnstsw(void)
4559#endif
4560{
4561 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4562}
4563
4564#ifndef VBOX
4565uint32_t helper_fnstcw(void)
4566#else
4567RTCCUINTREG helper_fnstcw(void)
4568#endif
4569{
4570 return env->fpuc;
4571}
4572
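/* Propagates the rounding control (FPUC bits 10-11) and precision control
   (FPUC bits 8-9: 0 = single, 2 = double, 3 = extended; 1 is reserved) fields
   of the control word into the softfloat status, mapping the precision to
   32/64/80-bit rounding when FLOATX80 is available. */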
4573static void update_fp_status(void)
4574{
4575 int rnd_type;
4576
4577 /* set rounding mode */
4578 switch(env->fpuc & RC_MASK) {
4579 default:
4580 case RC_NEAR:
4581 rnd_type = float_round_nearest_even;
4582 break;
4583 case RC_DOWN:
4584 rnd_type = float_round_down;
4585 break;
4586 case RC_UP:
4587 rnd_type = float_round_up;
4588 break;
4589 case RC_CHOP:
4590 rnd_type = float_round_to_zero;
4591 break;
4592 }
4593 set_float_rounding_mode(rnd_type, &env->fp_status);
4594#ifdef FLOATX80
4595 switch((env->fpuc >> 8) & 3) {
4596 case 0:
4597 rnd_type = 32;
4598 break;
4599 case 2:
4600 rnd_type = 64;
4601 break;
4602 case 3:
4603 default:
4604 rnd_type = 80;
4605 break;
4606 }
4607 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4608#endif
4609}
4610
4611void helper_fldcw(uint32_t val)
4612{
4613 env->fpuc = val;
4614 update_fp_status();
4615}
4616
4617void helper_fclex(void)
4618{
4619 env->fpus &= 0x7f00;
4620}
4621
4622void helper_fwait(void)
4623{
4624 if (env->fpus & FPUS_SE)
4625 fpu_raise_exception();
4626}
4627
4628void helper_fninit(void)
4629{
4630 env->fpus = 0;
4631 env->fpstt = 0;
4632 env->fpuc = 0x37f;
4633 env->fptags[0] = 1;
4634 env->fptags[1] = 1;
4635 env->fptags[2] = 1;
4636 env->fptags[3] = 1;
4637 env->fptags[4] = 1;
4638 env->fptags[5] = 1;
4639 env->fptags[6] = 1;
4640 env->fptags[7] = 1;
4641}
4642
4643/* BCD ops */
4644
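/* FBLD/FBSTP operate on the 80-bit packed BCD format: bytes 0..8 hold 18 BCD
   digits, two per byte with the least significant pair first, and bit 7 of
   byte 9 is the sign.  E.g. the value 1234 is stored as 34 12 00 .. 00 00. */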
4645void helper_fbld_ST0(target_ulong ptr)
4646{
4647 CPU86_LDouble tmp;
4648 uint64_t val;
4649 unsigned int v;
4650 int i;
4651
4652 val = 0;
4653 for(i = 8; i >= 0; i--) {
4654 v = ldub(ptr + i);
4655 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4656 }
4657 tmp = val;
4658 if (ldub(ptr + 9) & 0x80)
4659 tmp = -tmp;
4660 fpush();
4661 ST0 = tmp;
4662}
4663
4664void helper_fbst_ST0(target_ulong ptr)
4665{
4666 int v;
4667 target_ulong mem_ref, mem_end;
4668 int64_t val;
4669
4670 val = floatx_to_int64(ST0, &env->fp_status);
4671 mem_ref = ptr;
4672 mem_end = mem_ref + 9;
4673 if (val < 0) {
4674 stb(mem_end, 0x80);
4675 val = -val;
4676 } else {
4677 stb(mem_end, 0x00);
4678 }
4679 while (mem_ref < mem_end) {
4680 if (val == 0)
4681 break;
4682 v = val % 100;
4683 val = val / 100;
4684 v = ((v / 10) << 4) | (v % 10);
4685 stb(mem_ref++, v);
4686 }
4687 while (mem_ref < mem_end) {
4688 stb(mem_ref++, 0);
4689 }
4690}
4691
4692void helper_f2xm1(void)
4693{
4694 ST0 = pow(2.0,ST0) - 1.0;
4695}
4696
4697void helper_fyl2x(void)
4698{
4699 CPU86_LDouble fptemp;
4700
4701 fptemp = ST0;
4702 if (fptemp>0.0){
4703 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4704 ST1 *= fptemp;
4705 fpop();
4706 } else {
4707 env->fpus &= (~0x4700);
4708 env->fpus |= 0x400;
4709 }
4710}
4711
4712void helper_fptan(void)
4713{
4714 CPU86_LDouble fptemp;
4715
4716 fptemp = ST0;
4717 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4718 env->fpus |= 0x400;
4719 } else {
4720 ST0 = tan(fptemp);
4721 fpush();
4722 ST0 = 1.0;
4723 env->fpus &= (~0x400); /* C2 <-- 0 */
4724 /* the above code is for |arg| < 2**52 only */
4725 }
4726}
4727
4728void helper_fpatan(void)
4729{
4730 CPU86_LDouble fptemp, fpsrcop;
4731
4732 fpsrcop = ST1;
4733 fptemp = ST0;
4734 ST1 = atan2(fpsrcop,fptemp);
4735 fpop();
4736}
4737
4738void helper_fxtract(void)
4739{
4740 CPU86_LDoubleU temp;
4741 unsigned int expdif;
4742
4743 temp.d = ST0;
4744 expdif = EXPD(temp) - EXPBIAS;
4745 /*DP exponent bias*/
4746 ST0 = expdif;
4747 fpush();
4748 BIASEXPONENT(temp);
4749 ST0 = temp.d;
4750}
4751
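/* FPREM1/FPREM compute a partial remainder: when the exponent difference
   between ST0 and ST1 is below 53 (this implementation works on doubles) the
   reduction completes in one step, C2 is cleared and the three low quotient
   bits are reported in C0/C3/C1; otherwise only a partial reduction is done,
   C2 is set and software is expected to loop.  FPREM1 rounds the quotient to
   nearest (IEEE remainder), FPREM truncates it towards zero. */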
4752void helper_fprem1(void)
4753{
4754 CPU86_LDouble dblq, fpsrcop, fptemp;
4755 CPU86_LDoubleU fpsrcop1, fptemp1;
4756 int expdif;
4757 signed long long int q;
4758
4759#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4760 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4761#else
4762 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4763#endif
4764 ST0 = 0.0 / 0.0; /* NaN */
4765 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4766 return;
4767 }
4768
4769 fpsrcop = ST0;
4770 fptemp = ST1;
4771 fpsrcop1.d = fpsrcop;
4772 fptemp1.d = fptemp;
4773 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4774
4775 if (expdif < 0) {
4776 /* optimisation? taken from the AMD docs */
4777 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4778 /* ST0 is unchanged */
4779 return;
4780 }
4781
4782 if (expdif < 53) {
4783 dblq = fpsrcop / fptemp;
4784 /* round dblq towards nearest integer */
4785 dblq = rint(dblq);
4786 ST0 = fpsrcop - fptemp * dblq;
4787
4788 /* convert dblq to q by truncating towards zero */
4789 if (dblq < 0.0)
4790 q = (signed long long int)(-dblq);
4791 else
4792 q = (signed long long int)dblq;
4793
4794 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4795 /* (C0,C3,C1) <-- (q2,q1,q0) */
4796 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4797 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4798 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4799 } else {
4800 env->fpus |= 0x400; /* C2 <-- 1 */
4801 fptemp = pow(2.0, expdif - 50);
4802 fpsrcop = (ST0 / ST1) / fptemp;
4803 /* fpsrcop = integer obtained by chopping */
4804 fpsrcop = (fpsrcop < 0.0) ?
4805 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4806 ST0 -= (ST1 * fpsrcop * fptemp);
4807 }
4808}
4809
4810void helper_fprem(void)
4811{
4812 CPU86_LDouble dblq, fpsrcop, fptemp;
4813 CPU86_LDoubleU fpsrcop1, fptemp1;
4814 int expdif;
4815 signed long long int q;
4816
4817#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4818 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4819#else
4820 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4821#endif
4822 ST0 = 0.0 / 0.0; /* NaN */
4823 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4824 return;
4825 }
4826
4827 fpsrcop = (CPU86_LDouble)ST0;
4828 fptemp = (CPU86_LDouble)ST1;
4829 fpsrcop1.d = fpsrcop;
4830 fptemp1.d = fptemp;
4831 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4832
4833 if (expdif < 0) {
4834 /* optimisation? taken from the AMD docs */
4835 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4836 /* ST0 is unchanged */
4837 return;
4838 }
4839
4840 if ( expdif < 53 ) {
4841 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4842 /* round dblq towards zero */
4843 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4844 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4845
4846 /* convert dblq to q by truncating towards zero */
4847 if (dblq < 0.0)
4848 q = (signed long long int)(-dblq);
4849 else
4850 q = (signed long long int)dblq;
4851
4852 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4853 /* (C0,C3,C1) <-- (q2,q1,q0) */
4854 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4855 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4856 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4857 } else {
4858 int N = 32 + (expdif % 32); /* as per AMD docs */
4859 env->fpus |= 0x400; /* C2 <-- 1 */
4860 fptemp = pow(2.0, (double)(expdif - N));
4861 fpsrcop = (ST0 / ST1) / fptemp;
4862 /* fpsrcop = integer obtained by chopping */
4863 fpsrcop = (fpsrcop < 0.0) ?
4864 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4865 ST0 -= (ST1 * fpsrcop * fptemp);
4866 }
4867}
4868
4869void helper_fyl2xp1(void)
4870{
4871 CPU86_LDouble fptemp;
4872
4873 fptemp = ST0;
4874 if ((fptemp+1.0)>0.0) {
4875 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4876 ST1 *= fptemp;
4877 fpop();
4878 } else {
4879 env->fpus &= (~0x4700);
4880 env->fpus |= 0x400;
4881 }
4882}
4883
4884void helper_fsqrt(void)
4885{
4886 CPU86_LDouble fptemp;
4887
4888 fptemp = ST0;
4889 if (fptemp<0.0) {
4890 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4891 env->fpus |= 0x400;
4892 }
4893 ST0 = sqrt(fptemp);
4894}
4895
4896void helper_fsincos(void)
4897{
4898 CPU86_LDouble fptemp;
4899
4900 fptemp = ST0;
4901 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4902 env->fpus |= 0x400;
4903 } else {
4904 ST0 = sin(fptemp);
4905 fpush();
4906 ST0 = cos(fptemp);
4907 env->fpus &= (~0x400); /* C2 <-- 0 */
4908 /* the above code is for |arg| < 2**63 only */
4909 }
4910}
4911
4912void helper_frndint(void)
4913{
4914 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4915}
4916
4917void helper_fscale(void)
4918{
4919 ST0 = ldexp (ST0, (int)(ST1));
4920}
4921
4922void helper_fsin(void)
4923{
4924 CPU86_LDouble fptemp;
4925
4926 fptemp = ST0;
4927 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4928 env->fpus |= 0x400;
4929 } else {
4930 ST0 = sin(fptemp);
4931 env->fpus &= (~0x400); /* C2 <-- 0 */
4932 /* the above code is for |arg| < 2**53 only */
4933 }
4934}
4935
4936void helper_fcos(void)
4937{
4938 CPU86_LDouble fptemp;
4939
4940 fptemp = ST0;
4941 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4942 env->fpus |= 0x400;
4943 } else {
4944 ST0 = cos(fptemp);
4945 env->fpus &= (~0x400); /* C2 <-- 0 */
4946 /* the above code is for |arg| < 2**63 only */
4947 }
4948}
4949
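/* FXAM classifies ST0 into the C3/C2/C0 condition bits: C0 (0x100) = NaN,
   C2 (0x400) = normal finite, C2|C0 = infinity, C3 (0x4000) = zero,
   C3|C2 = denormal; C1 (0x200) reports the sign. */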
4950void helper_fxam_ST0(void)
4951{
4952 CPU86_LDoubleU temp;
4953 int expdif;
4954
4955 temp.d = ST0;
4956
4957 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4958 if (SIGND(temp))
4959 env->fpus |= 0x200; /* C1 <-- 1 */
4960
4961 /* XXX: test fptags too */
4962 expdif = EXPD(temp);
4963 if (expdif == MAXEXPD) {
4964#ifdef USE_X86LDOUBLE
4965 if (MANTD(temp) == 0x8000000000000000ULL)
4966#else
4967 if (MANTD(temp) == 0)
4968#endif
4969 env->fpus |= 0x500 /*Infinity*/;
4970 else
4971 env->fpus |= 0x100 /*NaN*/;
4972 } else if (expdif == 0) {
4973 if (MANTD(temp) == 0)
4974 env->fpus |= 0x4000 /*Zero*/;
4975 else
4976 env->fpus |= 0x4400 /*Denormal*/;
4977 } else {
4978 env->fpus |= 0x400;
4979 }
4980}
4981
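/* FSTENV/FNSTENV store the full tag word, two bits per register: 00 = valid,
   01 = zero, 10 = special (NaN, infinity, denormal), 11 = empty.  Since
   env->fptags[] only records empty (1) vs. in-use (0), the finer class is
   recomputed here from the register contents. */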
4982void helper_fstenv(target_ulong ptr, int data32)
4983{
4984 int fpus, fptag, exp, i;
4985 uint64_t mant;
4986 CPU86_LDoubleU tmp;
4987
4988 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4989 fptag = 0;
4990 for (i=7; i>=0; i--) {
4991 fptag <<= 2;
4992 if (env->fptags[i]) {
4993 fptag |= 3;
4994 } else {
4995 tmp.d = env->fpregs[i].d;
4996 exp = EXPD(tmp);
4997 mant = MANTD(tmp);
4998 if (exp == 0 && mant == 0) {
4999 /* zero */
5000 fptag |= 1;
5001 } else if (exp == 0 || exp == MAXEXPD
5002#ifdef USE_X86LDOUBLE
5003 || (mant & (1LL << 63)) == 0
5004#endif
5005 ) {
5006 /* NaNs, infinity, denormal */
5007 fptag |= 2;
5008 }
5009 }
5010 }
5011 if (data32) {
5012 /* 32 bit */
5013 stl(ptr, env->fpuc);
5014 stl(ptr + 4, fpus);
5015 stl(ptr + 8, fptag);
5016 stl(ptr + 12, 0); /* fpip */
5017 stl(ptr + 16, 0); /* fpcs */
5018 stl(ptr + 20, 0); /* fpoo */
5019 stl(ptr + 24, 0); /* fpos */
5020 } else {
5021 /* 16 bit */
5022 stw(ptr, env->fpuc);
5023 stw(ptr + 2, fpus);
5024 stw(ptr + 4, fptag);
5025 stw(ptr + 6, 0);
5026 stw(ptr + 8, 0);
5027 stw(ptr + 10, 0);
5028 stw(ptr + 12, 0);
5029 }
5030}
5031
5032void helper_fldenv(target_ulong ptr, int data32)
5033{
5034 int i, fpus, fptag;
5035
5036 if (data32) {
5037 env->fpuc = lduw(ptr);
5038 fpus = lduw(ptr + 4);
5039 fptag = lduw(ptr + 8);
5040 }
5041 else {
5042 env->fpuc = lduw(ptr);
5043 fpus = lduw(ptr + 2);
5044 fptag = lduw(ptr + 4);
5045 }
5046 env->fpstt = (fpus >> 11) & 7;
5047 env->fpus = fpus & ~0x3800;
5048 for(i = 0;i < 8; i++) {
5049 env->fptags[i] = ((fptag & 3) == 3);
5050 fptag >>= 2;
5051 }
5052}
5053
5054void helper_fsave(target_ulong ptr, int data32)
5055{
5056 CPU86_LDouble tmp;
5057 int i;
5058
5059 helper_fstenv(ptr, data32);
5060
5061 ptr += (14 << data32);
5062 for(i = 0;i < 8; i++) {
5063 tmp = ST(i);
5064 helper_fstt(tmp, ptr);
5065 ptr += 10;
5066 }
5067
5068 /* fninit */
5069 env->fpus = 0;
5070 env->fpstt = 0;
5071 env->fpuc = 0x37f;
5072 env->fptags[0] = 1;
5073 env->fptags[1] = 1;
5074 env->fptags[2] = 1;
5075 env->fptags[3] = 1;
5076 env->fptags[4] = 1;
5077 env->fptags[5] = 1;
5078 env->fptags[6] = 1;
5079 env->fptags[7] = 1;
5080}
5081
5082void helper_frstor(target_ulong ptr, int data32)
5083{
5084 CPU86_LDouble tmp;
5085 int i;
5086
5087 helper_fldenv(ptr, data32);
5088 ptr += (14 << data32);
5089
5090 for(i = 0;i < 8; i++) {
5091 tmp = helper_fldt(ptr);
5092 ST(i) = tmp;
5093 ptr += 10;
5094 }
5095}
5096
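/* FXSAVE: the 512-byte area must be 16-byte aligned (#GP otherwise).  The tag
   word is stored in abridged form, one bit per register with 1 = valid, hence
   the fptag ^ 0xff below (env->fptags[] uses 1 for empty).  With EFER.FFXSR
   set, CPL 0 and long mode active, the XMM registers are skipped ("fast"
   FXSAVE/FXRSTOR). */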
5097void helper_fxsave(target_ulong ptr, int data64)
5098{
5099 int fpus, fptag, i, nb_xmm_regs;
5100 CPU86_LDouble tmp;
5101 target_ulong addr;
5102
5103 /* The operand must be 16 byte aligned */
5104 if (ptr & 0xf) {
5105 raise_exception(EXCP0D_GPF);
5106 }
5107
5108 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5109 fptag = 0;
5110 for(i = 0; i < 8; i++) {
5111 fptag |= (env->fptags[i] << i);
5112 }
5113 stw(ptr, env->fpuc);
5114 stw(ptr + 2, fpus);
5115 stw(ptr + 4, fptag ^ 0xff);
5116#ifdef TARGET_X86_64
5117 if (data64) {
5118 stq(ptr + 0x08, 0); /* rip */
5119 stq(ptr + 0x10, 0); /* rdp */
5120 } else
5121#endif
5122 {
5123 stl(ptr + 0x08, 0); /* eip */
5124 stl(ptr + 0x0c, 0); /* sel */
5125 stl(ptr + 0x10, 0); /* dp */
5126 stl(ptr + 0x14, 0); /* sel */
5127 }
5128
5129 addr = ptr + 0x20;
5130 for(i = 0;i < 8; i++) {
5131 tmp = ST(i);
5132 helper_fstt(tmp, addr);
5133 addr += 16;
5134 }
5135
5136 if (env->cr[4] & CR4_OSFXSR_MASK) {
5137 /* XXX: finish it */
5138 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5139 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5140 if (env->hflags & HF_CS64_MASK)
5141 nb_xmm_regs = 16;
5142 else
5143 nb_xmm_regs = 8;
5144 addr = ptr + 0xa0;
5145 /* Fast FXSAVE leaves out the XMM registers */
5146 if (!(env->efer & MSR_EFER_FFXSR)
5147 || (env->hflags & HF_CPL_MASK)
5148 || !(env->hflags & HF_LMA_MASK)) {
5149 for(i = 0; i < nb_xmm_regs; i++) {
5150 stq(addr, env->xmm_regs[i].XMM_Q(0));
5151 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5152 addr += 16;
5153 }
5154 }
5155 }
5156}
5157
5158void helper_fxrstor(target_ulong ptr, int data64)
5159{
5160 int i, fpus, fptag, nb_xmm_regs;
5161 CPU86_LDouble tmp;
5162 target_ulong addr;
5163
5164 /* The operand must be 16 byte aligned */
5165 if (ptr & 0xf) {
5166 raise_exception(EXCP0D_GPF);
5167 }
5168
5169 env->fpuc = lduw(ptr);
5170 fpus = lduw(ptr + 2);
5171 fptag = lduw(ptr + 4);
5172 env->fpstt = (fpus >> 11) & 7;
5173 env->fpus = fpus & ~0x3800;
5174 fptag ^= 0xff;
5175 for(i = 0;i < 8; i++) {
5176 env->fptags[i] = ((fptag >> i) & 1);
5177 }
5178
5179 addr = ptr + 0x20;
5180 for(i = 0;i < 8; i++) {
5181 tmp = helper_fldt(addr);
5182 ST(i) = tmp;
5183 addr += 16;
5184 }
5185
5186 if (env->cr[4] & CR4_OSFXSR_MASK) {
5187 /* XXX: finish it */
5188 env->mxcsr = ldl(ptr + 0x18);
5189 //ldl(ptr + 0x1c);
5190 if (env->hflags & HF_CS64_MASK)
5191 nb_xmm_regs = 16;
5192 else
5193 nb_xmm_regs = 8;
5194 addr = ptr + 0xa0;
5195 /* Fast FXRSTOR leaves out the XMM registers */
5196 if (!(env->efer & MSR_EFER_FFXSR)
5197 || (env->hflags & HF_CPL_MASK)
5198 || !(env->hflags & HF_LMA_MASK)) {
5199 for(i = 0; i < nb_xmm_regs; i++) {
5200#if !defined(VBOX) || __GNUC__ < 4
5201 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5202 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5203#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5204# if 1
5205 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5206 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5207 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5208 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5209# else
5210 /* this works fine on Mac OS X, gcc 4.0.1 */
5211 uint64_t u64 = ldq(addr);
5212 env->xmm_regs[i].XMM_Q(0) = u64;
5213 u64 = ldq(addr + 8);
5214 env->xmm_regs[i].XMM_Q(1) = u64;
5215# endif
5216#endif
5217 addr += 16;
5218 }
5219 }
5220 }
5221}
5222
5223#ifndef USE_X86LDOUBLE
5224
5225void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5226{
5227 CPU86_LDoubleU temp;
5228 int e;
5229
5230 temp.d = f;
5231 /* mantissa */
5232 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5233 /* exponent + sign */
5234 e = EXPD(temp) - EXPBIAS + 16383;
5235 e |= SIGND(temp) >> 16;
5236 *pexp = e;
5237}
5238
5239CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5240{
5241 CPU86_LDoubleU temp;
5242 int e;
5243 uint64_t ll;
5244
5245 /* XXX: handle overflow ? */
5246 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5247 e |= (upper >> 4) & 0x800; /* sign */
5248 ll = (mant >> 11) & ((1LL << 52) - 1);
5249#ifdef __arm__
5250 temp.l.upper = (e << 20) | (ll >> 32);
5251 temp.l.lower = ll;
5252#else
5253 temp.ll = ll | ((uint64_t)e << 52);
5254#endif
5255 return temp.d;
5256}
5257
5258#else
5259
5260void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5261{
5262 CPU86_LDoubleU temp;
5263
5264 temp.d = f;
5265 *pmant = temp.l.lower;
5266 *pexp = temp.l.upper;
5267}
5268
5269CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5270{
5271 CPU86_LDoubleU temp;
5272
5273 temp.l.upper = upper;
5274 temp.l.lower = mant;
5275 return temp.d;
5276}
5277#endif
5278
5279#ifdef TARGET_X86_64
5280
5281//#define DEBUG_MULDIV
5282
5283static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5284{
5285 *plow += a;
5286 /* carry test */
5287 if (*plow < a)
5288 (*phigh)++;
5289 *phigh += b;
5290}
5291
5292static void neg128(uint64_t *plow, uint64_t *phigh)
5293{
5294 *plow = ~ *plow;
5295 *phigh = ~ *phigh;
5296 add128(plow, phigh, 1, 0);
5297}
5298
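/* 128/64 -> 64 bit unsigned division used by DIV in 64-bit mode: the dividend
   is *phigh:*plow.  If the high half is zero a native 64-bit division is used,
   otherwise a simple restoring shift-and-subtract loop produces the quotient
   in *plow and the remainder in *phigh. */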
5299/* return TRUE if overflow */
5300static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5301{
5302 uint64_t q, r, a1, a0;
5303 int i, qb, ab;
5304
5305 a0 = *plow;
5306 a1 = *phigh;
5307 if (a1 == 0) {
5308 q = a0 / b;
5309 r = a0 % b;
5310 *plow = q;
5311 *phigh = r;
5312 } else {
5313 if (a1 >= b)
5314 return 1;
5315 /* XXX: use a better algorithm */
5316 for(i = 0; i < 64; i++) {
5317 ab = a1 >> 63;
5318 a1 = (a1 << 1) | (a0 >> 63);
5319 if (ab || a1 >= b) {
5320 a1 -= b;
5321 qb = 1;
5322 } else {
5323 qb = 0;
5324 }
5325 a0 = (a0 << 1) | qb;
5326 }
5327#if defined(DEBUG_MULDIV)
5328 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5329 *phigh, *plow, b, a0, a1);
5330#endif
5331 *plow = a0;
5332 *phigh = a1;
5333 }
5334 return 0;
5335}
5336
5337/* return TRUE if overflow */
5338static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5339{
5340 int sa, sb;
5341 sa = ((int64_t)*phigh < 0);
5342 if (sa)
5343 neg128(plow, phigh);
5344 sb = (b < 0);
5345 if (sb)
5346 b = -b;
5347 if (div64(plow, phigh, b) != 0)
5348 return 1;
5349 if (sa ^ sb) {
5350 if (*plow > (1ULL << 63))
5351 return 1;
5352 *plow = - *plow;
5353 } else {
5354 if (*plow >= (1ULL << 63))
5355 return 1;
5356 }
5357 if (sa)
5358 *phigh = - *phigh;
5359 return 0;
5360}
5361
5362void helper_mulq_EAX_T0(target_ulong t0)
5363{
5364 uint64_t r0, r1;
5365
5366 mulu64(&r0, &r1, EAX, t0);
5367 EAX = r0;
5368 EDX = r1;
5369 CC_DST = r0;
5370 CC_SRC = r1;
5371}
5372
5373void helper_imulq_EAX_T0(target_ulong t0)
5374{
5375 uint64_t r0, r1;
5376
5377 muls64(&r0, &r1, EAX, t0);
5378 EAX = r0;
5379 EDX = r1;
5380 CC_DST = r0;
5381 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5382}
5383
5384target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5385{
5386 uint64_t r0, r1;
5387
5388 muls64(&r0, &r1, t0, t1);
5389 CC_DST = r0;
5390 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5391 return r0;
5392}
5393
5394void helper_divq_EAX(target_ulong t0)
5395{
5396 uint64_t r0, r1;
5397 if (t0 == 0) {
5398 raise_exception(EXCP00_DIVZ);
5399 }
5400 r0 = EAX;
5401 r1 = EDX;
5402 if (div64(&r0, &r1, t0))
5403 raise_exception(EXCP00_DIVZ);
5404 EAX = r0;
5405 EDX = r1;
5406}
5407
5408void helper_idivq_EAX(target_ulong t0)
5409{
5410 uint64_t r0, r1;
5411 if (t0 == 0) {
5412 raise_exception(EXCP00_DIVZ);
5413 }
5414 r0 = EAX;
5415 r1 = EDX;
5416 if (idiv64(&r0, &r1, t0))
5417 raise_exception(EXCP00_DIVZ);
5418 EAX = r0;
5419 EDX = r1;
5420}
5421#endif
5422
5423static void do_hlt(void)
5424{
5425 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5426 env->halted = 1;
5427 env->exception_index = EXCP_HLT;
5428 cpu_loop_exit();
5429}
5430
5431void helper_hlt(int next_eip_addend)
5432{
5433 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5434 EIP += next_eip_addend;
5435
5436 do_hlt();
5437}
5438
5439void helper_monitor(target_ulong ptr)
5440{
5441#ifdef VBOX
5442 if ((uint32_t)ECX > 1)
5443 raise_exception(EXCP0D_GPF);
5444#else /* !VBOX */
5445 if ((uint32_t)ECX != 0)
5446 raise_exception(EXCP0D_GPF);
5447#endif /* !VBOX */
5448 /* XXX: store address ? */
5449 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5450}
5451
5452void helper_mwait(int next_eip_addend)
5453{
5454 if ((uint32_t)ECX != 0)
5455 raise_exception(EXCP0D_GPF);
5456#ifdef VBOX
5457 helper_hlt(next_eip_addend);
5458#else /* !VBOX */
5459 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5460 EIP += next_eip_addend;
5461
5462 /* XXX: not complete but not completely erroneous */
5463 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5464 /* more than one CPU: do not sleep because another CPU may
5465 wake this one */
5466 } else {
5467 do_hlt();
5468 }
5469#endif /* !VBOX */
5470}
5471
5472void helper_debug(void)
5473{
5474 env->exception_index = EXCP_DEBUG;
5475 cpu_loop_exit();
5476}
5477
5478void helper_reset_rf(void)
5479{
5480 env->eflags &= ~RF_MASK;
5481}
5482
5483void helper_raise_interrupt(int intno, int next_eip_addend)
5484{
5485 raise_interrupt(intno, 1, 0, next_eip_addend);
5486}
5487
5488void helper_raise_exception(int exception_index)
5489{
5490 raise_exception(exception_index);
5491}
5492
5493void helper_cli(void)
5494{
5495 env->eflags &= ~IF_MASK;
5496}
5497
5498void helper_sti(void)
5499{
5500 env->eflags |= IF_MASK;
5501}
5502
5503#ifdef VBOX
5504void helper_cli_vme(void)
5505{
5506 env->eflags &= ~VIF_MASK;
5507}
5508
5509void helper_sti_vme(void)
5510{
5511 /* First check, then change eflags according to the AMD manual */
5512 if (env->eflags & VIP_MASK) {
5513 raise_exception(EXCP0D_GPF);
5514 }
5515 env->eflags |= VIF_MASK;
5516}
5517#endif /* VBOX */
5518
5519#if 0
5520/* vm86plus instructions */
5521void helper_cli_vm(void)
5522{
5523 env->eflags &= ~VIF_MASK;
5524}
5525
5526void helper_sti_vm(void)
5527{
5528 env->eflags |= VIF_MASK;
5529 if (env->eflags & VIP_MASK) {
5530 raise_exception(EXCP0D_GPF);
5531 }
5532}
5533#endif
5534
5535void helper_set_inhibit_irq(void)
5536{
5537 env->hflags |= HF_INHIBIT_IRQ_MASK;
5538}
5539
5540void helper_reset_inhibit_irq(void)
5541{
5542 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5543}
5544
5545void helper_boundw(target_ulong a0, int v)
5546{
5547 int low, high;
5548 low = ldsw(a0);
5549 high = ldsw(a0 + 2);
5550 v = (int16_t)v;
5551 if (v < low || v > high) {
5552 raise_exception(EXCP05_BOUND);
5553 }
5554}
5555
5556void helper_boundl(target_ulong a0, int v)
5557{
5558 int low, high;
5559 low = ldl(a0);
5560 high = ldl(a0 + 4);
5561 if (v < low || v > high) {
5562 raise_exception(EXCP05_BOUND);
5563 }
5564}
5565
5566static float approx_rsqrt(float a)
5567{
5568 return 1.0 / sqrt(a);
5569}
5570
5571static float approx_rcp(float a)
5572{
5573 return 1.0 / a;
5574}
5575
5576#if !defined(CONFIG_USER_ONLY)
5577
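/* Instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte accesses;
   SHIFT is log2 of the access size. */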
5578#define MMUSUFFIX _mmu
5579
5580#define SHIFT 0
5581#include "softmmu_template.h"
5582
5583#define SHIFT 1
5584#include "softmmu_template.h"
5585
5586#define SHIFT 2
5587#include "softmmu_template.h"
5588
5589#define SHIFT 3
5590#include "softmmu_template.h"
5591
5592#endif
5593
5594#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5595/* This code assumes that a real physical address always fits into a host CPU register,
5596 which is wrong in general, but true for our current use cases. */
5597RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5598{
5599 return remR3PhysReadS8(addr);
5600}
5601RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5602{
5603 return remR3PhysReadU8(addr);
5604}
5605void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5606{
5607 remR3PhysWriteU8(addr, val);
5608}
5609RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5610{
5611 return remR3PhysReadS16(addr);
5612}
5613RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5614{
5615 return remR3PhysReadU16(addr);
5616}
5617void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5618{
5619 remR3PhysWriteU16(addr, val);
5620}
5621RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5622{
5623 return remR3PhysReadS32(addr);
5624}
5625RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5626{
5627 return remR3PhysReadU32(addr);
5628}
5629void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5630{
5631 remR3PhysWriteU32(addr, val);
5632}
5633uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5634{
5635 return remR3PhysReadU64(addr);
5636}
5637void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5638{
5639 remR3PhysWriteU64(addr, val);
5640}
5641#endif /* VBOX */
5642
5643#if !defined(CONFIG_USER_ONLY)
5644/* try to fill the TLB and return an exception if error. If retaddr is
5645 NULL, it means that the function was called in C code (i.e. not
5646 from generated code or from helper.c) */
5647/* XXX: fix it to restore all registers */
5648void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5649{
5650 TranslationBlock *tb;
5651 int ret;
5652 uintptr_t pc;
5653 CPUX86State *saved_env;
5654
5655 /* XXX: hack to restore env in all cases, even if not called from
5656 generated code */
5657 saved_env = env;
5658 env = cpu_single_env;
5659
5660 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5661 if (ret) {
5662 if (retaddr) {
5663 /* now we have a real cpu fault */
5664 pc = (uintptr_t)retaddr;
5665 tb = tb_find_pc(pc);
5666 if (tb) {
5667 /* the PC is inside the translated code. It means that we have
5668 a virtual CPU fault */
5669 cpu_restore_state(tb, env, pc, NULL);
5670 }
5671 }
5672 raise_exception_err(env->exception_index, env->error_code);
5673 }
5674 env = saved_env;
5675}
5676#endif
5677
5678#ifdef VBOX
5679
5680/**
5681 * Correctly computes the eflags.
5682 * @returns eflags.
5683 * @param env1 CPU environment.
5684 */
5685uint32_t raw_compute_eflags(CPUX86State *env1)
5686{
5687 CPUX86State *savedenv = env;
5688 uint32_t efl;
5689 env = env1;
5690 efl = compute_eflags();
5691 env = savedenv;
5692 return efl;
5693}
5694
5695/**
5696 * Reads byte from virtual address in guest memory area.
5697 * XXX: is it working for any addresses? swapped out pages?
5698 * @returns read data byte.
5699 * @param env1 CPU environment.
5700 * @param addr GC Virtual address.
5701 */
5702uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5703{
5704 CPUX86State *savedenv = env;
5705 uint8_t u8;
5706 env = env1;
5707 u8 = ldub_kernel(addr);
5708 env = savedenv;
5709 return u8;
5710}
5711
5712/**
5713 * Reads a 16-bit word from a virtual address in the guest memory area.
5714 * XXX: is it working for any addresses? swapped out pages?
5715 * @returns read data word.
5716 * @param env1 CPU environment.
5717 * @param addr GC Virtual address.
5718 */
5719uint16_t read_word(CPUX86State *env1, target_ulong addr)
5720{
5721 CPUX86State *savedenv = env;
5722 uint16_t u16;
5723 env = env1;
5724 u16 = lduw_kernel(addr);
5725 env = savedenv;
5726 return u16;
5727}
5728
5729/**
5730 * Reads a 32-bit dword from a virtual address in the guest memory area.
5731 * XXX: is it working for any addresses? swapped out pages?
5732 * @returns read data dword.
5733 * @param env1 CPU environment.
5734 * @param addr GC Virtual address.
5735 */
5736uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5737{
5738 CPUX86State *savedenv = env;
5739 uint32_t u32;
5740 env = env1;
5741 u32 = ldl_kernel(addr);
5742 env = savedenv;
5743 return u32;
5744}
5745
5746/**
5747 * Writes byte to virtual address in guest memory area.
5748 * XXX: is it working for any addresses? swapped out pages?
5750 * @param env1 CPU environment.
5751 * @param addr GC Virtual address.
5752 * @param val byte value
5753 */
5754void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5755{
5756 CPUX86State *savedenv = env;
5757 env = env1;
5758 stb(addr, val);
5759 env = savedenv;
5760}
5761
5762void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5763{
5764 CPUX86State *savedenv = env;
5765 env = env1;
5766 stw(addr, val);
5767 env = savedenv;
5768}
5769
5770void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5771{
5772 CPUX86State *savedenv = env;
5773 env = env1;
5774 stl(addr, val);
5775 env = savedenv;
5776}
5777
5778/**
5779 * Correctly loads selector into segment register with updating internal
5780 * qemu data/caches.
5781 * @param env1 CPU environment.
5782 * @param seg_reg Segment register.
5783 * @param selector Selector to load.
5784 */
5785void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5786{
5787 CPUX86State *savedenv = env;
5788#ifdef FORCE_SEGMENT_SYNC
5789 jmp_buf old_buf;
5790#endif
5791
5792 env = env1;
5793
5794 if ( env->eflags & X86_EFL_VM
5795 || !(env->cr[0] & X86_CR0_PE))
5796 {
5797 load_seg_vm(seg_reg, selector);
5798
5799 env = savedenv;
5800
5801 /* Successful sync. */
5802 Assert(env1->segs[seg_reg].newselector == 0);
5803 }
5804 else
5805 {
5806 /* For some reason it works even without save/restore of the jump buffer, and since this code
5807 is time critical - let's not do that. */
5808#ifdef FORCE_SEGMENT_SYNC
5809 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5810#endif
5811 if (setjmp(env1->jmp_env) == 0)
5812 {
5813 if (seg_reg == R_CS)
5814 {
5815 uint32_t e1, e2;
5816 e1 = e2 = 0;
5817 load_segment(&e1, &e2, selector);
5818 cpu_x86_load_seg_cache(env, R_CS, selector,
5819 get_seg_base(e1, e2),
5820 get_seg_limit(e1, e2),
5821 e2);
5822 }
5823 else
5824 helper_load_seg(seg_reg, selector);
5825 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5826 loading 0 selectors, which in turn led to subtle problems like #3588 */
5827
5828 env = savedenv;
5829
5830 /* Successful sync. */
5831 Assert(env1->segs[seg_reg].newselector == 0);
5832 }
5833 else
5834 {
5835 env = savedenv;
5836
5837 /* Postpone sync until the guest uses the selector. */
5838 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5839 env1->segs[seg_reg].newselector = selector;
5840 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5841 env1->exception_index = -1;
5842 env1->error_code = 0;
5843 env1->old_exception = -1;
5844 }
5845#ifdef FORCE_SEGMENT_SYNC
5846 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5847#endif
5848 }
5849
5850}
5851
5852DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5853{
5854 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5855}
5856
5857
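/**
 * Translates and executes exactly one guest instruction at the current CS:EIP
 * using the recompiler (CPU_EMULATE_SINGLE_INSTR limits the generated TB to a
 * single instruction).  The TB is re-executed until EIP changes, so repeated
 * (REP-prefixed) instructions run to completion, and the next instruction is
 * emulated as well when the interrupt-inhibit shadow (HF_INHIBIT_IRQ_MASK)
 * is still active afterwards.
 *
 * @returns 0.
 * @param env1 CPU environment.
 */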
5858int emulate_single_instr(CPUX86State *env1)
5859{
5860 TranslationBlock *tb;
5861 TranslationBlock *current;
5862 int flags;
5863 uint8_t *tc_ptr;
5864 target_ulong old_eip;
5865
5866 /* ensures env is loaded! */
5867 CPUX86State *savedenv = env;
5868 env = env1;
5869
5870 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5871
5872 current = env->current_tb;
5873 env->current_tb = NULL;
5874 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5875
5876 /*
5877 * Translate only one instruction.
5878 */
5879 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5880 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5881 env->segs[R_CS].base, flags, 0);
5882
5883 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5884
5885
5886 /* tb_link_phys: */
5887 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5888 tb->jmp_next[0] = NULL;
5889 tb->jmp_next[1] = NULL;
5890 Assert(tb->jmp_next[0] == NULL);
5891 Assert(tb->jmp_next[1] == NULL);
5892 if (tb->tb_next_offset[0] != 0xffff)
5893 tb_reset_jump(tb, 0);
5894 if (tb->tb_next_offset[1] != 0xffff)
5895 tb_reset_jump(tb, 1);
5896
5897 /*
5898 * Execute it using emulation
5899 */
5900 old_eip = env->eip;
5901 env->current_tb = tb;
5902
5903 /*
5904 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside
5905 * the generated code - perhaps not a very safe hack.
5906 */
5907 while (old_eip == env->eip)
5908 {
5909 tc_ptr = tb->tc_ptr;
5910
5911#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5912 int fake_ret;
5913 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5914#else
5915 tcg_qemu_tb_exec(tc_ptr);
5916#endif
5917
5918 /*
5919 * Exit once we detect an external interrupt and interrupts are enabled
5920 */
5921 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5922 || ( (env->eflags & IF_MASK)
5923 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5924 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5925 )
5926 {
5927 break;
5928 }
5929 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
5930 tlb_flush(env, true);
5931 }
5932 }
5933 env->current_tb = current;
5934
5935 tb_phys_invalidate(tb, -1);
5936 tb_free(tb);
5937/*
5938 Assert(tb->tb_next_offset[0] == 0xffff);
5939 Assert(tb->tb_next_offset[1] == 0xffff);
5940 Assert(tb->tb_next[0] == 0xffff);
5941 Assert(tb->tb_next[1] == 0xffff);
5942 Assert(tb->jmp_next[0] == NULL);
5943 Assert(tb->jmp_next[1] == NULL);
5944 Assert(tb->jmp_first == NULL); */
5945
5946 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5947
5948 /*
5949 * Execute the next instruction when we encounter instruction fusing.
5950 */
5951 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5952 {
5953 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5954 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5955 emulate_single_instr(env);
5956 }
5957
5958 env = savedenv;
5959 return 0;
5960}
5961
5962/**
5963 * Correctly loads a new ldtr selector.
5964 *
5965 * @param env1 CPU environment.
5966 * @param selector Selector to load.
5967 */
5968void sync_ldtr(CPUX86State *env1, int selector)
5969{
5970 CPUX86State *saved_env = env;
5971 if (setjmp(env1->jmp_env) == 0)
5972 {
5973 env = env1;
5974 helper_lldt(selector);
5975 env = saved_env;
5976 }
5977 else
5978 {
5979 env = saved_env;
5980#ifdef VBOX_STRICT
5981 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5982#endif
5983 }
5984}
5985
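/**
 * Reads the ring-'dpl' stack pointer (SS:ESP) out of the current TSS; raw-mode
 * variant of the internal get_ss_esp_from_tss() helper.  The 16-bit vs 32-bit
 * TSS layout is selected from the descriptor type.
 *
 * @returns 1 on success, 0 if the entry lies outside the TSS limit.
 * @param env1 CPU environment.
 */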
5986int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5987 uint32_t *esp_ptr, int dpl)
5988{
5989 int type, index, shift;
5990
5991 CPUX86State *savedenv = env;
5992 env = env1;
5993
5994 if (!(env->tr.flags & DESC_P_MASK))
5995 cpu_abort(env, "invalid tss");
5996 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5997 if ((type & 7) != 1)
5998 cpu_abort(env, "invalid tss type %d", type);
5999 shift = type >> 3;
6000 index = (dpl * 4 + 2) << shift;
6001 if (index + (4 << shift) - 1 > env->tr.limit)
6002 {
6003 env = savedenv;
6004 return 0;
6005 }
6006 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6007
6008 if (shift == 0) {
6009 *esp_ptr = lduw_kernel(env->tr.base + index);
6010 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6011 } else {
6012 *esp_ptr = ldl_kernel(env->tr.base + index);
6013 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6014 }
6015
6016 env = savedenv;
6017 return 1;
6018}
6019
6020//*****************************************************************************
6021// Needs to be at the bottom of the file (overriding macros)
6022
6023static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6024{
6025#ifdef USE_X86LDOUBLE
6026 CPU86_LDoubleU tmp;
6027 tmp.l.lower = *(uint64_t const *)ptr;
6028 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6029 return tmp.d;
6030#else
6031# error "Busted FPU saving/restoring!"
6032 return *(CPU86_LDouble *)ptr;
6033#endif
6034}
6035
6036static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6037{
6038#ifdef USE_X86LDOUBLE
6039 CPU86_LDoubleU tmp;
6040 tmp.d = f;
6041 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6042 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6043 *(uint16_t *)(ptr + 10) = 0;
6044 *(uint32_t *)(ptr + 12) = 0;
6045 AssertCompile(sizeof(long double) > 8);
6046#else
6047# error "Busted FPU saving/restoring!"
6048 *(CPU86_LDouble *)ptr = f;
6049#endif
6050}
6051
6052#undef stw
6053#undef stl
6054#undef stq
6055#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6056#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6057#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6058
6059//*****************************************************************************
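/* Writes the guest FPU/SSE state out to the raw-mode context buffer, using the
   FXSAVE layout when the guest CPUID advertises FXSR and the legacy FSAVE
   layout otherwise (the stw/stl/stq macros above have been redefined to plain
   host stores for this purpose). */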
6060void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6061{
6062 int fpus, fptag, i, nb_xmm_regs;
6063 CPU86_LDouble tmp;
6064 uint8_t *addr;
6065 int data64 = !!(env->hflags & HF_LMA_MASK);
6066
6067 if (env->cpuid_features & CPUID_FXSR)
6068 {
6069 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6070 fptag = 0;
6071 for(i = 0; i < 8; i++) {
6072 fptag |= (env->fptags[i] << i);
6073 }
6074 stw(ptr, env->fpuc);
6075 stw(ptr + 2, fpus);
6076 stw(ptr + 4, fptag ^ 0xff);
6077
6078 addr = ptr + 0x20;
6079 for(i = 0;i < 8; i++) {
6080 tmp = ST(i);
6081 helper_fstt_raw(tmp, addr);
6082 addr += 16;
6083 }
6084
6085 if (env->cr[4] & CR4_OSFXSR_MASK) {
6086 /* XXX: finish it */
6087 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6088 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6089 nb_xmm_regs = 8 << data64;
6090 addr = ptr + 0xa0;
6091 for(i = 0; i < nb_xmm_regs; i++) {
6092#if __GNUC__ < 4
6093 stq(addr, env->xmm_regs[i].XMM_Q(0));
6094 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6095#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6096 stl(addr, env->xmm_regs[i].XMM_L(0));
6097 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6098 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6099 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6100#endif
6101 addr += 16;
6102 }
6103 }
6104 }
6105 else
6106 {
6107 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6108 int fptag;
6109
6110 fp->FCW = env->fpuc;
6111 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6112 fptag = 0;
6113 for (i=7; i>=0; i--) {
6114 fptag <<= 2;
6115 if (env->fptags[i]) {
6116 fptag |= 3;
6117 } else {
6118 /* the FPU automatically computes it */
6119 }
6120 }
6121 fp->FTW = fptag;
6122
6123 for(i = 0;i < 8; i++) {
6124 tmp = ST(i);
6125 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6126 }
6127 }
6128}
6129
6130//*****************************************************************************
6131#undef lduw
6132#undef ldl
6133#undef ldq
6134#define lduw(a) *(uint16_t *)(a)
6135#define ldl(a) *(uint32_t *)(a)
6136#define ldq(a) *(uint64_t *)(a)
6137//*****************************************************************************
6138void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6139{
6140 int i, fpus, fptag, nb_xmm_regs;
6141 CPU86_LDouble tmp;
6142 uint8_t *addr;
6143 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6144
6145 if (env->cpuid_features & CPUID_FXSR)
6146 {
6147 env->fpuc = lduw(ptr);
6148 fpus = lduw(ptr + 2);
6149 fptag = lduw(ptr + 4);
6150 env->fpstt = (fpus >> 11) & 7;
6151 env->fpus = fpus & ~0x3800;
6152 fptag ^= 0xff;
6153 for(i = 0;i < 8; i++) {
6154 env->fptags[i] = ((fptag >> i) & 1);
6155 }
6156
6157 addr = ptr + 0x20;
6158 for(i = 0;i < 8; i++) {
6159 tmp = helper_fldt_raw(addr);
6160 ST(i) = tmp;
6161 addr += 16;
6162 }
6163
6164 if (env->cr[4] & CR4_OSFXSR_MASK) {
6165 /* XXX: finish it, endianness */
6166 env->mxcsr = ldl(ptr + 0x18);
6167 //ldl(ptr + 0x1c);
6168 nb_xmm_regs = 8 << data64;
6169 addr = ptr + 0xa0;
6170 for(i = 0; i < nb_xmm_regs; i++) {
6171#if HC_ARCH_BITS == 32
6172 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6173 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6174 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6175 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6176 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6177#else
6178 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6179 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6180#endif
6181 addr += 16;
6182 }
6183 }
6184 }
6185 else
6186 {
6187 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6188 int fptag, j;
6189
6190 env->fpuc = fp->FCW;
6191 env->fpstt = (fp->FSW >> 11) & 7;
6192 env->fpus = fp->FSW & ~0x3800;
6193 fptag = fp->FTW;
6194 for(i = 0;i < 8; i++) {
6195 env->fptags[i] = ((fptag & 3) == 3);
6196 fptag >>= 2;
6197 }
6198 j = env->fpstt;
6199 for(i = 0;i < 8; i++) {
6200 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6201 ST(i) = tmp;
6202 }
6203 }
6204}
6205//*****************************************************************************
6206//*****************************************************************************
6207
6208#endif /* VBOX */
6209
6210/* Secure Virtual Machine helpers */
6211
6212#if defined(CONFIG_USER_ONLY)
6213
6214void helper_vmrun(int aflag, int next_eip_addend)
6215{
6216}
6217void helper_vmmcall(void)
6218{
6219}
6220void helper_vmload(int aflag)
6221{
6222}
6223void helper_vmsave(int aflag)
6224{
6225}
6226void helper_stgi(void)
6227{
6228}
6229void helper_clgi(void)
6230{
6231}
6232void helper_skinit(void)
6233{
6234}
6235void helper_invlpga(int aflag)
6236{
6237}
6238void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6239{
6240}
6241void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6242{
6243}
6244
6245void helper_svm_check_io(uint32_t port, uint32_t param,
6246 uint32_t next_eip_addend)
6247{
6248}
6249#else
6250
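/* The VMCB stores segment attributes in a packed 12-bit form: the access byte
   (type, S, DPL, P) in bits 0-7 and the AVL/L/D/G flags in bits 8-11.
   svm_save_seg()/svm_load_seg() below convert between that layout and the
   flags layout used by SegmentCache. */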
6251static inline void svm_save_seg(target_phys_addr_t addr,
6252 const SegmentCache *sc)
6253{
6254 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6255 sc->selector);
6256 stq_phys(addr + offsetof(struct vmcb_seg, base),
6257 sc->base);
6258 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6259 sc->limit);
6260 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6261 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6262}
6263
6264static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6265{
6266 unsigned int flags;
6267
6268 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6269 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6270 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6271 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6272 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6273}
6274
6275static inline void svm_load_seg_cache(target_phys_addr_t addr,
6276 CPUState *env, int seg_reg)
6277{
6278 SegmentCache sc1, *sc = &sc1;
6279 svm_load_seg(addr, sc);
6280 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6281 sc->base, sc->limit, sc->flags);
6282}
6283
6284void helper_vmrun(int aflag, int next_eip_addend)
6285{
6286 target_ulong addr;
6287 uint32_t event_inj;
6288 uint32_t int_ctl;
6289
6290 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6291
6292 if (aflag == 2)
6293 addr = EAX;
6294 else
6295 addr = (uint32_t)EAX;
6296
6297 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6298
6299 env->vm_vmcb = addr;
6300
6301 /* save the current CPU state in the hsave page */
6302 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6303 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6304
6305 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6306 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6307
6308 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6309 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6310 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6311 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6312 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6313 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6314
6315 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6316 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6317
6318 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6319 &env->segs[R_ES]);
6320 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6321 &env->segs[R_CS]);
6322 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6323 &env->segs[R_SS]);
6324 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6325 &env->segs[R_DS]);
6326
6327 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6328 EIP + next_eip_addend);
6329 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6330 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6331
6332 /* load the interception bitmaps so we do not need to access the
6333 vmcb in svm mode */
6334 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6335 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6336 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6337 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6338 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6339 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6340
6341 /* enable intercepts */
6342 env->hflags |= HF_SVMI_MASK;
6343
6344 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6345
6346 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6347 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6348
6349 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6350 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6351
6352 /* clear exit_info_2 so we behave like the real hardware */
6353 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6354
6355 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6356 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6357 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6358 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6359 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6360 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6361 if (int_ctl & V_INTR_MASKING_MASK) {
6362 env->v_tpr = int_ctl & V_TPR_MASK;
6363 env->hflags2 |= HF2_VINTR_MASK;
6364 if (env->eflags & IF_MASK)
6365 env->hflags2 |= HF2_HIF_MASK;
6366 }
6367
6368 cpu_load_efer(env,
6369 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6370 env->eflags = 0;
6371 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6372 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6373 CC_OP = CC_OP_EFLAGS;
6374
6375 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6376 env, R_ES);
6377 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6378 env, R_CS);
6379 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6380 env, R_SS);
6381 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6382 env, R_DS);
6383
6384 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6385 env->eip = EIP;
6386 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6387 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6388 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6389 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6390 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6391
6392 /* FIXME: guest state consistency checks */
6393
6394 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6395 case TLB_CONTROL_DO_NOTHING:
6396 break;
6397 case TLB_CONTROL_FLUSH_ALL_ASID:
6398 /* FIXME: this is not 100% correct but should work for now */
6399 tlb_flush(env, 1);
6400 break;
6401 }
6402
6403 env->hflags2 |= HF2_GIF_MASK;
6404
6405 if (int_ctl & V_IRQ_MASK) {
6406 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6407 }
6408
6409 /* maybe we need to inject an event */
6410 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6411 if (event_inj & SVM_EVTINJ_VALID) {
6412 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6413 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6414 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6415
6416 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6417 /* FIXME: need to implement valid_err */
6418 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6419 case SVM_EVTINJ_TYPE_INTR:
6420 env->exception_index = vector;
6421 env->error_code = event_inj_err;
6422 env->exception_is_int = 0;
6423 env->exception_next_eip = -1;
6424 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6425 /* XXX: is it always correct? */
6426 do_interrupt(vector, 0, 0, 0, 1);
6427 break;
6428 case SVM_EVTINJ_TYPE_NMI:
6429 env->exception_index = EXCP02_NMI;
6430 env->error_code = event_inj_err;
6431 env->exception_is_int = 0;
6432 env->exception_next_eip = EIP;
6433 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6434 cpu_loop_exit();
6435 break;
6436 case SVM_EVTINJ_TYPE_EXEPT:
6437 env->exception_index = vector;
6438 env->error_code = event_inj_err;
6439 env->exception_is_int = 0;
6440 env->exception_next_eip = -1;
6441 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6442 cpu_loop_exit();
6443 break;
6444 case SVM_EVTINJ_TYPE_SOFT:
6445 env->exception_index = vector;
6446 env->error_code = event_inj_err;
6447 env->exception_is_int = 1;
6448 env->exception_next_eip = EIP;
6449 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6450 cpu_loop_exit();
6451 break;
6452 }
6453 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6454 }
6455}
6456
6457void helper_vmmcall(void)
6458{
6459 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6460 raise_exception(EXCP06_ILLOP);
6461}
6462
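/* VMLOAD: load the additional hidden guest state (FS, GS, TR, LDTR and the
   syscall/sysenter MSR state) from the VMCB addressed by rAX. */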
6463void helper_vmload(int aflag)
6464{
6465 target_ulong addr;
6466 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6467
6468 if (aflag == 2)
6469 addr = EAX;
6470 else
6471 addr = (uint32_t)EAX;
6472
6473 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6474 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6475 env->segs[R_FS].base);
6476
6477 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6478 env, R_FS);
6479 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6480 env, R_GS);
6481 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6482 &env->tr);
6483 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6484 &env->ldt);
6485
6486#ifdef TARGET_X86_64
6487 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6488 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6489 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6490 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6491#endif
6492 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6493 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6494 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6495 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6496}
6497
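/* VMSAVE: the inverse of VMLOAD; store the same hidden state back into the
   VMCB addressed by rAX. */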
6498void helper_vmsave(int aflag)
6499{
6500 target_ulong addr;
6501 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6502
6503 if (aflag == 2)
6504 addr = EAX;
6505 else
6506 addr = (uint32_t)EAX;
6507
6508 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6509 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6510 env->segs[R_FS].base);
6511
6512 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6513 &env->segs[R_FS]);
6514 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6515 &env->segs[R_GS]);
6516 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6517 &env->tr);
6518 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6519 &env->ldt);
6520
6521#ifdef TARGET_X86_64
6522 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6523 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6524 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6525 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6526#endif
6527 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6528 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6529 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6530 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6531}
6532
6533void helper_stgi(void)
6534{
6535 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6536 env->hflags2 |= HF2_GIF_MASK;
6537}
6538
6539void helper_clgi(void)
6540{
6541 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6542 env->hflags2 &= ~HF2_GIF_MASK;
6543}
6544
6545void helper_skinit(void)
6546{
6547 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6548 /* XXX: not implemented */
6549 raise_exception(EXCP06_ILLOP);
6550}
6551
6552void helper_invlpga(int aflag)
6553{
6554 target_ulong addr;
6555 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6556
6557 if (aflag == 2)
6558 addr = EAX;
6559 else
6560 addr = (uint32_t)EAX;
6561
6562 /* XXX: could use the ASID to determine whether the
6563 flush is actually needed */
6564 tlb_flush_page(env, addr);
6565}
6566
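/* Check whether the given intercept is enabled in the bitmaps cached at
   VMRUN time and, if so, exit to the host via helper_vmexit().  MSR accesses
   additionally consult the MSR permission bitmap, which holds a read and a
   write intercept bit for each covered MSR.  In the VBOX build this path is
   unreachable and simply asserts. */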
6567void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6568{
6569 if (likely(!(env->hflags & HF_SVMI_MASK)))
6570 return;
6571#ifndef VBOX
6572 switch(type) {
6573 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6574 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6575 helper_vmexit(type, param);
6576 }
6577 break;
6578 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6579 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6580 helper_vmexit(type, param);
6581 }
6582 break;
6583 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6584 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6585 helper_vmexit(type, param);
6586 }
6587 break;
6588 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6589 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6590 helper_vmexit(type, param);
6591 }
6592 break;
6593 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6594 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6595 helper_vmexit(type, param);
6596 }
6597 break;
6598 case SVM_EXIT_MSR:
6599 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6600 /* FIXME: this should be read in at vmrun (faster this way?) */
6601 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6602 uint32_t t0, t1;
6603 switch((uint32_t)ECX) {
6604 case 0 ... 0x1fff:
6605 t0 = (ECX * 2) % 8;
6606 t1 = ECX / 8;
6607 break;
6608 case 0xc0000000 ... 0xc0001fff:
6609 t0 = (8192 + ECX - 0xc0000000) * 2;
6610 t1 = (t0 / 8);
6611 t0 %= 8;
6612 break;
6613 case 0xc0010000 ... 0xc0011fff:
6614 t0 = (16384 + ECX - 0xc0010000) * 2;
6615 t1 = (t0 / 8);
6616 t0 %= 8;
6617 break;
6618 default:
6619 helper_vmexit(type, param);
6620 t0 = 0;
6621 t1 = 0;
6622 break;
6623 }
6624 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6625 helper_vmexit(type, param);
6626 }
6627 break;
6628 default:
6629 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6630 helper_vmexit(type, param);
6631 }
6632 break;
6633 }
6634#else /* VBOX */
6635 AssertMsgFailed(("We shouldn't be here; HM handles this differently!"));
6636#endif /* VBOX */
6637}
6638
6639void helper_svm_check_io(uint32_t port, uint32_t param,
6640 uint32_t next_eip_addend)
6641{
6642 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6643 /* FIXME: this should be read in at vmrun (faster this way?) */
6644 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6645 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6646 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6647 /* next EIP */
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6649 env->eip + next_eip_addend);
6650 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6651 }
6652 }
6653}
6654
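/* #VMEXIT: store the guest state and exit information into the VMCB, reload
   the host state from the hsave page, clear GIF and drop back to the
   emulator main loop. */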
6655/* Note: currently only 32 bits of exit_code are used */
6656void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6657{
6658 uint32_t int_ctl;
6659
6660 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6661 exit_code, exit_info_1,
6662 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6663 EIP);
6664
6665 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6666 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6667 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6668 } else {
6669 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6670 }
6671
6672 /* Save the VM state in the vmcb */
6673 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6674 &env->segs[R_ES]);
6675 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6676 &env->segs[R_CS]);
6677 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6678 &env->segs[R_SS]);
6679 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6680 &env->segs[R_DS]);
6681
6682 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6683 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6684
6685 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6686 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6687
6688 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6689 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6690 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6691 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6692 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6693
6694 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6695 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6696 int_ctl |= env->v_tpr & V_TPR_MASK;
6697 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6698 int_ctl |= V_IRQ_MASK;
6699 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6700
6701 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6702 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6703 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6704 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6705 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6706 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6707 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6708
6709 /* Reload the host state from vm_hsave */
6710 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6711 env->hflags &= ~HF_SVMI_MASK;
6712 env->intercept = 0;
6713 env->intercept_exceptions = 0;
6714 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6715 env->tsc_offset = 0;
6716
6717 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6718 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6719
6720 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6721 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6722
6723 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6724 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6725 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6726 /* we need to set the efer after the crs so the hidden flags get
6727 set properly */
6728 cpu_load_efer(env,
6729 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6730 env->eflags = 0;
6731 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6732 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6733 CC_OP = CC_OP_EFLAGS;
6734
6735 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6736 env, R_ES);
6737 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6738 env, R_CS);
6739 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6740 env, R_SS);
6741 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6742 env, R_DS);
6743
6744 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6745 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6746 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6747
6748 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6749 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6750
6751 /* other setups */
6752 cpu_x86_set_cpl(env, 0);
6753 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6754 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6755
6756 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6757 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6758 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6759 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6760 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6761
6762 env->hflags2 &= ~HF2_GIF_MASK;
6763 /* FIXME: Resets the current ASID register to zero (host ASID). */
6764
6765 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6766
6767 /* Clears the TSC_OFFSET inside the processor. */
6768
6769 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6770 from the page table indicated by the host's CR3. If the PDPEs contain
6771 illegal state, the processor causes a shutdown. */
6772
6773 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6774 env->cr[0] |= CR0_PE_MASK;
6775 env->eflags &= ~VM_MASK;
6776
6777 /* Disables all breakpoints in the host DR7 register. */
6778
6779 /* Checks the reloaded host state for consistency. */
6780
6781 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6782 host's code segment or non-canonical (in the case of long mode), a
6783 #GP fault is delivered inside the host. */
6784
6785 /* remove any pending exception */
6786 env->exception_index = -1;
6787 env->error_code = 0;
6788 env->old_exception = -1;
6789
6790 cpu_loop_exit();
6791}
6792
6793#endif
6794
6795/* MMX/SSE */
6796/* XXX: optimize by storing fptt and fptags in the static cpu state */
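/* Entering MMX mode resets the FPU top-of-stack and marks all tag bits as
   valid (the MMX registers alias the x87 register file); EMMS marks every
   register as empty again. */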
6797void helper_enter_mmx(void)
6798{
6799 env->fpstt = 0;
6800 *(uint32_t *)(env->fptags) = 0;
6801 *(uint32_t *)(env->fptags + 4) = 0;
6802}
6803
6804void helper_emms(void)
6805{
6806 /* set to empty state */
6807 *(uint32_t *)(env->fptags) = 0x01010101;
6808 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6809}
6810
6811/* XXX: suppress */
6812void helper_movq(void *d, void *s)
6813{
6814 *(uint64_t *)d = *(uint64_t *)s;
6815}
6816
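/* Instantiate the MMX (SHIFT 0) and SSE (SHIFT 1) variants of the vector
   helpers, followed by the 8/16/32-bit (and, on x86-64, 64-bit) variants of
   the templated ALU helpers. */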
6817#define SHIFT 0
6818#include "ops_sse.h"
6819
6820#define SHIFT 1
6821#include "ops_sse.h"
6822
6823#define SHIFT 0
6824#include "helper_template.h"
6825#undef SHIFT
6826
6827#define SHIFT 1
6828#include "helper_template.h"
6829#undef SHIFT
6830
6831#define SHIFT 2
6832#include "helper_template.h"
6833#undef SHIFT
6834
6835#ifdef TARGET_X86_64
6836
6837#define SHIFT 3
6838#include "helper_template.h"
6839#undef SHIFT
6840
6841#endif
6842
6843/* bit operations */
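/* Returns the index of the least significant set bit; the caller must
   guarantee t0 != 0, otherwise the loop below never terminates. */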
6844target_ulong helper_bsf(target_ulong t0)
6845{
6846 int count;
6847 target_ulong res;
6848
6849 res = t0;
6850 count = 0;
6851 while ((res & 1) == 0) {
6852 count++;
6853 res >>= 1;
6854 }
6855 return count;
6856}
6857
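/* For wordsize > 0 this returns the number of leading zero bits of t0 in a
   wordsize-bit operand (LZCNT semantics, returning wordsize when t0 == 0);
   for wordsize == 0 it returns the index of the most significant set bit,
   which is what helper_bsr() needs. */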
6858target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6859{
6860 int count;
6861 target_ulong res, mask;
6862
6863 if (wordsize > 0 && t0 == 0) {
6864 return wordsize;
6865 }
6866 res = t0;
6867 count = TARGET_LONG_BITS - 1;
6868 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6869 while ((res & mask) == 0) {
6870 count--;
6871 res <<= 1;
6872 }
6873 if (wordsize > 0) {
6874 return wordsize - 1 - count;
6875 }
6876 return count;
6877}
6878
6879target_ulong helper_bsr(target_ulong t0)
6880{
6881 return helper_lzcnt(t0, 0);
6882}
6883
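/* Lazy condition code evaluation: CC_OP records which operation last set the
   flags, and these dispatchers recompute either the full EFLAGS value or just
   CF from the operands saved in CC_SRC/CC_DST. */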
6884static int compute_all_eflags(void)
6885{
6886 return CC_SRC;
6887}
6888
6889static int compute_c_eflags(void)
6890{
6891 return CC_SRC & CC_C;
6892}
6893
6894uint32_t helper_cc_compute_all(int op)
6895{
6896 switch (op) {
6897 default: /* should never happen */ return 0;
6898
6899 case CC_OP_EFLAGS: return compute_all_eflags();
6900
6901 case CC_OP_MULB: return compute_all_mulb();
6902 case CC_OP_MULW: return compute_all_mulw();
6903 case CC_OP_MULL: return compute_all_mull();
6904
6905 case CC_OP_ADDB: return compute_all_addb();
6906 case CC_OP_ADDW: return compute_all_addw();
6907 case CC_OP_ADDL: return compute_all_addl();
6908
6909 case CC_OP_ADCB: return compute_all_adcb();
6910 case CC_OP_ADCW: return compute_all_adcw();
6911 case CC_OP_ADCL: return compute_all_adcl();
6912
6913 case CC_OP_SUBB: return compute_all_subb();
6914 case CC_OP_SUBW: return compute_all_subw();
6915 case CC_OP_SUBL: return compute_all_subl();
6916
6917 case CC_OP_SBBB: return compute_all_sbbb();
6918 case CC_OP_SBBW: return compute_all_sbbw();
6919 case CC_OP_SBBL: return compute_all_sbbl();
6920
6921 case CC_OP_LOGICB: return compute_all_logicb();
6922 case CC_OP_LOGICW: return compute_all_logicw();
6923 case CC_OP_LOGICL: return compute_all_logicl();
6924
6925 case CC_OP_INCB: return compute_all_incb();
6926 case CC_OP_INCW: return compute_all_incw();
6927 case CC_OP_INCL: return compute_all_incl();
6928
6929 case CC_OP_DECB: return compute_all_decb();
6930 case CC_OP_DECW: return compute_all_decw();
6931 case CC_OP_DECL: return compute_all_decl();
6932
6933 case CC_OP_SHLB: return compute_all_shlb();
6934 case CC_OP_SHLW: return compute_all_shlw();
6935 case CC_OP_SHLL: return compute_all_shll();
6936
6937 case CC_OP_SARB: return compute_all_sarb();
6938 case CC_OP_SARW: return compute_all_sarw();
6939 case CC_OP_SARL: return compute_all_sarl();
6940
6941#ifdef TARGET_X86_64
6942 case CC_OP_MULQ: return compute_all_mulq();
6943
6944 case CC_OP_ADDQ: return compute_all_addq();
6945
6946 case CC_OP_ADCQ: return compute_all_adcq();
6947
6948 case CC_OP_SUBQ: return compute_all_subq();
6949
6950 case CC_OP_SBBQ: return compute_all_sbbq();
6951
6952 case CC_OP_LOGICQ: return compute_all_logicq();
6953
6954 case CC_OP_INCQ: return compute_all_incq();
6955
6956 case CC_OP_DECQ: return compute_all_decq();
6957
6958 case CC_OP_SHLQ: return compute_all_shlq();
6959
6960 case CC_OP_SARQ: return compute_all_sarq();
6961#endif
6962 }
6963}
6964
6965uint32_t helper_cc_compute_c(int op)
6966{
6967 switch (op) {
6968 default: /* should never happen */ return 0;
6969
6970 case CC_OP_EFLAGS: return compute_c_eflags();
6971
6972 case CC_OP_MULB: return compute_c_mull();
6973 case CC_OP_MULW: return compute_c_mull();
6974 case CC_OP_MULL: return compute_c_mull();
6975
6976 case CC_OP_ADDB: return compute_c_addb();
6977 case CC_OP_ADDW: return compute_c_addw();
6978 case CC_OP_ADDL: return compute_c_addl();
6979
6980 case CC_OP_ADCB: return compute_c_adcb();
6981 case CC_OP_ADCW: return compute_c_adcw();
6982 case CC_OP_ADCL: return compute_c_adcl();
6983
6984 case CC_OP_SUBB: return compute_c_subb();
6985 case CC_OP_SUBW: return compute_c_subw();
6986 case CC_OP_SUBL: return compute_c_subl();
6987
6988 case CC_OP_SBBB: return compute_c_sbbb();
6989 case CC_OP_SBBW: return compute_c_sbbw();
6990 case CC_OP_SBBL: return compute_c_sbbl();
6991
6992 case CC_OP_LOGICB: return compute_c_logicb();
6993 case CC_OP_LOGICW: return compute_c_logicw();
6994 case CC_OP_LOGICL: return compute_c_logicl();
6995
6996 case CC_OP_INCB: return compute_c_incl();
6997 case CC_OP_INCW: return compute_c_incl();
6998 case CC_OP_INCL: return compute_c_incl();
6999
7000 case CC_OP_DECB: return compute_c_incl();
7001 case CC_OP_DECW: return compute_c_incl();
7002 case CC_OP_DECL: return compute_c_incl();
7003
7004 case CC_OP_SHLB: return compute_c_shlb();
7005 case CC_OP_SHLW: return compute_c_shlw();
7006 case CC_OP_SHLL: return compute_c_shll();
7007
7008 case CC_OP_SARB: return compute_c_sarl();
7009 case CC_OP_SARW: return compute_c_sarl();
7010 case CC_OP_SARL: return compute_c_sarl();
7011
7012#ifdef TARGET_X86_64
7013 case CC_OP_MULQ: return compute_c_mull();
7014
7015 case CC_OP_ADDQ: return compute_c_addq();
7016
7017 case CC_OP_ADCQ: return compute_c_adcq();
7018
7019 case CC_OP_SUBQ: return compute_c_subq();
7020
7021 case CC_OP_SBBQ: return compute_c_sbbq();
7022
7023 case CC_OP_LOGICQ: return compute_c_logicq();
7024
7025 case CC_OP_INCQ: return compute_c_incl();
7026
7027 case CC_OP_DECQ: return compute_c_incl();
7028
7029 case CC_OP_SHLQ: return compute_c_shlq();
7030
7031 case CC_OP_SARQ: return compute_c_sarl();
7032#endif
7033 }
7034}