VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 77978

Last change on this file since 77978 was 76397, checked in by vboxsync, 6 years ago

VBox/vmm/hm_svm.h,hm_vmx.h: Try avoid including VBox/err.h in widely used headers, so split out the inline stuff from hm_vmx.h into hmvmxinline.h. bugref:9344

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 200.9 KB
Line 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38# include <VBox/err.h>
39#endif /* VBOX */
40
41//#define DEBUG_PCALL
42
43
/* Protected-mode call/interrupt tracing: expands to qemu log calls when
 * DEBUG_PCALL is defined (see toggle above), otherwise compiles to nothing. */
44#ifdef DEBUG_PCALL
45# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
46# define LOG_PCALL_STATE(env) \
47 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
48#else
49# define LOG_PCALL(...) do { } while (0)
50# define LOG_PCALL_STATE(env) do { } while (0)
51#endif
52
53
54#if 0
55#define raise_exception_err(a, b)\
56do {\
57 qemu_log("raise_exception line=%d\n", __LINE__);\
58 (raise_exception_err)(a, b);\
59} while (0)
60#endif
61
/* Lookup table for the x86 PF flag: entry i is CC_P when byte value i has
 * an even number of set bits, 0 otherwise. */
62static const uint8_t parity_table[256] = {
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
90 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
91 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
94 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
95};
96
/* rcl/rcr with a 16-bit operand rotates through 17 bits (16 data + CF),
 * so the effective rotate count is the raw count modulo 17. */
static const uint8_t rclw_table[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
};
104
/* rcl/rcr with an 8-bit operand rotates through 9 bits (8 data + CF),
 * so the effective rotate count is the raw count modulo 9. */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4,
};
112
/* Constant pool used by the x87 load-constant helpers:
 * [0]=+0.0, [1]=+1.0, [2]=pi, [3]=log10(2), [4]=ln(2), [5]=log2(e), [6]=log2(10). */
113static const CPU86_LDouble f15rk[7] =
114{
115 0.00000000000000000000L,
116 1.00000000000000000000L,
117 3.14159265358979323851L, /*pi*/
118 0.30102999566398119523L, /*lg2*/
119 0.69314718055994530943L, /*ln2*/
120 1.44269504088896340739L, /*l2e*/
121 3.32192809488736234781L, /*l2t*/
122};
123
124/* broken thread support */
125
/* Single global lock taken/released by helper_lock()/helper_unlock() below
 * to serialize emulation of the x86 LOCK prefix. */
126static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
127
128void helper_lock(void)
129{
130 spin_lock(&global_cpu_lock);
131}
132
133void helper_unlock(void)
134{
135 spin_unlock(&global_cpu_lock);
136}
137
138void helper_write_eflags(target_ulong t0, uint32_t update_mask)
139{
140 load_eflags(t0, update_mask);
141}
142
143target_ulong helper_read_eflags(void)
144{
145 uint32_t eflags;
146 eflags = helper_cc_compute_all(CC_OP);
147 eflags |= (DF & DF_MASK);
148 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
149 return eflags;
150}
151
152#ifdef VBOX
153
154void helper_write_eflags_vme(target_ulong t0)
155{
156 unsigned int new_eflags = t0;
157
158 assert(env->eflags & (1<<VM_SHIFT));
159
160 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
161 /* if TF will be set -> #GP */
162 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
163 || (new_eflags & TF_MASK)) {
164 raise_exception(EXCP0D_GPF);
165 } else {
166 load_eflags(new_eflags,
167 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
168
169 if (new_eflags & IF_MASK) {
170 env->eflags |= VIF_MASK;
171 } else {
172 env->eflags &= ~VIF_MASK;
173 }
174 }
175}
176
177target_ulong helper_read_eflags_vme(void)
178{
179 uint32_t eflags;
180 eflags = helper_cc_compute_all(CC_OP);
181 eflags |= (DF & DF_MASK);
182 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
183 if (env->eflags & VIF_MASK)
184 eflags |= IF_MASK;
185 else
186 eflags &= ~IF_MASK;
187
188 /* According to AMD manual, should be read with IOPL == 3 */
189 eflags |= (3 << IOPL_SHIFT);
190
191 /* We only use helper_read_eflags_vme() in 16-bits mode */
192 return eflags & 0xffff;
193}
194
195void helper_dump_state()
196{
197 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
198 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
199 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
200 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
201 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
202 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
203 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
204}
205
206/**
207 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
208 * returns the updated e2.
209 *
210 * @returns e2 with A set.
211 * @param e2 The 2nd selector DWORD.
212 */
213static uint32_t set_segment_accessed(int selector, uint32_t e2)
214{
215 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
216 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
217
218 e2 |= DESC_A_MASK;
219 stl_kernel(ptr + 4, e2);
220 return e2;
221}
222
223#endif /* VBOX */
224
225/* return non zero if error */
226static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
227 int selector)
228{
229 SegmentCache *dt;
230 int index;
231 target_ulong ptr;
232
233 if (selector & 0x4)
234 dt = &env->ldt;
235 else
236 dt = &env->gdt;
237 index = selector & ~7;
238 if ((index + 7) > dt->limit)
239 return -1;
240 ptr = dt->base + index;
241 *e1_ptr = ldl_kernel(ptr);
242 *e2_ptr = ldl_kernel(ptr + 4);
243 return 0;
244}
245
246static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
247{
248 unsigned int limit;
249 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
250 if (e2 & DESC_G_MASK)
251 limit = (limit << 12) | 0xfff;
252 return limit;
253}
254
/* Reassemble the 32-bit segment base scattered across descriptor words. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;          /* base 15:0  */
    base |= (e2 & 0xff) << 16;         /* base 23:16 */
    base |= e2 & 0xff000000;           /* base 31:24 */
    return base;
}
259
260static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
261{
262 sc->base = get_seg_base(e1, e2);
263 sc->limit = get_seg_limit(e1, e2);
264#ifndef VBOX
265 sc->flags = e2;
266#else
267 sc->flags = e2 & DESC_RAW_FLAG_BITS;
268 sc->newselector = 0;
269 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
270#endif
271}
272
273/* init the segment cache in vm86 mode. */
274static inline void load_seg_vm(int seg, int selector)
275{
276 selector &= 0xffff;
277#ifdef VBOX
278 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
279 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
280 flags |= (3 << DESC_DPL_SHIFT);
281
282 cpu_x86_load_seg_cache(env, seg, selector,
283 (selector << 4), 0xffff, flags);
284#else /* VBOX */
285 cpu_x86_load_seg_cache(env, seg, selector,
286 (selector << 4), 0xffff, 0);
287#endif /* VBOX */
288}
289
/* Fetch the ring-'dpl' stack pointer pair (SS:ESP) from the current TSS,
 * handling both 16-bit and 32-bit TSS layouts; raises #TS if the slot is
 * outside the TSS limit, aborts on a malformed TR. */
290static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
291 uint32_t *esp_ptr, int dpl)
292{
293#ifndef VBOX
294 int type, index, shift;
295#else
296 unsigned int type, index, shift;
297#endif
298
299#if 0
300 {
301 int i;
302 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
303 for(i=0;i<env->tr.limit;i++) {
304 printf("%02x ", env->tr.base[i]);
305 if ((i & 7) == 7) printf("\n");
306 }
307 printf("\n");
308 }
309#endif
310
311 if (!(env->tr.flags & DESC_P_MASK))
312 cpu_abort(env, "invalid tss");
313 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
314 if ((type & 7) != 3)
315 cpu_abort(env, "invalid tss type");
/* shift: 0 = 16-bit TSS, 1 = 32-bit TSS; stack slots are 2 or 4 bytes. */
316 shift = type >> 3;
317 index = (dpl * 4 + 2) << shift;
318 if (index + (4 << shift) - 1 > env->tr.limit)
319 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
320 if (shift == 0) {
321 *esp_ptr = lduw_kernel(env->tr.base + index);
322 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
323 } else {
324 *esp_ptr = ldl_kernel(env->tr.base + index);
325 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
326 }
327}
328
329/* XXX: merge with load_seg() */
/* Load segment register 'seg_reg' with 'selector' as part of a task
 * switch.  Unlike load_seg(), descriptor faults here raise #TS so the
 * task switch remains restartable.  A null selector is accepted for
 * data segments but faults for CS/SS. */
330static void tss_load_seg(int seg_reg, int selector)
331{
332 uint32_t e1, e2;
333 int rpl, dpl, cpl;
334
335#ifdef VBOX
336 e1 = e2 = 0; /* gcc warning? */
337 cpl = env->hflags & HF_CPL_MASK;
338 /* Trying to load a selector with CPL=1? */
/* NOTE(review): raw ring-0 mode remaps guest ring-0 selectors to RPL 1;
 * undo that here so checks below see the architectural value. */
339 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
340 {
341 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
342 selector = selector & 0xfffc;
343 }
344#endif /* VBOX */
345
346 if ((selector & 0xfffc) != 0) {
347 if (load_segment(&e1, &e2, selector) != 0)
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 if (!(e2 & DESC_S_MASK))
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 rpl = selector & 3;
352 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
353 cpl = env->hflags & HF_CPL_MASK;
354 if (seg_reg == R_CS) {
355 if (!(e2 & DESC_CS_MASK))
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 /* XXX: is it correct ? */
358 if (dpl != rpl)
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 if ((e2 & DESC_C_MASK) && dpl > rpl)
361 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
362 } else if (seg_reg == R_SS) {
363 /* SS must be writable data */
364 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 if (dpl != cpl || dpl != rpl)
367 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
368 } else {
369 /* not readable code */
370 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372 /* if data or non conforming code, checks the rights */
373 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
374 if (dpl < cpl || dpl < rpl)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 }
377 }
378 if (!(e2 & DESC_P_MASK))
379 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
380 cpu_x86_load_seg_cache(env, seg_reg, selector,
381 get_seg_base(e1, e2),
382 get_seg_limit(e1, e2),
383 e2);
384 } else {
385 if (seg_reg == R_SS || seg_reg == R_CS)
386 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
387#ifdef VBOX
388# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
389 cpu_x86_load_seg_cache(env, seg_reg, selector,
390 0, 0, 0);
391# endif
392#endif /* VBOX */
393 }
394}
395
/* Reason codes for switch_tss(): they control busy-bit and NT handling. */
396#define SWITCH_TSS_JMP 0
397#define SWITCH_TSS_IRET 1
398#define SWITCH_TSS_CALL 2
399
400/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by tss_selector
 * (descriptor words e1/e2): validates the target TSS, saves the current
 * register state into the old TSS, loads the new one, then reloads LDT
 * and segment registers with full checks.  'source' is one of the
 * SWITCH_TSS_* codes above; 'next_eip' is saved as the return EIP.
 * Order of operations matters throughout — do not reorder. */
401static void switch_tss(int tss_selector,
402 uint32_t e1, uint32_t e2, int source,
403 uint32_t next_eip)
404{
405 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
406 target_ulong tss_base;
407 uint32_t new_regs[8], new_segs[6];
408 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
409 uint32_t old_eflags, eflags_mask;
410 SegmentCache *dt;
411#ifndef VBOX
412 int index;
413#else
414 unsigned int index;
415#endif
416 target_ulong ptr;
417
418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
419 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
420
421 /* if task gate, we read the TSS segment and we load it */
422 if (type == 5) {
423 if (!(e2 & DESC_P_MASK))
424 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
425 tss_selector = e1 >> 16;
426 if (tss_selector & 4)
427 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
428 if (load_segment(&e1, &e2, tss_selector) != 0)
429 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
430 if (e2 & DESC_S_MASK)
431 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
432 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
433 if ((type & 7) != 1)
434 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
435 }
436
437 if (!(e2 & DESC_P_MASK))
438 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
439
/* Minimum TSS sizes: 104 bytes (32-bit), 44 bytes (16-bit). */
440 if (type & 8)
441 tss_limit_max = 103;
442 else
443 tss_limit_max = 43;
444 tss_limit = get_seg_limit(e1, e2);
445 tss_base = get_seg_base(e1, e2);
446 if ((tss_selector & 4) != 0 ||
447 tss_limit < tss_limit_max)
448 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
449 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
450 if (old_type & 8)
451 old_tss_limit_max = 103;
452 else
453 old_tss_limit_max = 43;
454
455#ifndef VBOX /* The old TSS is written first... */
456 /* read all the registers from the new TSS */
457 if (type & 8) {
458 /* 32 bit */
459 new_cr3 = ldl_kernel(tss_base + 0x1c);
460 new_eip = ldl_kernel(tss_base + 0x20);
461 new_eflags = ldl_kernel(tss_base + 0x24);
462 for(i = 0; i < 8; i++)
463 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
464 for(i = 0; i < 6; i++)
465 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
466 new_ldt = lduw_kernel(tss_base + 0x60);
467 new_trap = ldl_kernel(tss_base + 0x64);
468 } else {
469 /* 16 bit */
470 new_cr3 = 0;
471 new_eip = lduw_kernel(tss_base + 0x0e);
472 new_eflags = lduw_kernel(tss_base + 0x10);
473 for(i = 0; i < 8; i++)
474 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
475 for(i = 0; i < 4; i++)
476 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
477 new_ldt = lduw_kernel(tss_base + 0x2a);
478 new_segs[R_FS] = 0;
479 new_segs[R_GS] = 0;
480 new_trap = 0;
481 }
482#endif
483
484 /* NOTE: we must avoid memory exceptions during the task switch,
485 so we make dummy accesses before */
486 /* XXX: it can still fail in some cases, so a bigger hack is
487 necessary to valid the TLB after having done the accesses */
488
489 v1 = ldub_kernel(env->tr.base);
490 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
491 stb_kernel(env->tr.base, v1);
492 stb_kernel(env->tr.base + old_tss_limit_max, v2);
493
494 /* clear busy bit (it is restartable) */
495 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
496 target_ulong ptr;
497 uint32_t e2;
498 ptr = env->gdt.base + (env->tr.selector & ~7);
499 e2 = ldl_kernel(ptr + 4);
500 e2 &= ~DESC_TSS_BUSY_MASK;
501 stl_kernel(ptr + 4, e2);
502 }
503 old_eflags = compute_eflags();
504 if (source == SWITCH_TSS_IRET)
505 old_eflags &= ~NT_MASK;
506
507 /* save the current state in the old TSS */
508 if (type & 8) {
509 /* 32 bit */
510 stl_kernel(env->tr.base + 0x20, next_eip);
511 stl_kernel(env->tr.base + 0x24, old_eflags);
512 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
513 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
514 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
515 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
516 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
517 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
518 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
519 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
520 for(i = 0; i < 6; i++)
521 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
522#if defined(VBOX) && defined(DEBUG)
523 printf("TSS 32 bits switch\n");
524 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
525#endif
526 } else {
527 /* 16 bit */
528 stw_kernel(env->tr.base + 0x0e, next_eip);
529 stw_kernel(env->tr.base + 0x10, old_eflags);
530 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
531 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
532 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
533 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
534 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
535 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
536 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
537 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
538 for(i = 0; i < 4; i++)
539 stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
540 }
541
542#ifdef VBOX
543 /* read all the registers from the new TSS - may be the same as the old one */
544 if (type & 8) {
545 /* 32 bit */
546 new_cr3 = ldl_kernel(tss_base + 0x1c);
547 new_eip = ldl_kernel(tss_base + 0x20);
548 new_eflags = ldl_kernel(tss_base + 0x24);
549 for(i = 0; i < 8; i++)
550 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
551 for(i = 0; i < 6; i++)
552 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
553 new_ldt = lduw_kernel(tss_base + 0x60);
554 new_trap = ldl_kernel(tss_base + 0x64);
555 } else {
556 /* 16 bit */
557 new_cr3 = 0;
558 new_eip = lduw_kernel(tss_base + 0x0e);
559 new_eflags = lduw_kernel(tss_base + 0x10);
560 for(i = 0; i < 8; i++)
561 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
562 for(i = 0; i < 4; i++)
563 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
564 new_ldt = lduw_kernel(tss_base + 0x2a);
565 new_segs[R_FS] = 0;
566 new_segs[R_GS] = 0;
567 new_trap = 0;
568 }
569#endif
570
571 /* now if an exception occurs, it will occurs in the next task
572 context */
573
574 if (source == SWITCH_TSS_CALL) {
575 stw_kernel(tss_base, env->tr.selector);
576 new_eflags |= NT_MASK;
577 }
578
579 /* set busy bit */
580 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
581 target_ulong ptr;
582 uint32_t e2;
583 ptr = env->gdt.base + (tss_selector & ~7);
584 e2 = ldl_kernel(ptr + 4);
585 e2 |= DESC_TSS_BUSY_MASK;
586 stl_kernel(ptr + 4, e2);
587 }
588
589 /* set the new CPU state */
590 /* from this point, any exception which occurs can give problems */
591 env->cr[0] |= CR0_TS_MASK;
592 env->hflags |= HF_TS_MASK;
593 env->tr.selector = tss_selector;
594 env->tr.base = tss_base;
595 env->tr.limit = tss_limit;
596#ifndef VBOX
597 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
598#else
599 env->tr.flags = (e2 | DESC_TSS_BUSY_MASK) & DESC_RAW_FLAG_BITS;
600 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
601 env->tr.newselector = 0;
602#endif
603
604 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
605 cpu_x86_update_cr3(env, new_cr3);
606 }
607
608 /* load all registers without an exception, then reload them with
609 possible exception */
610 env->eip = new_eip;
611 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
612 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
613 if (!(type & 8))
614 eflags_mask &= 0xffff;
615 load_eflags(new_eflags, eflags_mask);
616 /* XXX: what to do in 16 bit case ? */
617 EAX = new_regs[0];
618 ECX = new_regs[1];
619 EDX = new_regs[2];
620 EBX = new_regs[3];
621 ESP = new_regs[4];
622 EBP = new_regs[5];
623 ESI = new_regs[6];
624 EDI = new_regs[7];
625 if (new_eflags & VM_MASK) {
626 for(i = 0; i < 6; i++)
627 load_seg_vm(i, new_segs[i]);
628 /* in vm86, CPL is always 3 */
629 cpu_x86_set_cpl(env, 3);
630 } else {
631 /* CPL is set the RPL of CS */
632 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
633 /* first just selectors as the rest may trigger exceptions */
634 for(i = 0; i < 6; i++)
635 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
636 }
637
638 env->ldt.selector = new_ldt & ~4;
639 env->ldt.base = 0;
640 env->ldt.limit = 0;
641 env->ldt.flags = 0;
642#ifdef VBOX
643 env->ldt.flags = DESC_INTEL_UNUSABLE;
644 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
645 env->ldt.newselector = 0;
646#endif
647
648 /* load the LDT */
649 if (new_ldt & 4)
650 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
651
652 if ((new_ldt & 0xfffc) != 0) {
653 dt = &env->gdt;
654 index = new_ldt & ~7;
655 if ((index + 7) > dt->limit)
656 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
657 ptr = dt->base + index;
658 e1 = ldl_kernel(ptr);
659 e2 = ldl_kernel(ptr + 4);
660 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
661 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
662 if (!(e2 & DESC_P_MASK))
663 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
664 load_seg_cache_raw_dt(&env->ldt, e1, e2);
665 }
666
667 /* load the segments */
668 if (!(new_eflags & VM_MASK)) {
669 tss_load_seg(R_CS, new_segs[R_CS]);
670 tss_load_seg(R_SS, new_segs[R_SS]);
671 tss_load_seg(R_ES, new_segs[R_ES]);
672 tss_load_seg(R_DS, new_segs[R_DS]);
673 tss_load_seg(R_FS, new_segs[R_FS]);
674 tss_load_seg(R_GS, new_segs[R_GS]);
675 }
676
677 /* check that EIP is in the CS segment limits */
678 if (new_eip > env->segs[R_CS].limit) {
679 /* XXX: different exception if CALL ? */
680 raise_exception_err(EXCP0D_GPF, 0);
681 }
682
683#ifndef CONFIG_USER_ONLY
684 /* reset local breakpoints */
685 if (env->dr[7] & 0x55) {
686 for (i = 0; i < 4; i++) {
687 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
688 hw_breakpoint_remove(env, i);
689 }
690 env->dr[7] &= ~0x55;
691 }
692#endif
693}
694
695/* check if Port I/O is allowed in TSS */
/* Consults the I/O permission bitmap in the current (32-bit) TSS for a
 * 'size'-byte access at port 'addr'; raises #GP(0) on denial or if the
 * TSS/bitmap is malformed.  Note the label inside the if: the goto
 * targets the raise, sharing the failure path. */
696static inline void check_io(int addr, int size)
697{
698#ifndef VBOX
699 int io_offset, val, mask;
700#else
701 int val, mask;
702 unsigned int io_offset;
703#endif /* VBOX */
704
705 /* TSS must be a valid 32 bit one */
706 if (!(env->tr.flags & DESC_P_MASK) ||
707 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 11 ||
708 env->tr.limit < 103)
709 goto fail;
710 io_offset = lduw_kernel(env->tr.base + 0x66);
711 io_offset += (addr >> 3);
712 /* Note: the check needs two bytes */
713 if ((io_offset + 1) > env->tr.limit)
714 goto fail;
715 val = lduw_kernel(env->tr.base + io_offset);
716 val >>= (addr & 7);
717 mask = (1 << size) - 1;
718 /* all bits must be zero to allow the I/O */
719 if ((val & mask) != 0) {
720 fail:
721 raise_exception_err(EXCP0D_GPF, 0);
722 }
723}
724
725#ifdef VBOX
726
727/* Keep in sync with gen_check_external_event() */
728void helper_check_external_event()
729{
730 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
731 | CPU_INTERRUPT_EXTERNAL_EXIT
732 | CPU_INTERRUPT_EXTERNAL_TIMER
733 | CPU_INTERRUPT_EXTERNAL_DMA))
734 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
735 && (env->eflags & IF_MASK)
736 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
737 {
738 helper_external_event();
739 }
740
741}
742
/* Flush a lazily-loaded segment register: if a new selector is pending
 * for 'reg', load it now via sync_seg(). */
743void helper_sync_seg(uint32_t reg)
744{
745 if (env->segs[reg].newselector)
746 sync_seg(env, reg, env->segs[reg].newselector);
747}
748
749#endif /* VBOX */
750
/* TSS I/O-permission checks for byte/word/dword port accesses; each
 * raises #GP(0) via check_io() if the access is not permitted. */
751void helper_check_iob(uint32_t t0)
752{
753 check_io(t0, 1);
754}
755
756void helper_check_iow(uint32_t t0)
757{
758 check_io(t0, 2);
759}
760
761void helper_check_iol(uint32_t t0)
762{
763 check_io(t0, 4);
764}
765
/* Port I/O wrappers: VBox's cpu_out*/cpu_in* take the CPU env as first
 * argument, upstream qemu's do not; data is truncated to access width. */
766void helper_outb(uint32_t port, uint32_t data)
767{
768#ifndef VBOX
769 cpu_outb(port, data & 0xff);
770#else
771 cpu_outb(env, port, data & 0xff);
772#endif
773}
774
775target_ulong helper_inb(uint32_t port)
776{
777#ifndef VBOX
778 return cpu_inb(port);
779#else
780 return cpu_inb(env, port);
781#endif
782}
783
784void helper_outw(uint32_t port, uint32_t data)
785{
786#ifndef VBOX
787 cpu_outw(port, data & 0xffff);
788#else
789 cpu_outw(env, port, data & 0xffff);
790#endif
791}
792
793target_ulong helper_inw(uint32_t port)
794{
795#ifndef VBOX
796 return cpu_inw(port);
797#else
798 return cpu_inw(env, port);
799#endif
800}
801
802void helper_outl(uint32_t port, uint32_t data)
803{
804#ifndef VBOX
805 cpu_outl(port, data);
806#else
807 cpu_outl(env, port, data);
808#endif
809}
810
811target_ulong helper_inl(uint32_t port)
812{
813#ifndef VBOX
814 return cpu_inl(port);
815#else
816 return cpu_inl(env, port);
817#endif
818}
819
820static inline unsigned int get_sp_mask(unsigned int e2)
821{
822 if (e2 & DESC_B_MASK)
823 return 0xffffffff;
824 else
825 return 0xffff;
826}
827
/* Returns 1 for exception vectors that push an error code (#DF=8,
 * #TS=10, #NP=11, #SS=12, #GP=13, #PF=14, #AC=17), else 0.
 * NOTE(review): the "exeption" typo is kept — callers use this name. */
static int exeption_has_error_code(int intno)
{
    if (intno == 8 || intno == 17)
        return 1;
    return (intno >= 10 && intno <= 14);
}
842
/* SET_ESP writes back a stack pointer under a width mask; on 64-bit
 * targets the three cases keep the untouched high bits intact. */
843#ifdef TARGET_X86_64
844#define SET_ESP(val, sp_mask)\
845do {\
846 if ((sp_mask) == 0xffff)\
847 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
848 else if ((sp_mask) == 0xffffffffLL)\
849 ESP = (uint32_t)(val);\
850 else\
851 ESP = (val);\
852} while (0)
853#else
854#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
855#endif
856
857/* in 64-bit machines, this can overflow. So this segment addition macro
858 * can be used to trim the value to 32-bit whenever needed */
859#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
860
861/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop helpers: 'sp' is a local copy updated in place; memory
 * is touched through the kernel-privilege accessors.  Note these expand
 * 'sp' and 'val' more than once — only pass simple lvalues. */
862#define PUSHW(ssp, sp, sp_mask, val)\
863{\
864 sp -= 2;\
865 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
866}
867
868#define PUSHL(ssp, sp, sp_mask, val)\
869{\
870 sp -= 4;\
871 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
872}
873
874#define POPW(ssp, sp, sp_mask, val)\
875{\
876 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
877 sp += 2;\
878}
879
880#define POPL(ssp, sp, sp_mask, val)\
881{\
882 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
883 sp += 4;\
884}
885
886/* protected mode interrupt */
/* Delivers interrupt/exception 'intno' in protected mode: walks the IDT
 * gate, validates the target code segment, optionally switches to an
 * inner-privilege stack from the TSS, pushes the interrupted context
 * (and error code if applicable), and transfers to the handler.
 * is_int: software INT n; is_hw: hardware interrupt; next_eip: return
 * address for software interrupts.  Check order follows the SDM. */
887static void do_interrupt_protected(int intno, int is_int, int error_code,
888 unsigned int next_eip, int is_hw)
889{
890 SegmentCache *dt;
891 target_ulong ptr, ssp;
892 int type, dpl, selector, ss_dpl, cpl;
893 int has_error_code, new_stack, shift;
894 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
895 uint32_t old_eip, sp_mask;
896
897#ifdef VBOX
898 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
899 cpu_loop_exit();
900#endif
901
902 has_error_code = 0;
903 if (!is_int && !is_hw)
904 has_error_code = exeption_has_error_code(intno);
905 if (is_int)
906 old_eip = next_eip;
907 else
908 old_eip = env->eip;
909
910 dt = &env->idt;
911#ifndef VBOX
912 if (intno * 8 + 7 > dt->limit)
913#else
914 if ((unsigned)intno * 8 + 7 > dt->limit)
915#endif
916 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
917 ptr = dt->base + intno * 8;
918 e1 = ldl_kernel(ptr);
919 e2 = ldl_kernel(ptr + 4);
920 /* check gate type */
921 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
922 switch(type) {
923 case 5: /* task gate */
924#ifdef VBOX
925 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
926 cpl = env->hflags & HF_CPL_MASK;
927 /* check privilege if software int */
928 if (is_int && dpl < cpl)
929 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
930#endif
931 /* must do that check here to return the correct error code */
932 if (!(e2 & DESC_P_MASK))
933 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
934 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
935 if (has_error_code) {
936 int type;
937 uint32_t mask;
938 /* push the error code */
/* The error code goes on the NEW task's stack, sized by its TSS type. */
939 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
940 shift = type >> 3;
941 if (env->segs[R_SS].flags & DESC_B_MASK)
942 mask = 0xffffffff;
943 else
944 mask = 0xffff;
945 esp = (ESP - (2 << shift)) & mask;
946 ssp = env->segs[R_SS].base + esp;
947 if (shift)
948 stl_kernel(ssp, error_code);
949 else
950 stw_kernel(ssp, error_code);
951 SET_ESP(esp, mask);
952 }
953 return;
954 case 6: /* 286 interrupt gate */
955 case 7: /* 286 trap gate */
956 case 14: /* 386 interrupt gate */
957 case 15: /* 386 trap gate */
958 break;
959 default:
960 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
961 break;
962 }
963 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
964 cpl = env->hflags & HF_CPL_MASK;
965 /* check privilege if software int */
966 if (is_int && dpl < cpl)
967 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
968 /* check valid bit */
969 if (!(e2 & DESC_P_MASK))
970 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
971 selector = e1 >> 16;
972 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
973 if ((selector & 0xfffc) == 0)
974 raise_exception_err(EXCP0D_GPF, 0);
975
976 if (load_segment(&e1, &e2, selector) != 0)
977 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
978#ifdef VBOX /** @todo figure out when this is done one day... */
979 if (!(e2 & DESC_A_MASK))
980 e2 = set_segment_accessed(selector, e2);
981#endif
982 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
983 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
984 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
985 if (dpl > cpl)
986 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
987 if (!(e2 & DESC_P_MASK))
988 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
989 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
990 /* to inner privilege */
991 get_ss_esp_from_tss(&ss, &esp, dpl);
992 if ((ss & 0xfffc) == 0)
993 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
994 if ((ss & 3) != dpl)
995 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
996 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
997 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
998#ifdef VBOX /** @todo figure out when this is done one day... */
999 if (!(ss_e2 & DESC_A_MASK))
1000 ss_e2 = set_segment_accessed(ss, ss_e2);
1001#endif
1002 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1003 if (ss_dpl != dpl)
1004 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1005 if (!(ss_e2 & DESC_S_MASK) ||
1006 (ss_e2 & DESC_CS_MASK) ||
1007 !(ss_e2 & DESC_W_MASK))
1008 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1009 if (!(ss_e2 & DESC_P_MASK))
1010#ifdef VBOX /* See page 3-477 of 253666.pdf */
1011 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
1012#else
1013 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1014#endif
1015 new_stack = 1;
1016 sp_mask = get_sp_mask(ss_e2);
1017 ssp = get_seg_base(ss_e1, ss_e2);
1018#if defined(VBOX) && defined(DEBUG)
1019 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1020#endif
1021 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1022 /* to same privilege */
1023 if (env->eflags & VM_MASK)
1024 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1025 new_stack = 0;
1026 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1027 ssp = env->segs[R_SS].base;
1028 esp = ESP;
1029 dpl = cpl;
1030 } else {
1031 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1032 new_stack = 0; /* avoid warning */
1033 sp_mask = 0; /* avoid warning */
1034 ssp = 0; /* avoid warning */
1035 esp = 0; /* avoid warning */
1036 }
1037
/* shift: 0 = 16-bit gate (word pushes), 1 = 32-bit gate (dword pushes). */
1038 shift = type >> 3;
1039
1040#if 0
1041 /* XXX: check that enough room is available */
1042 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1043 if (env->eflags & VM_MASK)
1044 push_size += 8;
1045 push_size <<= shift;
1046#endif
1047 if (shift == 1) {
1048 if (new_stack) {
1049 if (env->eflags & VM_MASK) {
1050 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1051 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1052 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1053 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1054 }
1055 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1056 PUSHL(ssp, esp, sp_mask, ESP);
1057 }
1058 PUSHL(ssp, esp, sp_mask, compute_eflags());
1059 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1060 PUSHL(ssp, esp, sp_mask, old_eip);
1061 if (has_error_code) {
1062 PUSHL(ssp, esp, sp_mask, error_code);
1063 }
1064 } else {
1065 if (new_stack) {
1066 if (env->eflags & VM_MASK) {
1067 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1068 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1069 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1070 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1071 }
1072 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1073 PUSHW(ssp, esp, sp_mask, ESP);
1074 }
1075 PUSHW(ssp, esp, sp_mask, compute_eflags());
1076 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1077 PUSHW(ssp, esp, sp_mask, old_eip);
1078 if (has_error_code) {
1079 PUSHW(ssp, esp, sp_mask, error_code);
1080 }
1081 }
1082
1083 if (new_stack) {
1084 if (env->eflags & VM_MASK) {
1085 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1086 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1087 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1088 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1089 }
1090 ss = (ss & ~3) | dpl;
1091 cpu_x86_load_seg_cache(env, R_SS, ss,
1092 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1093 }
1094 SET_ESP(esp, sp_mask);
1095
1096 selector = (selector & ~3) | dpl;
1097 cpu_x86_load_seg_cache(env, R_CS, selector,
1098 get_seg_base(e1, e2),
1099 get_seg_limit(e1, e2),
1100 e2);
1101 cpu_x86_set_cpl(env, dpl);
1102 env->eip = offset;
1103
1104 /* interrupt gate clear IF mask */
1105 if ((type & 1) == 0) {
1106 env->eflags &= ~IF_MASK;
1107 }
1108#ifndef VBOX
1109 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1110#else
1111 /*
1112 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1113 * gets confused by seemingly changed EFLAGS. See #3491 and
1114 * public bug #2341.
1115 */
1116 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1117#endif
1118}
1119
1120#ifdef VBOX
1121
/**
 * Checks the VME interrupt redirection bitmap in the current TSS to see
 * whether software interrupt @a intno should be handled via the virtual
 * mode IDT (redirected) rather than the protected mode handler.
 *
 * @returns true if the interrupt is redirected to the V86 handler (bitmap
 *          bit clear), false if it must go to the protected mode handler
 *          (bitmap bit set).
 * @param   intno   Interrupt vector number (0..255).
 *
 * @remarks Raises \#GP(0) and does not return if the TSS is not a present
 *          32-bit busy TSS large enough to hold the redirection bitmap.
 */
DECLINLINE(bool) is_vme_irq_redirected(int intno)
{
    unsigned int io_offset, intredir_offset;
    unsigned char val, mask;

    /* TSS must be a valid 32 bit one: present, type 11 (busy 32-bit TSS),
       and large enough to contain the I/O map base word at offset 0x66. */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 11 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
    if (io_offset < 0x68 + 0x20)
        io_offset = 0x68 + 0x20;
    /* the virtual interrupt redirection bitmap is located below the io bitmap */
    intredir_offset = io_offset - 0x20;

    /* One bit per vector; byte index is intno / 8. */
    intredir_offset += (intno >> 3);
    if ((intredir_offset) > env->tr.limit)
        goto fail;

    val = ldub_kernel(env->tr.base + intredir_offset);
    mask = 1 << (unsigned char)(intno & 7);

    /* bit set means no redirection. */
    if ((val & mask) != 0) {
        return false;
    }
    return true;

fail:
    raise_exception_err(EXCP0D_GPF, 0);
    return true;
}
1157
/**
 * Delivers a V86 mode software interrupt with CR4.VME=1.
 *
 * If the vector is not redirected by the TSS bitmap, the interrupt is
 * delivered through the protected mode path (IOPL=3) or faults with
 * \#GP(0) (IOPL<3).  Otherwise it is dispatched through the virtual mode
 * IDT at linear address 0, with the VME IF/VIF virtualization applied.
 *
 * @param   intno       Interrupt vector number.
 * @param   error_code  Error code to forward to the protected mode path.
 * @param   next_eip    EIP of the instruction following the INT.
 */
static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
{
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eflags;
    uint32_t iopl;

    iopl = ((env->eflags >> IOPL_SHIFT) & 3);

    if (!is_vme_irq_redirected(intno))
    {
        /* Not redirected: behave like plain V86 INT n. */
        if (iopl == 3)
        {
            do_interrupt_protected(intno, 1, error_code, next_eip, 0);
            return;
        }
        else
            raise_exception_err(EXCP0D_GPF, 0);
    }

    /* virtual mode idt is at linear address 0 */
    ptr = 0 + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;

    old_eflags = compute_eflags();
    if (iopl < 3)
    {
        /* copy VIF into IF and set IOPL to 3 in the pushed image, so the
           V86 handler sees the virtualized interrupt flag */
        if (env->eflags & VIF_MASK)
            old_eflags |= IF_MASK;
        else
            old_eflags &= ~IF_MASK;

        old_eflags |= (3 << IOPL_SHIFT);
    }

    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, old_eflags);
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, next_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(TF_MASK | RF_MASK);

    /* Interrupt masking: virtualized (VIF) when IOPL<3, real IF otherwise. */
    if (iopl < 3)
        env->eflags &= ~VIF_MASK;
    else
        env->eflags &= ~IF_MASK;
}
1217
1218#endif /* VBOX */
1219
1220#ifdef TARGET_X86_64
1221
/* Push/pop a 64-bit value on the kernel-mode stack; 'sp' is a modifiable
   lvalue and is updated in place (no segment base/mask in long mode). */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
1233
1234static inline target_ulong get_rsp_from_tss(int level)
1235{
1236 int index;
1237
1238#if 0
1239 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1240 env->tr.base, env->tr.limit);
1241#endif
1242
1243 if (!(env->tr.flags & DESC_P_MASK))
1244 cpu_abort(env, "invalid tss");
1245 index = 8 * level + 4;
1246 if ((index + 7) > env->tr.limit)
1247 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1248 return ldq_kernel(env->tr.base + index);
1249}
1250
/*
 * 64 bit interrupt delivery (long mode).
 *
 * Reads the 16-byte IDT gate for 'intno', validates gate/segment
 * descriptors, selects the target stack (IST or RSPn from the TSS for
 * privilege changes), pushes the interrupt frame and loads CS:RIP.
 *
 * intno      - vector number
 * is_int     - non-zero when caused by the INT instruction
 * error_code - pushed when the exception defines one
 * next_eip   - EIP after the INT instruction (only used when is_int)
 * is_hw      - non-zero for external/hardware interrupts
 */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

#ifdef VBOX
    /* Give the VMM a chance to intercept/handle the trap first. */
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    /* Only genuine exceptions (not INT n, not hardware) push an error code. */
    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    /* 16-byte gates in long mode; fault if the vector is beyond the IDT limit. */
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    /* 64-bit gate offset is split over all three dwords. */
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    /* Target must be a code segment. */
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    /* Must be a 64-bit code segment (L=1, D=0). */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* Long mode always pushes SS:RSP, even without a privilege change. */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        /* Load a NULL SS with the new RPL/DPL. */
        ss = 0 | dpl;
#ifndef VBOX
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
#else
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
#endif
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
#ifndef VBOX
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
#else /* VBOX */
    /*
     * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
     * gets confused by seemingly changed EFLAGS. See #3491 and
     * public bug #2341.
     */
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
#endif /* VBOX */
}
1385#endif
1386
1387#ifdef TARGET_X86_64
1388#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: just record the SYSCALL as an exception and let
   the usermode CPU loop emulate the system call. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
1395#else
/*
 * SYSCALL instruction.
 *
 * Loads CS/SS from the STAR MSR, saves the return RIP in RCX (and RFLAGS
 * in R11 in long mode), masks RFLAGS and jumps to LSTAR/CSTAR (long mode)
 * or the low 32 bits of STAR (legacy mode).  Faults with #UD when
 * EFER.SCE is clear.
 *
 * next_eip_addend - length of the SYSCALL instruction (return address
 *                   is the following instruction).
 */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    /* STAR[47:32] holds the kernel CS selector; SS is CS + 8. */
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* Long mode: return RIP in RCX, saved RFLAGS in R11. */
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* Clear the flags selected by the SFMASK MSR. */
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        /* 64-bit callers enter at LSTAR, compatibility mode at CSTAR. */
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        /* Legacy mode: return EIP in ECX only; fixed flag mask. */
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1447#endif
1448#endif
1449
1450#ifdef TARGET_X86_64
/*
 * SYSRET instruction.
 *
 * Returns from a SYSCALL: loads user CS/SS from STAR[63:48] (CS+16 for a
 * 64-bit return, CS+8 for SS), restores RFLAGS from R11 in long mode, and
 * jumps to RCX.  Faults with #UD when EFER.SCE is clear and #GP(0) when
 * not in protected mode at CPL 0.
 *
 * dflag - operand size: 2 means 64-bit operand (REX.W SYSRET).
 */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    /* STAR[63:48] holds the user CS selector base. */
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* Return to 64-bit mode: CS = selector + 16, RPL forced to 3. */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* Return to compatibility mode. */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* Restore user RFLAGS from R11 (reserved bits filtered). */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        /* Legacy mode return: flags are not restored, only IF is set. */
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
1504#endif
1505
1506#ifdef VBOX
1507
1508/**
1509 * Checks and processes external VMM events.
1510 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1511 */
1512void helper_external_event(void)
1513{
1514# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1515 uintptr_t uSP;
1516# ifdef RT_ARCH_AMD64
1517 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1518# else
1519 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1520# endif
1521 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1522# endif
1523 /* Keep in sync with flags checked by gen_check_external_event() */
1524 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1525 {
1526 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1527 ~CPU_INTERRUPT_EXTERNAL_HARD);
1528 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1529 }
1530 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1531 {
1532 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1533 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1534 cpu_exit(env);
1535 }
1536 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1537 {
1538 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1539 ~CPU_INTERRUPT_EXTERNAL_DMA);
1540 remR3DmaRun(env);
1541 }
1542 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1543 {
1544 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1545 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1546 remR3TimersRun(env);
1547 }
1548 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1549 {
1550 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1551 ~CPU_INTERRUPT_EXTERNAL_HARD);
1552 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1553 }
1554}
1555
1556/* helper for recording call instruction addresses for later scanning */
1557void helper_record_call()
1558{
1559 if ( !(env->state & CPU_RAW_RING0)
1560 && (env->cr[0] & CR0_PG_MASK)
1561 && !(env->eflags & X86_EFL_IF))
1562 remR3RecordCall(env);
1563}
1564
1565#endif /* VBOX */
1566
/*
 * Real mode interrupt delivery: fetch CS:IP from the 4-byte IVT entry,
 * push FLAGS/CS/IP (16-bit) and clear IF/TF/AC/RF.
 *
 * intno      - vector number
 * is_int     - non-zero when caused by the INT instruction
 * error_code - unused in real mode (no error codes are pushed)
 * next_eip   - EIP after the INT instruction (only used when is_int)
 */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
#ifndef VBOX
    if (intno * 4 + 3 > dt->limit)
#else
    /* unsigned compare avoids sign surprises for large vector numbers */
    if ((unsigned)intno * 4 + 3 > dt->limit)
#endif
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1607
/*
 * Fake user mode interrupt (CONFIG_USER_ONLY path).
 *
 * Performs only the privilege check on the IDT gate DPL, then leaves the
 * exception for the usermode emulation loop; no stack switching or frame
 * pushing is done.
 */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    /* IDT entries are 16 bytes in long mode, 8 bytes otherwise. */
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
1638
1639#if !defined(CONFIG_USER_ONLY)
1640static void handle_even_inj(int intno, int is_int, int error_code,
1641 int is_hw, int rm)
1642{
1643 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1644 if (!(event_inj & SVM_EVTINJ_VALID)) {
1645 int type;
1646 if (is_int)
1647 type = SVM_EVTINJ_TYPE_SOFT;
1648 else
1649 type = SVM_EVTINJ_TYPE_EXEPT;
1650 event_inj = intno | type | SVM_EVTINJ_VALID;
1651 if (!rm && exeption_has_error_code(intno)) {
1652 event_inj |= SVM_EVTINJ_VALID_ERR;
1653 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1654 }
1655 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1656 }
1657}
1658#endif
1659
1660/*
1661 * Begin execution of an interruption. is_int is TRUE if coming from
1662 * the int instruction. next_eip is the EIP value AFTER the interrupt
1663 * instruction. It is only relevant if is_int is TRUE.
1664 */
1665void do_interrupt(int intno, int is_int, int error_code,
1666 target_ulong next_eip, int is_hw)
1667{
1668 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1669 if ((env->cr[0] & CR0_PE_MASK)) {
1670 static int count;
1671 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1672 count, intno, error_code, is_int,
1673 env->hflags & HF_CPL_MASK,
1674 env->segs[R_CS].selector, EIP,
1675 (int)env->segs[R_CS].base + EIP,
1676 env->segs[R_SS].selector, ESP);
1677 if (intno == 0x0e) {
1678 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1679 } else {
1680 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1681 }
1682 qemu_log("\n");
1683 log_cpu_state(env, X86_DUMP_CCOP);
1684#if 0
1685 {
1686 int i;
1687 uint8_t *ptr;
1688 qemu_log(" code=");
1689 ptr = env->segs[R_CS].base + env->eip;
1690 for(i = 0; i < 16; i++) {
1691 qemu_log(" %02x", ldub(ptr + i));
1692 }
1693 qemu_log("\n");
1694 }
1695#endif
1696 count++;
1697 }
1698 }
1699#ifdef VBOX
1700 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1701 if (is_int) {
1702 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1703 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1704 } else {
1705 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1706 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1707 }
1708 }
1709#endif
1710 if (env->cr[0] & CR0_PE_MASK) {
1711#if !defined(CONFIG_USER_ONLY)
1712 if (env->hflags & HF_SVMI_MASK)
1713 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1714#endif
1715#ifdef TARGET_X86_64
1716 if (env->hflags & HF_LMA_MASK) {
1717 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1718 } else
1719#endif
1720 {
1721#ifdef VBOX
1722 /* int xx *, v86 code and VME enabled? */
1723 if ( (env->eflags & VM_MASK)
1724 && (env->cr[4] & CR4_VME_MASK)
1725 && is_int
1726 && !is_hw
1727 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1728 )
1729 do_soft_interrupt_vme(intno, error_code, next_eip);
1730 else
1731#endif /* VBOX */
1732 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1733 }
1734 } else {
1735#if !defined(CONFIG_USER_ONLY)
1736 if (env->hflags & HF_SVMI_MASK)
1737 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1738#endif
1739 do_interrupt_real(intno, is_int, error_code, next_eip);
1740 }
1741
1742#if !defined(CONFIG_USER_ONLY)
1743 if (env->hflags & HF_SVMI_MASK) {
1744 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1745 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1746 }
1747#endif
1748}
1749
1750/* This should come from sysemu.h - if we could include it here... */
1751void qemu_system_reset_request(void);
1752
1753/*
1754 * Check nested exceptions and change to double or triple fault if
1755 * needed. It should only be called, if this is not an interrupt.
1756 * Returns the new exception number.
1757 */
1758static int check_exception(int intno, int *error_code)
1759{
1760 int first_contributory = env->old_exception == 0 ||
1761 (env->old_exception >= 10 &&
1762 env->old_exception <= 13);
1763 int second_contributory = intno == 0 ||
1764 (intno >= 10 && intno <= 13);
1765
1766 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1767 env->old_exception, intno);
1768
1769#if !defined(CONFIG_USER_ONLY)
1770 if (env->old_exception == EXCP08_DBLE) {
1771 if (env->hflags & HF_SVMI_MASK)
1772 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1773
1774 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1775
1776# ifndef VBOX
1777 qemu_system_reset_request();
1778 return EXCP_HLT;
1779# else
1780 remR3RaiseRC(env->pVM, VINF_EM_TRIPLE_FAULT);
1781 return EXCP_RC;
1782# endif
1783 }
1784#endif
1785
1786 if ((first_contributory && second_contributory)
1787 || (env->old_exception == EXCP0E_PAGE &&
1788 (second_contributory || (intno == EXCP0E_PAGE)))) {
1789 intno = EXCP08_DBLE;
1790 *error_code = 0;
1791 }
1792
1793 if (second_contributory || (intno == EXCP0E_PAGE) ||
1794 (intno == EXCP08_DBLE))
1795 env->old_exception = intno;
1796
1797 return intno;
1798}
1799
1800/*
1801 * Signal an interruption. It is executed in the main CPU loop.
1802 * is_int is TRUE if coming from the int instruction. next_eip is the
1803 * EIP value AFTER the interrupt instruction. It is only relevant if
1804 * is_int is TRUE.
1805 */
1806static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1807 int next_eip_addend)
1808{
1809#if defined(VBOX) && defined(DEBUG)
1810 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1811#endif
1812 if (!is_int) {
1813 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1814 intno = check_exception(intno, &error_code);
1815 } else {
1816 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1817 }
1818
1819 env->exception_index = intno;
1820 env->error_code = error_code;
1821 env->exception_is_int = is_int;
1822 env->exception_next_eip = env->eip + next_eip_addend;
1823 cpu_loop_exit();
1824}
1825
1826/* shortcuts to generate exceptions */
1827
/* Raise an exception carrying an error code at the current EIP. */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1832
/* Raise an exception without an error code at the current EIP. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1837
/* Raise an exception on the given CPU state; switches the global 'env'
   to 'nenv' before raising (used from code not running on 'env'). */
void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
1843/* SMM support */
1844
1845#if defined(CONFIG_USER_ONLY)
1846
/* CONFIG_USER_ONLY: SMM does not exist in usermode emulation; no-op. */
void do_smm_enter(void)
{
}
1850
/* CONFIG_USER_ONLY: RSM is meaningless in usermode emulation; no-op. */
void helper_rsm(void)
{
}
1854
1855#else
1856
/* SMM state-save revision ID stored at SMRAM+0x7efc: bit 17 set means
   SMBASE relocation is supported; 0x64 in the low word selects the
   64-bit (AMD64) save-area format. */
#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
1862
/*
 * Enter System Management Mode.
 *
 * Saves the complete CPU state into the SMRAM state-save area at
 * smbase+0x8000 (layout differs between the 64-bit and legacy formats),
 * then loads the fixed SMM startup state: flat 4G segments, CS at
 * smbase>>4, EIP 0x8000, paging and protection off.
 */
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit save format: segment registers at 0x7e00, 16 bytes each. */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* General purpose registers RAX..RDI then R8..R15 downward from 0x7ff8. */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* Legacy 32-bit save format. */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    /* Segment descriptors: selectors packed at 0x7fa8, bodies split in
       two groups of 12-byte records. */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1995
/*
 * RSM instruction: resume from System Management Mode.
 *
 * Restores the full CPU state previously saved by do_smm_enter() from
 * the SMRAM state-save area at smbase+0x8000, optionally relocating
 * SMBASE when the revision ID advertises that capability (bit 17).
 * Under VBOX the SMM path is not supported and aborts.
 */
void helper_rsm(void)
{
#ifdef VBOX
    cpu_abort(env, "helper_rsm");
#else /* !VBOX */
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    /* 64-bit save format: segment registers at 0x7e00, 16 bytes each. */
    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
#ifdef VBOX
    env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
    env->ldt.newselector = 0;
#endif

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
#ifdef VBOX
    env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
    env->tr.newselector = 0;
#endif

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation supported: pick up the (2K-aligned) new base. */
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    /* Legacy 32-bit save format. */
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
#ifdef VBOX
    env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
    env->tr.newselector = 0;
#endif

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
#ifdef VBOX
    env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
    env->ldt.newselector = 0;
#endif

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
#endif /* !VBOX */
}
2133
2134#endif /* !CONFIG_USER_ONLY */
2135
2136
2137/* division, flags are undefined */
2138
2139void helper_divb_AL(target_ulong t0)
2140{
2141 unsigned int num, den, q, r;
2142
2143 num = (EAX & 0xffff);
2144 den = (t0 & 0xff);
2145 if (den == 0) {
2146 raise_exception(EXCP00_DIVZ);
2147 }
2148 q = (num / den);
2149 if (q > 0xff)
2150 raise_exception(EXCP00_DIVZ);
2151 q &= 0xff;
2152 r = (num % den) & 0xff;
2153 EAX = (EAX & ~0xffff) | (r << 8) | q;
2154}
2155
2156void helper_idivb_AL(target_ulong t0)
2157{
2158 int num, den, q, r;
2159
2160 num = (int16_t)EAX;
2161 den = (int8_t)t0;
2162 if (den == 0) {
2163 raise_exception(EXCP00_DIVZ);
2164 }
2165 q = (num / den);
2166 if (q != (int8_t)q)
2167 raise_exception(EXCP00_DIVZ);
2168 q &= 0xff;
2169 r = (num % den) & 0xff;
2170 EAX = (EAX & ~0xffff) | (r << 8) | q;
2171}
2172
2173void helper_divw_AX(target_ulong t0)
2174{
2175 unsigned int num, den, q, r;
2176
2177 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2178 den = (t0 & 0xffff);
2179 if (den == 0) {
2180 raise_exception(EXCP00_DIVZ);
2181 }
2182 q = (num / den);
2183 if (q > 0xffff)
2184 raise_exception(EXCP00_DIVZ);
2185 q &= 0xffff;
2186 r = (num % den) & 0xffff;
2187 EAX = (EAX & ~0xffff) | q;
2188 EDX = (EDX & ~0xffff) | r;
2189}
2190
2191void helper_idivw_AX(target_ulong t0)
2192{
2193 int num, den, q, r;
2194
2195 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2196 den = (int16_t)t0;
2197 if (den == 0) {
2198 raise_exception(EXCP00_DIVZ);
2199 }
2200 q = (num / den);
2201 if (q != (int16_t)q)
2202 raise_exception(EXCP00_DIVZ);
2203 q &= 0xffff;
2204 r = (num % den) & 0xffff;
2205 EAX = (EAX & ~0xffff) | q;
2206 EDX = (EDX & ~0xffff) | r;
2207}
2208
2209void helper_divl_EAX(target_ulong t0)
2210{
2211 unsigned int den, r;
2212 uint64_t num, q;
2213
2214 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2215 den = t0;
2216 if (den == 0) {
2217 raise_exception(EXCP00_DIVZ);
2218 }
2219 q = (num / den);
2220 r = (num % den);
2221 if (q > 0xffffffff)
2222 raise_exception(EXCP00_DIVZ);
2223 EAX = (uint32_t)q;
2224 EDX = (uint32_t)r;
2225}
2226
2227void helper_idivl_EAX(target_ulong t0)
2228{
2229 int den, r;
2230 int64_t num, q;
2231
2232 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2233 den = t0;
2234 if (den == 0) {
2235 raise_exception(EXCP00_DIVZ);
2236 }
2237 q = (num / den);
2238 r = (num % den);
2239 if (q != (int32_t)q)
2240 raise_exception(EXCP00_DIVZ);
2241 EAX = (uint32_t)q;
2242 EDX = (uint32_t)r;
2243}
2244
2245/* bcd */
2246
2247/* XXX: exception */
2248void helper_aam(int base)
2249{
2250 int al, ah;
2251 al = EAX & 0xff;
2252 ah = al / base;
2253 al = al % base;
2254 EAX = (EAX & ~0xffff) | al | (ah << 8);
2255 CC_DST = al;
2256}
2257
2258void helper_aad(int base)
2259{
2260 int al, ah;
2261 al = EAX & 0xff;
2262 ah = (EAX >> 8) & 0xff;
2263 al = ((ah * base) + al) & 0xff;
2264 EAX = (EAX & ~0xffff) | al;
2265 CC_DST = al;
2266}
2267
2268void helper_aaa(void)
2269{
2270 int icarry;
2271 int al, ah, af;
2272 int eflags;
2273
2274 eflags = helper_cc_compute_all(CC_OP);
2275 af = eflags & CC_A;
2276 al = EAX & 0xff;
2277 ah = (EAX >> 8) & 0xff;
2278
2279 icarry = (al > 0xf9);
2280 if (((al & 0x0f) > 9 ) || af) {
2281 al = (al + 6) & 0x0f;
2282 ah = (ah + 1 + icarry) & 0xff;
2283 eflags |= CC_C | CC_A;
2284 } else {
2285 eflags &= ~(CC_C | CC_A);
2286 al &= 0x0f;
2287 }
2288 EAX = (EAX & ~0xffff) | al | (ah << 8);
2289 CC_SRC = eflags;
2290}
2291
2292void helper_aas(void)
2293{
2294 int icarry;
2295 int al, ah, af;
2296 int eflags;
2297
2298 eflags = helper_cc_compute_all(CC_OP);
2299 af = eflags & CC_A;
2300 al = EAX & 0xff;
2301 ah = (EAX >> 8) & 0xff;
2302
2303 icarry = (al < 6);
2304 if (((al & 0x0f) > 9 ) || af) {
2305 al = (al - 6) & 0x0f;
2306 ah = (ah - 1 - icarry) & 0xff;
2307 eflags |= CC_C | CC_A;
2308 } else {
2309 eflags &= ~(CC_C | CC_A);
2310 al &= 0x0f;
2311 }
2312 EAX = (EAX & ~0xffff) | al | (ah << 8);
2313 CC_SRC = eflags;
2314}
2315
2316void helper_daa(void)
2317{
2318 int al, af, cf;
2319 int eflags;
2320
2321 eflags = helper_cc_compute_all(CC_OP);
2322 cf = eflags & CC_C;
2323 af = eflags & CC_A;
2324 al = EAX & 0xff;
2325
2326 eflags = 0;
2327 if (((al & 0x0f) > 9 ) || af) {
2328 al = (al + 6) & 0xff;
2329 eflags |= CC_A;
2330 }
2331 if ((al > 0x9f) || cf) {
2332 al = (al + 0x60) & 0xff;
2333 eflags |= CC_C;
2334 }
2335 EAX = (EAX & ~0xff) | al;
2336 /* well, speed is not an issue here, so we compute the flags by hand */
2337 eflags |= (al == 0) << 6; /* zf */
2338 eflags |= parity_table[al]; /* pf */
2339 eflags |= (al & 0x80); /* sf */
2340 CC_SRC = eflags;
2341}
2342
2343void helper_das(void)
2344{
2345 int al, al1, af, cf;
2346 int eflags;
2347
2348 eflags = helper_cc_compute_all(CC_OP);
2349 cf = eflags & CC_C;
2350 af = eflags & CC_A;
2351 al = EAX & 0xff;
2352
2353 eflags = 0;
2354 al1 = al;
2355 if (((al & 0x0f) > 9 ) || af) {
2356 eflags |= CC_A;
2357 if (al < 6 || cf)
2358 eflags |= CC_C;
2359 al = (al - 6) & 0xff;
2360 }
2361 if ((al1 > 0x99) || cf) {
2362 al = (al - 0x60) & 0xff;
2363 eflags |= CC_C;
2364 }
2365 EAX = (EAX & ~0xff) | al;
2366 /* well, speed is not an issue here, so we compute the flags by hand */
2367 eflags |= (al == 0) << 6; /* zf */
2368 eflags |= parity_table[al]; /* pf */
2369 eflags |= (al & 0x80); /* sf */
2370 CC_SRC = eflags;
2371}
2372
2373void helper_into(int next_eip_addend)
2374{
2375 int eflags;
2376 eflags = helper_cc_compute_all(CC_OP);
2377 if (eflags & CC_O) {
2378 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2379 }
2380}
2381
/* CMPXCHG8B: compare EDX:EAX with the 64-bit value at [a0]. On a
   match, store ECX:EBX there and set ZF; otherwise load the memory
   value into EDX:EAX and clear ZF. The memory write happens on both
   paths, matching the instruction's unconditional store behavior. */
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
2401
#ifdef TARGET_X86_64
/* CMPXCHG16B: compare RDX:RAX with the 128-bit value at [a0]. On a
   match, store RCX:RBX there and set ZF; otherwise load the memory
   value into RDX:RAX and clear ZF. The operand must be 16-byte
   aligned, else #GP. The memory writes happen on both paths. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
2428
/* Deliver a single-step debug trap (#DB). Outside user-only builds,
   hardware breakpoints are re-evaluated and DR6.BS is set first. */
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
2437
2438void helper_cpuid(void)
2439{
2440 uint32_t eax, ebx, ecx, edx;
2441
2442 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2443
2444 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2445 EAX = eax;
2446 EBX = ebx;
2447 ECX = ecx;
2448 EDX = edx;
2449}
2450
/* ENTER with a non-zero nesting level: copy level-1 frame pointers
   from the old frame (via EBP) onto the new stack, then push t1 (the
   new frame pointer value). `data32` selects 32- vs 16-bit operand
   size. All stack accesses are based at SS.base and masked with the
   SS-size stack-pointer mask; ESP itself is updated by the caller. */
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
2482
#ifdef TARGET_X86_64
/* 64-bit-mode ENTER with a non-zero nesting level: same frame-pointer
   copying as helper_enter_level, but with flat addressing (no SS base
   or stack mask). `data64` selects 64- vs 16-bit operand size. */
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
2513
/* LLDT: load the LDT register from a GDT selector.
   A null selector installs an empty LDT (marked unusable under VBOX).
   Otherwise the selector must reference the GDT (TI clear), lie within
   the GDT limit (16-byte entries in long mode, 8-byte otherwise), be an
   LDT system descriptor (S=0, type 2) and be present; in long mode the
   third descriptor dword supplies base bits 63:32. */
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
#ifndef VBOX
    int index, entry_limit;
#else
    unsigned int index, entry_limit;
#endif
    target_ulong ptr;

#ifdef VBOX
    Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
         (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
#endif

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
#ifdef VBOX
        env->ldt.flags = DESC_INTEL_UNUSABLE;
        env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
        env->ldt.newselector = 0;
#endif
    } else {
        /* an LDT selector must point into the GDT */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor of LDT type (S=0, type=2) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            /* long mode: 16-byte descriptor, dword 3 holds base 63:32 */
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
#ifdef VBOX
    Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
         (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
#endif
}
2578
/* LTR: load the task register from a GDT selector.
   The descriptor must be an available 286/386 TSS (type 1 or 9, S=0)
   and present; it is then marked busy in the GDT. In long mode the
   16-byte descriptor form supplies base bits 63:32 and its upper type
   nibble must be zero. Under VBOX, a null selector raises #TS instead
   of loading an invalid TR (non-VBOX builds clear TR). */
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
#ifndef VBOX
    int index, type, entry_limit;
#else
    unsigned int index;
    int type, entry_limit;
#endif
    target_ulong ptr;

#ifdef VBOX
    Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
         (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
         env->tr.flags, (RTSEL)(selector & 0xffff)));
#endif
    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
#ifdef VBOX
        raise_exception_err(EXCP0A_TSS, 0);
#else
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
#endif
    } else {
        /* a TSS selector must point into the GDT */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an *available* 286 (1) or 386 (9) TSS descriptor */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            /* long mode: 16-byte descriptor; dword 3 = base 63:32,
               dword 4 type nibble must be zero */
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy both in the cache and in the GDT entry */
        env->tr.flags |= DESC_TSS_BUSY_MASK;
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
#ifdef VBOX
    Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
         (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
         env->tr.flags, (RTSEL)(selector & 0xffff)));
#endif
}
2653
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data/stack segment register (MOV sreg / POP sreg semantics):
   validate the selector against GDT/LDT, enforce the SS writability
   and DPL/RPL/CPL rules, set the descriptor's accessed bit, and fill
   the segment cache. Null selectors are allowed except for SS (and
   even for SS in 64-bit mode at CPL != 3 in the VBOX variant). */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
#ifndef VBOX
    int index;
#else
    unsigned int index;
#endif
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
#ifdef VBOX

    /* Trying to load a selector with CPL=1? */
    /* raw-ring-0 hack: demote RPL-1 selectors to RPL-0 */
    if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
    {
        Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
        selector = selector & 0xfffc;
    }
#endif /* VBOX */
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
#ifndef VBOX
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
#else
        if (seg_reg == R_SS) {
            if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
                raise_exception_err(EXCP0D_GPF, 0);
            e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
        } else {
            e2 = DESC_INTEL_UNUSABLE;
        }
        cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
#endif
    } else {

        /* TI bit selects LDT vs GDT */
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* system descriptors cannot be loaded into data segment regs */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        /* not-present: #SS for stack loads, #NP otherwise */
        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
2756
/* protected mode jump */
/* Far JMP in protected mode. Direct transfers go to a code segment
   (conforming: DPL <= CPL; non-conforming: RPL <= CPL and DPL == CPL)
   with an EIP-vs-limit check. Gate transfers dispatch on the system
   descriptor type: TSS/task gate triggers a task switch, a 286/386
   call gate redirects to the target code segment it names. */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

#ifdef VBOX /** @todo Why do we do this? */
    e1 = e2 = 0;
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
#ifdef VBOX
        if (!(e2 & DESC_A_MASK))
            e2 = set_segment_accessed(new_cs, e2);
#endif
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            /* target CS:EIP come from the gate descriptor itself */
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-514 of 253666.pdf */
                raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
#else
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
#endif
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
2857
/* real mode call */
/* Far CALL in real (or vm86) mode: push the return CS:IP on the
   current stack (32- or 16-bit pushes according to `shift`), then
   load CS the real-mode way (base = selector << 4). */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
2883
/* protected mode call */
/* Far CALL in protected mode. Three shapes:
   - direct call to a code segment (64-bit pushes when shift == 2),
   - task switch through a TSS or task gate,
   - call through a 286/386 call gate, possibly switching to the
     inner-privilege stack from the TSS and copying `param_count`
     parameters from the old stack before pushing the return CS:IP. */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

#ifdef VBOX /** @todo Why do we do this? */
    e1 = e2 = 0;
#endif
    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
#ifdef VBOX
        if (!(e2 & DESC_A_MASK))
            e2 = set_segment_accessed(new_cs, e2);
#endif

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* type bit 3 distinguishes 386 (32-bit) from 286 (16-bit) gates */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-99 of 253666.pdf */
                raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
#else
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
#endif

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            /* push old SS:ESP and copy the parameters to the new stack */
            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
3095
/* real and vm86 mode iret */
/* IRET in real or vm86 mode: pop IP, CS and FLAGS (32- or 16-bit pops
   according to `shift`) and reload them, restoring only the EFLAGS
   bits permitted for the current mode. The VBOX variant additionally
   implements the VME rules for vm86 with IOPL < 3: #GP when the new
   flags would set TF, or IF while VIP is pending, and IF is reflected
   into VIF instead of the real interrupt flag. */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;
#ifdef VBOX
    bool fVME = false;

    remR3TrapClear(env->pVM);
#endif /* VBOX */

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef VBOX
    if (    (env->eflags & VM_MASK)
        &&  ((env->eflags >> IOPL_SHIFT) & 3) != 3
        &&  (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
    {
        fVME = true;
        /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
        /* if TF will be set -> #GP */
        if (    ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
            ||  (new_eflags & TF_MASK))
            raise_exception(EXCP0D_GPF);
    }
#endif /* VBOX */
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
#ifdef VBOX
    if (fVME)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
    else
#endif
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef VBOX
    if (fVME)
    {
        /* VME: the popped IF goes into VIF, not IF */
        if (new_eflags & IF_MASK)
            env->eflags |= VIF_MASK;
        else
            env->eflags &= ~VIF_MASK;
    }
#endif /* VBOX */
}
3163
3164static inline void validate_seg(int seg_reg, int cpl)
3165{
3166 int dpl;
3167 uint32_t e2;
3168
3169 /* XXX: on x86_64, we do not want to nullify FS and GS because
3170 they may still contain a valid base. I would be interested to
3171 know how a real x86_64 CPU behaves */
3172 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3173 (env->segs[seg_reg].selector & 0xfffc) == 0)
3174 return;
3175
3176 e2 = env->segs[seg_reg].flags;
3177 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3178 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3179 /* data or non conforming code segment */
3180 if (dpl < cpl) {
3181 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3182 }
3183 }
3184}
3185
/* protected mode iret */
/*
 * Common worker for protected-mode IRET (is_iret != 0) and far RET.
 *
 * @param shift   Operand size: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit pops.
 * @param is_iret Non-zero for IRET (also pops and reloads EFLAGS).
 * @param addend  Extra bytes to release from the stack (RET imm16).
 *
 * Pops CS:EIP (and EFLAGS for IRET), performs the SDM protection checks
 * (#GP/#NP on bad selectors), and either stays at the same privilege
 * level or additionally pops SS:ESP and revalidates the data segment
 * registers for a return to an outer ring.  A VM flag in the popped
 * EFLAGS diverts to the return-to-vm86 path.
 */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef VBOX /** @todo Why do we do this? */
    ss_e1 = ss_e2 = e1 = e2 = 0;
#endif

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
/* NOTE(review): redefining LOG_GROUP mid-function is unusual; presumably
   intended for the Log() statements below — confirm it does not clash
   with an earlier LOG_GROUP definition in this translation unit. */
#define LOG_GROUP LOG_GROUP_REM
#if defined(VBOX) && defined(DEBUG)
            Log(("iret: new CS     %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
            Log(("iret: new EIP    %08X\n", (uint32_t)new_eip));
            Log(("iret: new EFLAGS %08X\n", new_eflags));
            Log(("iret: EAX=%08x\n", (uint32_t)EAX));
#endif
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
#ifdef VBOX
        /* Raw-mode ring compression: undo the RPL 1/2 mangling applied to
           guest ring-0/1 code when running in CPU_RAW_RING0 mode. */
        if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
        {
            if (    !EMIsRawRing1Enabled(env->pVM)
                ||  env->segs[R_CS].selector == (new_cs & 0xfffc))
            {
                Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
                new_cs = new_cs & 0xfffc;
            }
            else
            {
                /* Ugly assumption: assume a genuine switch to ring-1. */
                Log(("Genuine switch to ring-1 (iret)\n"));
            }
        }
        else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
        {
            Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
            new_cs = (new_cs & 0xfffc) | 1;
        }
#endif
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    /* CS protection checks: non-null, present, code segment, RPL >= CPL,
       DPL <= RPL for conforming / DPL == RPL for non-conforming. */
    if ((new_cs & 0xfffc) == 0)
    {
#if defined(VBOX) && defined(DEBUG)
        Log(("new_cs & 0xfffc) == 0\n"));
#endif
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(&e1, &e2, new_cs) != 0)
    {
#if defined(VBOX) && defined(DEBUG)
        Log(("load_segment failed\n"));
#endif
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
    {
#if defined(VBOX) && defined(DEBUG)
        Log(("e2 mask %08x\n", e2));
#endif
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
    {
#if defined(VBOX) && defined(DEBUG)
        Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
#endif
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;

    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
        {
#if defined(VBOX) && defined(DEBUG)
            Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
#endif
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl)
        {
#if defined(VBOX) && defined(DEBUG)
            Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
#endif
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK))
    {
#if defined(VBOX) && defined(DEBUG)
        Log(("DESC_P_MASK e2=%08x\n", e2));
#endif
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
#ifdef VBOX
        if (!(e2 & DESC_A_MASK))
            e2 = set_segment_accessed(new_cs, e2);
#endif
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        /* Pop the outer stack pointer SS:ESP saved by the earlier call/int. */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
# ifndef VBOX
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
# else /* VBOX */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
                if (!(e2 & DESC_A_MASK))
                    e2 = set_segment_accessed(new_cs, e2);
                cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
                                                        0, 0xffffffff,
                                                        DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
                ss_e2 = DESC_B_MASK; /* not really used */
            } else
# endif
#endif
            {
#if defined(VBOX) && defined(DEBUG)
                Log(("NULL ss, rpl=%d\n", rpl));
#endif
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            /* SS checks: RPL match, loadable, writable data segment,
               DPL == RPL, present. */
            if ((new_ss & 3) != rpl)
            {
#if defined(VBOX) && defined(DEBUG)
                Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
#endif
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            {
#if defined(VBOX) && defined(DEBUG)
                Log(("new_ss=%x load error\n", new_ss));
#endif
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
            {
#if defined(VBOX) && defined(DEBUG)
                Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
#endif
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
            {
#if defined(VBOX) && defined(DEBUG)
                Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
#endif
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK))
            {
#if defined(VBOX) && defined(DEBUG)
                Log(("new_ss=%#x #NP\n", new_ss));
#endif
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            }
#ifdef VBOX
            if (!(e2 & DESC_A_MASK))
                e2 = set_segment_accessed(new_cs, e2);
            if (!(ss_e2 & DESC_A_MASK))
                ss_e2 = set_segment_accessed(new_ss, ss_e2);
#endif
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
#ifdef VBOX
            eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
#else
            eflags_mask |= IOPL_MASK;
#endif
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

    /* IRET with EFLAGS.VM set in the popped image: restore full vm86
       state (segments and ESP come from the ring-0 stack). */
 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
3501
/*
 * Protected-mode IRET entry point.
 *
 * @param shift    Operand size (0/1/2 = 16/32/64-bit).
 * @param next_eip EIP of the instruction following IRET (for task switch).
 *
 * If EFLAGS.NT is set, performs a task return through the back link in
 * the current TSS (#TS on a bad selector, #GP in long mode where NT
 * IRET is illegal); otherwise defers to helper_ret_protected().  Always
 * clears the NMI-blocking flag afterwards.
 */
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

#ifdef VBOX
    Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
    e1 = e2 = 0; /** @todo Why do we do this? */
    remR3TrapClear(env->pVM);
#endif

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
        {
#if defined(VBOX) && defined(DEBUG)
            Log(("eflags.NT=1 on iret in long mode\n"));
#endif
            raise_exception_err(EXCP0D_GPF, 0);
        }
#endif
        /* The previous task's selector is the first word of the TSS. */
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)            /* must be in the GDT, not LDT */
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
3539
/* Protected-mode far RET: same worker as IRET but without the EFLAGS
   pop; 'addend' releases RET imm16 bytes from the stack. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
3544
/*
 * SYSENTER: fast transition to ring 0 driven by the SYSENTER_* MSRs.
 * #GP(0) if IA32_SYSENTER_CS is zero.  Loads flat CS (64-bit in long
 * mode) and SS = CS+8, then jumps to SYSENTER_EIP with SYSENTER_ESP.
 */
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    /* VM, IF and RF are cleared on entry. */
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
3577
3578void helper_sysexit(int dflag)
3579{
3580 int cpl;
3581
3582 cpl = env->hflags & HF_CPL_MASK;
3583 if (env->sysenter_cs == 0 || cpl != 0) {
3584 raise_exception_err(EXCP0D_GPF, 0);
3585 }
3586 cpu_x86_set_cpl(env, 3);
3587#ifdef TARGET_X86_64
3588 if (dflag == 2) {
3589 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3590 0, 0xffffffff,
3591 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3592 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3593 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3594 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3595 0, 0xffffffff,
3596 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3597 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3598 DESC_W_MASK | DESC_A_MASK);
3599 } else
3600#endif
3601 {
3602 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3603 0, 0xffffffff,
3604 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3605 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3606 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3607 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3608 0, 0xffffffff,
3609 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3610 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3611 DESC_W_MASK | DESC_A_MASK);
3612 }
3613 ESP = ECX;
3614 EIP = EDX;
3615}
3616
3617#if defined(CONFIG_USER_ONLY)
/* User-mode emulation stub: control registers are not modelled. */
target_ulong helper_read_crN(int reg)
{
    return 0;
}
3622
/* User-mode emulation stub: control register writes are ignored. */
void helper_write_crN(int reg, target_ulong t0)
{
}
3626
/* User-mode emulation stub: debug register writes are ignored. */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
3630#else
/*
 * MOV from CRn.  Most registers come straight from env->cr[]; CR8 reads
 * the task-priority register from the (virtual) APIC, or the SVM V_TPR
 * shadow while a virtual-interrupt intercept is active.
 */
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
#ifndef VBOX
            val = cpu_get_apic_tpr(env->apic_state);
#else /* VBOX */
            val = cpu_get_apic_tpr(env);
#endif /* VBOX */
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}
3654
/*
 * MOV to CRn.  CR0/CR3/CR4 go through the cpu_x86_update_* helpers so
 * dependent state (paging, TLB, hflags) is refreshed; CR8 updates the
 * APIC TPR (unless virtualized by SVM) and always the V_TPR shadow.
 */
void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
#ifndef VBOX
            cpu_set_apic_tpr(env->apic_state, t0);
#else /* VBOX */
            cpu_set_apic_tpr(env, t0);
#endif /* VBOX */
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
3683
/*
 * MOV to DRn.  DR0-3 update a single hardware breakpoint; DR7 (and its
 * alias DR5 in the VBOX build) re-arms all four after validating the
 * must-be-zero bits; everything else falls through to DR6 handling.
 */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        /* Re-register the breakpoint so the new address takes effect. */
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
# ifndef VBOX
    } else if (reg == 7) {
# else
    } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
        /* Enforce the architectural MBZ/RA1/RAZ bit layout of DR7. */
        if (t0 & X86_DR7_MBZ_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
        t0 |= X86_DR7_RA1_MASK;
        t0 &= ~X86_DR7_RAZ_MASK;
# endif
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else {
# ifndef VBOX
        env->dr[reg] = t0;
# else
        /* Enforce the architectural MBZ/RA1/RAZ bit layout of DR6. */
        if (t0 & X86_DR6_MBZ_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
        t0 |= X86_DR6_RA1_MASK;
        t0 &= ~X86_DR6_RAZ_MASK;
        env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
# endif
    }
}
3718#endif
3719
3720void helper_lmsw(target_ulong t0)
3721{
3722 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3723 if already set to one. */
3724 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3725 helper_write_crN(0, t0);
3726}
3727
/* CLTS: clear CR0.TS and the cached TS flag in hflags. */
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
3733
/* INVLPG: flush the TLB entry covering 'addr' (after SVM intercept check). */
void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
3739
/* RDTSC: read the time-stamp counter into EDX:EAX.
   #GP if CR4.TSD restricts the instruction to CPL 0. */
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    /* tsc_offset accounts for the guest/host TSC delta. */
    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
3753
/* RDTSCP: RDTSC plus the TSC_AUX MSR in ECX.  The VBOX build forwards
   the MSR read to CPUM and yields 0 if the MSR is unimplemented. */
void helper_rdtscp(void)
{
    helper_rdtsc();
#ifndef VBOX
    ECX = (uint32_t)(env->tsc_aux);
#else /* VBOX */
    uint64_t val;
    if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
        ECX = (uint32_t)(val);
    else
        ECX = 0;
#endif /* VBOX */
}
3767
3768void helper_rdpmc(void)
3769{
3770#ifdef VBOX
3771 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3772 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3773 raise_exception(EXCP0D_GPF);
3774 }
3775 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3776 EAX = 0;
3777 EDX = 0;
3778#else /* !VBOX */
3779 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3780 raise_exception(EXCP0D_GPF);
3781 }
3782 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3783
3784 /* currently unimplemented */
3785 raise_exception_err(EXCP06_ILLOP, 0);
3786#endif /* !VBOX */
3787}
3788
3789#if defined(CONFIG_USER_ONLY)
/* User-mode emulation stub: MSR writes are ignored. */
void helper_wrmsr(void)
{
}
3793
/* User-mode emulation stub: MSR reads do nothing (EDX:EAX unchanged). */
void helper_rdmsr(void)
{
}
3797#else
3798void helper_wrmsr(void)
3799{
3800 uint64_t val;
3801
3802 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3803
3804 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3805
3806 switch((uint32_t)ECX) {
3807 case MSR_IA32_SYSENTER_CS:
3808 env->sysenter_cs = val & 0xffff;
3809 break;
3810 case MSR_IA32_SYSENTER_ESP:
3811 env->sysenter_esp = val;
3812 break;
3813 case MSR_IA32_SYSENTER_EIP:
3814 env->sysenter_eip = val;
3815 break;
3816 case MSR_IA32_APICBASE:
3817# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3818 cpu_set_apic_base(env->apic_state, val);
3819# endif
3820 break;
3821 case MSR_EFER:
3822 {
3823 uint64_t update_mask;
3824 update_mask = 0;
3825 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3826 update_mask |= MSR_EFER_SCE;
3827 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3828 update_mask |= MSR_EFER_LME;
3829 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3830 update_mask |= MSR_EFER_FFXSR;
3831 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3832 update_mask |= MSR_EFER_NXE;
3833 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3834 update_mask |= MSR_EFER_SVME;
3835 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3836 update_mask |= MSR_EFER_FFXSR;
3837 cpu_load_efer(env, (env->efer & ~update_mask) |
3838 (val & update_mask));
3839 }
3840 break;
3841 case MSR_STAR:
3842 env->star = val;
3843 break;
3844 case MSR_PAT:
3845 env->pat = val;
3846 break;
3847 case MSR_VM_HSAVE_PA:
3848 env->vm_hsave = val;
3849 break;
3850#ifdef TARGET_X86_64
3851 case MSR_LSTAR:
3852 env->lstar = val;
3853 break;
3854 case MSR_CSTAR:
3855 env->cstar = val;
3856 break;
3857 case MSR_FMASK:
3858 env->fmask = val;
3859 break;
3860 case MSR_FSBASE:
3861 env->segs[R_FS].base = val;
3862 break;
3863 case MSR_GSBASE:
3864 env->segs[R_GS].base = val;
3865 break;
3866 case MSR_KERNELGSBASE:
3867 env->kernelgsbase = val;
3868 break;
3869#endif
3870# ifndef VBOX
3871 case MSR_MTRRphysBase(0):
3872 case MSR_MTRRphysBase(1):
3873 case MSR_MTRRphysBase(2):
3874 case MSR_MTRRphysBase(3):
3875 case MSR_MTRRphysBase(4):
3876 case MSR_MTRRphysBase(5):
3877 case MSR_MTRRphysBase(6):
3878 case MSR_MTRRphysBase(7):
3879 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3880 break;
3881 case MSR_MTRRphysMask(0):
3882 case MSR_MTRRphysMask(1):
3883 case MSR_MTRRphysMask(2):
3884 case MSR_MTRRphysMask(3):
3885 case MSR_MTRRphysMask(4):
3886 case MSR_MTRRphysMask(5):
3887 case MSR_MTRRphysMask(6):
3888 case MSR_MTRRphysMask(7):
3889 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3890 break;
3891 case MSR_MTRRfix64K_00000:
3892 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3893 break;
3894 case MSR_MTRRfix16K_80000:
3895 case MSR_MTRRfix16K_A0000:
3896 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3897 break;
3898 case MSR_MTRRfix4K_C0000:
3899 case MSR_MTRRfix4K_C8000:
3900 case MSR_MTRRfix4K_D0000:
3901 case MSR_MTRRfix4K_D8000:
3902 case MSR_MTRRfix4K_E0000:
3903 case MSR_MTRRfix4K_E8000:
3904 case MSR_MTRRfix4K_F0000:
3905 case MSR_MTRRfix4K_F8000:
3906 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3907 break;
3908 case MSR_MTRRdefType:
3909 env->mtrr_deftype = val;
3910 break;
3911 case MSR_MCG_STATUS:
3912 env->mcg_status = val;
3913 break;
3914 case MSR_MCG_CTL:
3915 if ((env->mcg_cap & MCG_CTL_P)
3916 && (val == 0 || val == ~(uint64_t)0))
3917 env->mcg_ctl = val;
3918 break;
3919 case MSR_TSC_AUX:
3920 env->tsc_aux = val;
3921 break;
3922# endif /* !VBOX */
3923 default:
3924# ifndef VBOX
3925 if ((uint32_t)ECX >= MSR_MC0_CTL
3926 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3927 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3928 if ((offset & 0x3) != 0
3929 || (val == 0 || val == ~(uint64_t)0))
3930 env->mce_banks[offset] = val;
3931 break;
3932 }
3933 /* XXX: exception ? */
3934# endif
3935 break;
3936 }
3937
3938# ifdef VBOX
3939 /* call CPUM. */
3940 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3941 {
3942 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3943 }
3944# endif
3945}
3946
/*
 * RDMSR: read the MSR selected by ECX into EDX:EAX.
 *
 * Known MSRs are served from the QEMU-side cached state; in the VBOX
 * build unknown MSRs are forwarded to CPUM (cpu_rdmsr), and a strict
 * build cross-checks the cached value against CPUM afterwards.
 */
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
#ifndef VBOX
        val = cpu_get_apic_base(env->apic_state);
#else /* VBOX */
        val = cpu_get_apic_base(env);
#endif /* VBOX */
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
# endif /* !VBOX */
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
# ifndef VBOX
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
# endif /*!VBOX*/
#endif
# ifndef VBOX
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
# endif /* !VBOX */
    default:
# ifndef VBOX
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
# else /* VBOX */
        /* Unknown MSR: defer to CPUM, which holds the real guest state. */
        if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
        {
            /** @todo be a brave man and raise a \#GP(0) here as we should... */
            val = 0;
        }
# endif /* VBOX */
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);

# ifdef VBOX_STRICT
    /* Cross-check the cached value against CPUM (TSC is excluded since
       it moves between the two reads). */
    if ((uint32_t)ECX != MSR_IA32_TSC) {
        if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
            val = 0;
        AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
    }
# endif
}
4106#endif
4107
/*
 * LSL: load the (expanded) segment limit of 'selector1'.
 * Sets ZF and returns the limit on success; clears ZF and returns 0 on
 * any failure (null selector, unreadable descriptor, privilege check,
 * or a system-segment type without a limit).  Note the 'fail:' label
 * sits inside the system-segment branch and is entered by goto from the
 * earlier checks as well.
 */
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* Only TSS (busy/available) and LDT system types have limits. */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
4152
/*
 * LAR: load the access-rights bytes of 'selector1'.
 * Sets ZF and returns the masked descriptor rights on success; clears
 * ZF and returns 0 on failure.  Accepts more system types than LSL
 * (call/task gates included).
 */
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
#ifdef VBOX /* AMD says 0x00ffff00, while intel says 0x00fxff00. Bochs and IEM does like AMD says (x=f). */
    return e2 & 0x00ffff00;
#else
    return e2 & 0x00f0ff00;
#endif
}
4202
/*
 * VERR: set ZF if the segment named by 'selector1' is readable at the
 * current privilege level (code segments must be readable; the DPL
 * check is skipped for conforming code), otherwise clear ZF.
 */
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
4235
/*
 * VERW: set ZF if the segment named by 'selector1' is a writable data
 * segment accessible at the current privilege level (code segments are
 * never writable), otherwise clear ZF.
 */
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
4265
4266/* x87 FPU helpers */
4267
4268static void fpu_set_exception(int mask)
4269{
4270 env->fpus |= mask;
4271 if (env->fpus & (~env->fpuc & FPUC_EM))
4272 env->fpus |= FPUS_SE | FPUS_B;
4273}
4274
4275static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4276{
4277 if (b == 0.0)
4278 fpu_set_exception(FPUS_ZE);
4279 return a / b;
4280}
4281
/* Deliver a pending x87 exception: #MF when CR0.NE is set, otherwise
   signal FERR through the chipset (legacy external-interrupt path). */
static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
4293
4294void helper_flds_FT0(uint32_t val)
4295{
4296 union {
4297 float32 f;
4298 uint32_t i;
4299 } u;
4300 u.i = val;
4301 FT0 = float32_to_floatx(u.f, &env->fp_status);
4302}
4303
4304void helper_fldl_FT0(uint64_t val)
4305{
4306 union {
4307 float64 f;
4308 uint64_t i;
4309 } u;
4310 u.i = val;
4311 FT0 = float64_to_floatx(u.f, &env->fp_status);
4312}
4313
/* Convert a 32-bit signed integer to the internal FP format in FT0. */
void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}
4318
4319void helper_flds_ST0(uint32_t val)
4320{
4321 int new_fpstt;
4322 union {
4323 float32 f;
4324 uint32_t i;
4325 } u;
4326 new_fpstt = (env->fpstt - 1) & 7;
4327 u.i = val;
4328 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4329 env->fpstt = new_fpstt;
4330 env->fptags[new_fpstt] = 0; /* validate stack entry */
4331}
4332
4333void helper_fldl_ST0(uint64_t val)
4334{
4335 int new_fpstt;
4336 union {
4337 float64 f;
4338 uint64_t i;
4339 } u;
4340 new_fpstt = (env->fpstt - 1) & 7;
4341 u.i = val;
4342 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4343 env->fpstt = new_fpstt;
4344 env->fptags[new_fpstt] = 0; /* validate stack entry */
4345}
4346
4347void helper_fildl_ST0(int32_t val)
4348{
4349 int new_fpstt;
4350 new_fpstt = (env->fpstt - 1) & 7;
4351 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4352 env->fpstt = new_fpstt;
4353 env->fptags[new_fpstt] = 0; /* validate stack entry */
4354}
4355
4356void helper_fildll_ST0(int64_t val)
4357{
4358 int new_fpstt;
4359 new_fpstt = (env->fpstt - 1) & 7;
4360 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4361 env->fpstt = new_fpstt;
4362 env->fptags[new_fpstt] = 0; /* validate stack entry */
4363}
4364
4365#ifndef VBOX
4366uint32_t helper_fsts_ST0(void)
4367#else
4368RTCCUINTREG helper_fsts_ST0(void)
4369#endif
4370{
4371 union {
4372 float32 f;
4373 uint32_t i;
4374 } u;
4375 u.f = floatx_to_float32(ST0, &env->fp_status);
4376 return u.i;
4377}
4378
4379uint64_t helper_fstl_ST0(void)
4380{
4381 union {
4382 float64 f;
4383 uint64_t i;
4384 } u;
4385 u.f = floatx_to_float64(ST0, &env->fp_status);
4386 return u.i;
4387}
4388
4389#ifndef VBOX
4390int32_t helper_fist_ST0(void)
4391#else
4392RTCCINTREG helper_fist_ST0(void)
4393#endif
4394{
4395 int32_t val;
4396 val = floatx_to_int32(ST0, &env->fp_status);
4397 if (val != (int16_t)val)
4398 val = -32768;
4399 return val;
4400}
4401
4402#ifndef VBOX
4403int32_t helper_fistl_ST0(void)
4404#else
4405RTCCINTREG helper_fistl_ST0(void)
4406#endif
4407{
4408 int32_t val;
4409 val = floatx_to_int32(ST0, &env->fp_status);
4410 return val;
4411}
4412
4413int64_t helper_fistll_ST0(void)
4414{
4415 int64_t val;
4416 val = floatx_to_int64(ST0, &env->fp_status);
4417 return val;
4418}
4419
4420#ifndef VBOX
4421int32_t helper_fistt_ST0(void)
4422#else
4423RTCCINTREG helper_fistt_ST0(void)
4424#endif
4425{
4426 int32_t val;
4427 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4428 if (val != (int16_t)val)
4429 val = -32768;
4430 return val;
4431}
4432
4433#ifndef VBOX
4434int32_t helper_fisttl_ST0(void)
4435#else
4436RTCCINTREG helper_fisttl_ST0(void)
4437#endif
4438{
4439 int32_t val;
4440 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4441 return val;
4442}
4443
4444int64_t helper_fisttll_ST0(void)
4445{
4446 int64_t val;
4447 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4448 return val;
4449}
4450
4451void helper_fldt_ST0(target_ulong ptr)
4452{
4453 int new_fpstt;
4454 new_fpstt = (env->fpstt - 1) & 7;
4455 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4456 env->fpstt = new_fpstt;
4457 env->fptags[new_fpstt] = 0; /* validate stack entry */
4458}
4459
/* Store ST0 to memory at 'ptr' in 80-bit extended-real format. */
void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
4464
/* Push one entry on the FPU register stack (wrapper around fpush macro). */
void helper_fpush(void)
{
    fpush();
}
4469
/* Pop one entry off the FPU register stack (wrapper around fpop macro). */
void helper_fpop(void)
{
    fpop();
}
4474
/* FDECSTP: decrement top-of-stack pointer; 0x4700 clears C0-C3. */
void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}
4480
/* FINCSTP: increment top-of-stack pointer; 0x4700 clears C0-C3. */
void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}
4486
4487/* FPU move */
4488
/* FFREE ST(i): mark the stack slot as empty via its tag. */
void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}
4493
/* Copy the scratch operand FT0 into ST0. */
void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}
4498
/* Copy ST(i) into the scratch operand FT0. */
void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}
4503
/* Copy ST(i) into ST0 (FLD ST(i) data move). */
void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}
4508
/* Copy ST0 into ST(i) (FST ST(i) data move). */
void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}
4513
/* FXCH: exchange ST0 with ST(i). */
void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}
4521
4522/* FPU operations */
4523
/* FSW condition bits indexed by (compare result + 1): less -> C0 (0x0100),
   equal -> C3 (0x4000), greater -> none, unordered -> C3|C2|C0 (0x4500). */
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4525
/* FCOM: compare ST0 with FT0 (signalling on NaN), setting C0/C2/C3. */
void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}
4533
/* FUCOM: unordered (quiet-NaN tolerant) compare of ST0 with FT0. */
void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
}
4541
/* EFLAGS bits indexed by (compare result + 1) for FCOMI-style compares:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4543
/* FCOMI: compare ST0 with FT0 and set EFLAGS ZF/PF/CF directly. */
void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);   /* materialize lazy flags first */
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
4554
/* FUCOMI: quiet compare of ST0 with FT0, setting EFLAGS ZF/PF/CF. */
void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);   /* materialize lazy flags first */
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
4565
/* FADD: ST0 += FT0. */
void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}
4570
/* FMUL: ST0 *= FT0. */
void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}
4575
/* FSUB: ST0 -= FT0. */
void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}
4580
/* FSUBR: ST0 = FT0 - ST0 (reversed operands). */
void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}
4585
/* FDIV: ST0 = ST0 / FT0 via the shared divide helper. */
void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}
4590
/* FDIVR: ST0 = FT0 / ST0 (reversed operands). */
void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
4595
4596/* fp operations between STN and ST0 */
4597
/* FADD ST(i),ST0: ST(i) += ST0. */
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}
4602
/* FMUL ST(i),ST0: ST(i) *= ST0. */
void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}
4607
/* FSUB ST(i),ST0: ST(i) -= ST0. */
void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}
4612
/* FSUBR ST(i),ST0: ST(i) = ST0 - ST(i). */
void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}
4619
/* FDIV ST(i),ST0: ST(i) = ST(i) / ST0. */
void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}
4626
/* FDIVR ST(i),ST0: ST(i) = ST0 / ST(i). */
void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
4633
4634/* misc FPU operations */
/* FCHS: negate ST0 (sign bit flip, no exceptions). */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}
4639
/* FABS: clear ST0's sign bit. */
void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}
4644
/* FLD1: load +1.0 (f15rk constant table). */
void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}
4649
/* FLDL2T: load log2(10). */
void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}
4654
/* FLDL2E: load log2(e). */
void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}
4659
/* FLDPI: load pi. */
void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}
4664
/* FLDLG2: load log10(2). */
void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}
4669
/* FLDLN2: load ln(2). */
void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}
4674
/* FLDZ: load +0.0 into ST0. */
void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}
4679
/* Load +0.0 into the scratch operand FT0. */
void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}
4684
/* FNSTSW: return the status word with the live top-of-stack field
   (bits 11-13) merged in. */
#ifndef VBOX
uint32_t helper_fnstsw(void)
#else
RTCCUINTREG helper_fnstsw(void)
#endif
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
4693
/* FNSTCW: return the FPU control word. */
#ifndef VBOX
uint32_t helper_fnstcw(void)
#else
RTCCUINTREG helper_fnstcw(void)
#endif
{
    return env->fpuc;
}
4702
/* Propagate the guest FPU control word (rounding mode and, for 80-bit
   builds, rounding precision) into the softfloat status. */
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    /* precision control field (bits 8-9): 00=32-bit, 10=64-bit, 11=80-bit;
       01 is reserved and treated as 80-bit here */
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
4740
/* FLDCW: load the control word and resync softfloat rounding state. */
void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}
4746
/* FNCLEX: clear exception flags, busy and summary bits (keep 0x7f00). */
void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}
4751
/* FWAIT: deliver a pending FPU exception if the summary bit is set. */
void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}
4757
/* FNINIT: reset the FPU to its power-on state (control word 0x37f,
   empty stack, all tags marked empty). */
void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
4772
4773/* BCD ops */
4774
/* FBLD: load an 18-digit packed BCD value (10 bytes, two digits per byte,
   sign in the top byte) from guest memory and push it onto the stack. */
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    /* accumulate from the most significant digit pair downwards */
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)   /* sign byte */
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}
4793
/* FBSTP: store ST0 rounded to integer as 18-digit packed BCD (9 data
   bytes, sign byte at offset 9), zero-padding unused leading bytes. */
void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);   /* negative: sign bit in top byte */
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit digit pairs, least significant first */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill the remaining bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
4821
/* F2XM1: ST0 = 2^ST0 - 1, approximated with the host's pow(). */
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}
4826
/* FYL2X: ST1 = ST1 * log2(ST0), then pop. For ST0 <= 0 the code only
   flags C2 (0x400) and leaves the stack untouched. */
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);    /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
4841
/* FPTAN: ST0 = tan(ST0), then push 1.0. Arguments outside +/-MAXTAN set
   C2 to signal an out-of-range operand. */
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}
4857
/* FPATAN: ST1 = atan2(ST1, ST0), then pop. */
void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}
4867
/* FXTRACT: split ST0 into exponent (in ST1 position after push) and
   significand (new ST0, rebiased to [1,2)). */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);   /* force exponent field to the bias -> mantissa in [1,2) */
    ST0 = temp.d;
}
4881
/* FPREM1: IEEE partial remainder ST0 = ST0 REM ST1 (round-to-nearest
   quotient). When the exponent gap is >= 53 only a partial reduction is
   done and C2 is set so the guest loops; otherwise the low quotient bits
   land in C0/C3/C1 as the architecture requires. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
#else
    /* x != x is the classic NaN test; infinities are not caught here */
    if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
#endif
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4939
/* FPREM: partial remainder with truncated (chop) quotient, the legacy
   8087 semantics. Same partial-reduction / C2-looping structure as
   helper_fprem1 but rounds the quotient toward zero. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
#else
    /* x != x is the classic NaN test; infinities are not caught here */
    if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
#endif
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4998
/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), then pop. For ST0 <= -1 only C2
   is flagged and the stack is left untouched. */
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
5013
/* FSQRT: ST0 = sqrt(ST0). A negative operand flags bit 0x400 before the
   host sqrt() produces a NaN. NOTE(review): 0x400 is C2, not the invalid
   exception bit — looks intentional here but worth confirming. */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}
5025
/* FSINCOS: ST0 = sin(arg), push cos(arg). Out-of-range operands set C2
   so the guest can reduce the argument and retry. */
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}
5041
/* FRNDINT: round ST0 to an integral value per the current rounding mode. */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}
5046
/* FSCALE: ST0 = ST0 * 2^trunc(ST1), via the host ldexp(). */
void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}
5051
/* FSIN: ST0 = sin(ST0); out-of-range operands set C2 for guest retry. */
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}
5065
/* FCOS: ST0 = cos(ST0); out-of-range operands set C2 for guest retry. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg5 < 2**63 only */
    }
}
5079
/* FXAM: classify ST0 into the C3/C2/C0 condition codes — NaN (0x100),
   infinity (0x500), zero (0x4000), denormal (0x4400), normal (0x400) —
   with C1 (0x200) reflecting the sign. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        /* 80-bit format has an explicit integer bit in the mantissa */
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
5111
/* FSTENV/FNSTENV: store the FPU environment (control, status, tag words
   and zeroed instruction/operand pointers) in 32-bit (28-byte) or 16-bit
   (14-byte) layout. The 2-bit tag for each register is recomputed from
   the register contents. */
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
	fptag <<= 2;
	if (env->fptags[i]) {
            fptag |= 3;   /* empty */
	} else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
	        fptag |= 1;
	    } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
5161
/* FLDENV: load the FPU environment stored by helper_fstenv. Only the
   "empty" state (tag == 3) of each 2-bit tag is retained. */
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
	env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
	env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
5183
/* FSAVE/FNSAVE: store environment plus all eight 80-bit registers, then
   reinitialize the FPU (same state as FNINIT). */
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);   /* environment is 14 or 28 bytes */
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
5211
/* FRSTOR: restore environment plus all eight 80-bit registers saved by
   helper_fsave. */
void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);   /* skip the 14/28-byte environment */

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
5226
/* FXSAVE: store the x87/MMX/SSE state into the 512-byte FXSAVE area.
   x87 registers go at offset 0x20 (16 bytes apart), XMM registers at
   0xa0. XMM state is skipped for "fast FXSAVE" (EFER.FFXSR at CPL0 in
   long mode). Raises #GP if ptr is not 16-byte aligned. */
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);   /* FXSAVE uses 1 = valid, inverse of fptags */
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else 
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}
5287
5288void helper_fxrstor(target_ulong ptr, int data64)
5289{
5290 int i, fpus, fptag, nb_xmm_regs;
5291 CPU86_LDouble tmp;
5292 target_ulong addr;
5293
5294 /* The operand must be 16 byte aligned */
5295 if (ptr & 0xf) {
5296 raise_exception(EXCP0D_GPF);
5297 }
5298
5299 env->fpuc = lduw(ptr);
5300 fpus = lduw(ptr + 2);
5301 fptag = lduw(ptr + 4);
5302 env->fpstt = (fpus >> 11) & 7;
5303 env->fpus = fpus & ~0x3800;
5304 fptag ^= 0xff;
5305 for(i = 0;i < 8; i++) {
5306 env->fptags[i] = ((fptag >> i) & 1);
5307 }
5308
5309 addr = ptr + 0x20;
5310 for(i = 0;i < 8; i++) {
5311 tmp = helper_fldt(addr);
5312 ST(i) = tmp;
5313 addr += 16;
5314 }
5315
5316 if (env->cr[4] & CR4_OSFXSR_MASK) {
5317 /* XXX: finish it */
5318 env->mxcsr = ldl(ptr + 0x18);
5319 //ldl(ptr + 0x1c);
5320 if (env->hflags & HF_CS64_MASK)
5321 nb_xmm_regs = 16;
5322 else
5323 nb_xmm_regs = 8;
5324 addr = ptr + 0xa0;
5325 /* Fast FXRESTORE leaves out the XMM registers */
5326 if (!(env->efer & MSR_EFER_FFXSR)
5327 || (env->hflags & HF_CPL_MASK)
5328 || !(env->hflags & HF_LMA_MASK)) {
5329 for(i = 0; i < nb_xmm_regs; i++) {
5330#if !defined(VBOX) || __GNUC__ < 4
5331 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5332 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5333#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5334# if 1
5335 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5336 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5337 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5338 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5339# else
5340 /* this works fine on Mac OS X, gcc 4.0.1 */
5341 uint64_t u64 = ldq(addr);
5342 env->xmm_regs[i].XMM_Q(0);
5343 u64 = ldq(addr + 4);
5344 env->xmm_regs[i].XMM_Q(1) = u64;
5345# endif
5346#endif
5347 addr += 16;
5348 }
5349 }
5350 }
5351}
5352
5353#ifndef USE_X86LDOUBLE
5354
/* Convert the internal double-precision representation to an 80-bit
   extended value: widen the 52-bit mantissa to 64 bits with an explicit
   integer bit, and rebias the exponent from 1023 to 16383. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}
5368
/* Convert an 80-bit extended value (mantissa + sign/exponent word) back
   to the internal double representation, truncating the mantissa to 52
   bits and rebiasing the exponent. */
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}
5387
5388#else
5389
/* 80-bit build: the internal format already is extended precision, so
   just split it into mantissa and sign/exponent words. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}
5398
/* 80-bit build: reassemble an extended-precision value verbatim. */
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
5407#endif
5408
5409#ifdef TARGET_X86_64
5410
5411//#define DEBUG_MULDIV
5412
/* 128-bit add: (*phigh:*plow) += (b:a), propagating the carry out of the
   low word into the high word. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t old_low = *plow;

    *plow = old_low + a;
    if (*plow < old_low) {
        /* unsigned wrap in the low word -> carry into the high word */
        (*phigh)++;
    }
    *phigh += b;
}
5421
/* 128-bit two's-complement negation of (*phigh:*plow): complement both
   words, then add 1 across the full 128 bits. */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *phigh = ~*phigh;
    *plow = ~*plow;
    add128(plow, phigh, 1, 0);
}
5428
5429/* return TRUE if overflow */
5430static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5431{
5432 uint64_t q, r, a1, a0;
5433 int i, qb, ab;
5434
5435 a0 = *plow;
5436 a1 = *phigh;
5437 if (a1 == 0) {
5438 q = a0 / b;
5439 r = a0 % b;
5440 *plow = q;
5441 *phigh = r;
5442 } else {
5443 if (a1 >= b)
5444 return 1;
5445 /* XXX: use a better algorithm */
5446 for(i = 0; i < 64; i++) {
5447 ab = a1 >> 63;
5448 a1 = (a1 << 1) | (a0 >> 63);
5449 if (ab || a1 >= b) {
5450 a1 -= b;
5451 qb = 1;
5452 } else {
5453 qb = 0;
5454 }
5455 a0 = (a0 << 1) | qb;
5456 }
5457#if defined(DEBUG_MULDIV)
5458 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5459 *phigh, *plow, b, a0, a1);
5460#endif
5461 *plow = a0;
5462 *phigh = a1;
5463 }
5464 return 0;
5465}
5466
5467/* return TRUE if overflow */
5468static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5469{
5470 int sa, sb;
5471 sa = ((int64_t)*phigh < 0);
5472 if (sa)
5473 neg128(plow, phigh);
5474 sb = (b < 0);
5475 if (sb)
5476 b = -b;
5477 if (div64(plow, phigh, b) != 0)
5478 return 1;
5479 if (sa ^ sb) {
5480 if (*plow > (1ULL << 63))
5481 return 1;
5482 *plow = - *plow;
5483 } else {
5484 if (*plow >= (1ULL << 63))
5485 return 1;
5486 }
5487 if (sa)
5488 *phigh = - *phigh;
5489 return 0;
5490}
5491
/* MUL r64: RDX:RAX = RAX * t0 (unsigned); lazy flags get the two halves. */
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}
5502
/* IMUL r64 (one-operand): RDX:RAX = RAX * t0 (signed); CC_SRC records
   whether the high half is a sign extension (overflow indicator). */
void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}
5513
/* IMUL r64 (two/three-operand): return low 64 bits of t0 * t1 (signed);
   CC_SRC records whether the result overflowed 64 bits. */
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}
5523
/* DIV r64: RAX = RDX:RAX / t0, RDX = remainder (unsigned). Raises #DE on
   divide-by-zero or quotient overflow. */
void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
5537
/* IDIV r64: RAX = RDX:RAX / t0, RDX = remainder (signed). Raises #DE on
   divide-by-zero or quotient overflow. */
void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
5551#endif
5552
/* Halt the virtual CPU and leave the execution loop with EXCP_HLT. */
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}
5560
/* HLT: check SVM intercept, advance EIP past the instruction, then halt. */
void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}
5568
/* MONITOR: validate ECX (extensions field; VBox accepts 0 and 1, stock
   QEMU only 0) and check the SVM intercept. The address is not recorded. */
void helper_monitor(target_ulong ptr)
{
#ifdef VBOX
    if ((uint32_t)ECX > 1)
        raise_exception(EXCP0D_GPF);
#else /* !VBOX */
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
#endif /* !VBOX */
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}
5581
/* MWAIT: #GP on nonzero ECX, then behave like HLT. VBox reuses the HLT
   helper; stock QEMU only sleeps on single-CPU configurations so another
   CPU's store can still wake this one. */
void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
#ifdef VBOX
    helper_hlt(next_eip_addend);
#else /* !VBOX */
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
#endif /* !VBOX */
}
5601
/* Leave the execution loop with a debug exception (breakpoint hit). */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}
5607
/* Clear the resume flag (RF) in EFLAGS. */
void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}
5612
/* Raise a software interrupt (INT n) with the return EIP adjusted past
   the instruction. */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
5617
/* Raise a CPU exception with no error code. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
5622
/* CLI: clear the interrupt flag. */
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}
5627
/* STI: set the interrupt flag. */
void helper_sti(void)
{
    env->eflags |= IF_MASK;
}
5632
5633#ifdef VBOX
/* CLI under VME (virtual-8086 mode extensions): clear VIF instead of IF. */
void helper_cli_vme(void)
{
    env->eflags &= ~VIF_MASK;
}
5638
/* STI under VME: #GP if an interrupt is pending (VIP set), else set VIF. */
void helper_sti_vme(void)
{
    /* First check, then change eflags according to the AMD manual */
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
    env->eflags |= VIF_MASK;
}
5647#endif /* VBOX */
5648
5649#if 0
5650/* vm86plus instructions */
5651void helper_cli_vm(void)
5652{
5653 env->eflags &= ~VIF_MASK;
5654}
5655
5656void helper_sti_vm(void)
5657{
5658 env->eflags |= VIF_MASK;
5659 if (env->eflags & VIP_MASK) {
5660 raise_exception(EXCP0D_GPF);
5661 }
5662}
5663#endif
5664
/* Block interrupt delivery for one instruction (STI/MOV SS shadow). */
void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}
5669
/* Clear the one-instruction interrupt shadow. */
void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
5674
/* BOUND r16: raise #BR if the sign-extended 16-bit index v lies outside
   the [low, high] pair stored at a0. */
void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}
5685
/* BOUND r32: raise #BR if the 32-bit index v lies outside the
   [low, high] pair stored at a0. */
void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}
5695
/* RSQRTSS/RSQRTPS approximation: computed in double precision then
   rounded to float, rather than with the hardware's table lookup. */
static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}
5700
/* RCPSS/RCPPS approximation: an exact double-precision reciprocal
   rounded to float, rather than the hardware's table lookup. */
static float approx_rcp(float a)
{
    double recip = 1.0 / (double)a;
    return (float)recip;
}
5705
5706#if !defined(CONFIG_USER_ONLY)
5707
5708#define MMUSUFFIX _mmu
5709
5710#define SHIFT 0
5711#include "softmmu_template.h"
5712
5713#define SHIFT 1
5714#include "softmmu_template.h"
5715
5716#define SHIFT 2
5717#include "softmmu_template.h"
5718
5719#define SHIFT 3
5720#include "softmmu_template.h"
5721
5722#endif
5723
5724#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5725/* This code assumes real physical address always fit into host CPU reg,
5726 which is wrong in general, but true for our current use cases. */
/* Signed 8-bit read from guest physical memory (VBox REM bridge). */
RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
{
    return remR3PhysReadS8(addr);
}
/* Unsigned 8-bit read from guest physical memory (VBox REM bridge). */
RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
{
    return remR3PhysReadU8(addr);
}
/* 8-bit write to guest physical memory (VBox REM bridge). */
void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
{
    remR3PhysWriteU8(addr, val);
}
/* Signed 16-bit read from guest physical memory (VBox REM bridge). */
RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
{
    return remR3PhysReadS16(addr);
}
/* Unsigned 16-bit read from guest physical memory (VBox REM bridge). */
RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
{
    return remR3PhysReadU16(addr);
}
/* 16-bit write to guest physical memory (VBox REM bridge). */
void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
{
    remR3PhysWriteU16(addr, val);
}
/* Signed 32-bit read from guest physical memory (VBox REM bridge). */
RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
{
    return remR3PhysReadS32(addr);
}
/* Unsigned 32-bit read from guest physical memory (VBox REM bridge). */
RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
{
    return remR3PhysReadU32(addr);
}
/* 32-bit write to guest physical memory (VBox REM bridge). */
void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
{
    remR3PhysWriteU32(addr, val);
}
/* 64-bit read from guest physical memory (VBox REM bridge). */
uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
{
    return remR3PhysReadU64(addr);
}
/* 64-bit write to guest physical memory (VBox REM bridge). */
void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
{
    remR3PhysWriteU64(addr, val);
}
5771#endif /* VBOX */
5772
5773#if !defined(CONFIG_USER_ONLY)
5774/* try to fill the TLB and return an exception if error. If retaddr is
5775 NULL, it means that the function was called in C code (i.e. not
5776 from generated code or from helper.c) */
5777/* XXX: fix it to restore all registers */
/* Fill the softmmu TLB for a faulting access. On a real guest fault,
   restore the CPU state from the translated code at retaddr (when
   non-NULL) and raise the guest exception; never returns in that case. */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    uintptr_t pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (uintptr_t)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
5806#endif
5807
5808#ifdef VBOX
5809
5810/**
5811 * Correctly computes the eflags.
5812 * @returns eflags.
5813 * @param env1 CPU environment.
5814 */
uint32_t raw_compute_eflags(CPUX86State *env1)
{
    /* compute_eflags() works on the global env, so swap it in temporarily */
    CPUX86State *savedenv = env;
    uint32_t efl;
    env = env1;
    efl = compute_eflags();
    env = savedenv;
    return efl;
}
5824
5825/**
5826 * Reads byte from virtual address in guest memory area.
5827 * XXX: is it working for any addresses? swapped out pages?
5828 * @returns read data byte.
5829 * @param env1 CPU environment.
5830 * @param pvAddr GC Virtual address.
5831 */
uint8_t read_byte(CPUX86State *env1, target_ulong addr)
{
    /* ldub_kernel() works on the global env, so swap it in temporarily */
    CPUX86State *savedenv = env;
    uint8_t u8;
    env = env1;
    u8 = ldub_kernel(addr);
    env = savedenv;
    return u8;
}
5841
5842/**
5843 * Reads byte from virtual address in guest memory area.
5844 * XXX: is it working for any addresses? swapped out pages?
5845 * @returns read data byte.
5846 * @param env1 CPU environment.
5847 * @param pvAddr GC Virtual address.
5848 */
uint16_t read_word(CPUX86State *env1, target_ulong addr)
{
    /* lduw_kernel() works on the global env, so swap it in temporarily */
    CPUX86State *savedenv = env;
    uint16_t u16;
    env = env1;
    u16 = lduw_kernel(addr);
    env = savedenv;
    return u16;
}
5858
5859/**
5860 * Reads byte from virtual address in guest memory area.
5861 * XXX: is it working for any addresses? swapped out pages?
5862 * @returns read data byte.
5863 * @param env1 CPU environment.
5864 * @param pvAddr GC Virtual address.
5865 */
uint32_t read_dword(CPUX86State *env1, target_ulong addr)
{
    /* ldl_kernel() works on the global env, so swap it in temporarily */
    CPUX86State *savedenv = env;
    uint32_t u32;
    env = env1;
    u32 = ldl_kernel(addr);
    env = savedenv;
    return u32;
}
5875
5876/**
5877 * Writes byte to virtual address in guest memory area.
5878 * XXX: is it working for any addresses? swapped out pages?
5879 * @returns read data byte.
5880 * @param env1 CPU environment.
5881 * @param pvAddr GC Virtual address.
5882 * @param val byte value
5883 */
void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
{
    /* stb() works on the global env, so swap it in temporarily */
    CPUX86State *savedenv = env;
    env = env1;
    stb(addr, val);
    env = savedenv;
}
5891
5892void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5893{
5894 CPUX86State *savedenv = env;
5895 env = env1;
5896 stw(addr, val);
5897 env = savedenv;
5898}
5899
5900void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5901{
5902 CPUX86State *savedenv = env;
5903 env = env1;
5904 stl(addr, val);
5905 env = savedenv;
5906}
5907
5908/**
5909 * Correctly loads selector into segment register with updating internal
5910 * qemu data/caches.
5911 * @param env1 CPU environment.
5912 * @param seg_reg Segment register.
5913 * @param selector Selector to load.
5914 */
5915void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5916{
5917 CPUX86State *savedenv = env;
5918#ifdef FORCE_SEGMENT_SYNC
5919 jmp_buf old_buf;
5920#endif
5921
5922 env = env1;
5923
5924 if ( env->eflags & X86_EFL_VM
5925 || !(env->cr[0] & X86_CR0_PE))
5926 {
5927 load_seg_vm(seg_reg, selector);
5928
5929 env = savedenv;
5930
5931 /* Successful sync. */
5932 Assert(env1->segs[seg_reg].newselector == 0);
5933 }
5934 else
5935 {
5936 /* For some reasons, it works even w/o save/restore of the jump buffer, so as code is
5937 time critical - let's not do that */
5938#ifdef FORCE_SEGMENT_SYNC
5939 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5940#endif
5941 if (setjmp(env1->jmp_env) == 0)
5942 {
5943 if (seg_reg == R_CS)
5944 {
5945 uint32_t e1, e2;
5946 e1 = e2 = 0;
5947 load_segment(&e1, &e2, selector);
5948 cpu_x86_load_seg_cache(env, R_CS, selector,
5949 get_seg_base(e1, e2),
5950 get_seg_limit(e1, e2),
5951 e2);
5952 }
5953 else
5954 helper_load_seg(seg_reg, selector);
5955 /* We used to use tss_load_seg(seg_reg, selector); which, for some reasons ignored
5956 loading 0 selectors, what, in order, lead to subtle problems like #3588 */
5957
5958 env = savedenv;
5959
5960 /* Successful sync. */
5961 Assert(env1->segs[seg_reg].newselector == 0);
5962 }
5963 else
5964 {
5965 env = savedenv;
5966
5967 /* Postpone sync until the guest uses the selector. */
5968 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5969 env1->segs[seg_reg].newselector = selector;
5970 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5971 env1->exception_index = -1;
5972 env1->error_code = 0;
5973 env1->old_exception = -1;
5974 }
5975#ifdef FORCE_SEGMENT_SYNC
5976 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5977#endif
5978 }
5979
5980}
5981
5982DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5983{
5984 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5985}
5986
5987
/**
 * Emulates exactly one guest instruction (plus any instruction-fused
 * follower, see below) by generating a throw-away single-instruction TB and
 * executing it.
 *
 * @returns 0 (always).
 * @param   env1    CPU environment to run; swapped into the global env for
 *                  the duration of the call.
 */
int emulate_single_instr(CPUX86State *env1)
{
    TranslationBlock *tb;
    TranslationBlock *current;
    int flags;
    uint8_t *tc_ptr;
    target_ulong old_eip;

    /* ensures env is loaded! */
    CPUX86State *savedenv = env;
    env = env1;

    RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);

    /* Detach the currently executing TB so the one-off TB below can run. */
    current = env->current_tb;
    env->current_tb = NULL;
    flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));

    /*
     * Translate only one instruction.
     */
    /* CPU_EMULATE_SINGLE_INSTR tells the translator to stop after one insn. */
    ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
    tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
                     env->segs[R_CS].base, flags, 0);

    ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);


    /* tb_link_phys: */
    /* Minimal manual linking: mark the TB as unchained (low bit 2 tag) and
       point both chained jumps back at the epilogue. */
    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    Assert(tb->jmp_next[0] == NULL);
    Assert(tb->jmp_next[1] == NULL);
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

    /*
     * Execute it using emulation
     */
    old_eip = env->eip;
    env->current_tb = tb;

    /*
     * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code
     * perhaps not a very safe hack
     */
    /* Re-execute while EIP is unchanged (e.g. REP-prefixed instructions),
       but bail out on pending external events so we stay responsive. */
    while (old_eip == env->eip)
    {
        tc_ptr = tb->tc_ptr;

#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
        int fake_ret;
        tcg_qemu_tb_exec(tc_ptr, fake_ret);
#else
        tcg_qemu_tb_exec(tc_ptr);
#endif

        /*
         * Exit once we detect an external interrupt and interrupts are enabled
         */
        if (    (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
            ||  (   (env->eflags & IF_MASK)
                 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
                 &&  (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
           )
        {
            break;
        }
        if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
            tlb_flush(env, true);
        }
    }
    env->current_tb = current;

    /* The one-off TB is never cached: unlink and free it right away. */
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
/*
    Assert(tb->tb_next_offset[0] == 0xffff);
    Assert(tb->tb_next_offset[1] == 0xffff);
    Assert(tb->tb_next[0] == 0xffff);
    Assert(tb->tb_next[1] == 0xffff);
    Assert(tb->jmp_next[0] == NULL);
    Assert(tb->jmp_next[1] == NULL);
    Assert(tb->jmp_first == NULL); */

    RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);

    /*
     * Execute the next instruction when we encounter instruction fusing.
     */
    /* E.g. STI/MOV-SS shadow: the following instruction must execute before
       interrupts are delivered, so recurse once for it. */
    if (env->hflags & HF_INHIBIT_IRQ_MASK)
    {
        Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        emulate_single_instr(env);
    }

    env = savedenv;
    return 0;
}
6091
6092/**
6093 * Correctly loads a new ldtr selector.
6094 *
6095 * @param env1 CPU environment.
6096 * @param selector Selector to load.
6097 */
6098void sync_ldtr(CPUX86State *env1, int selector)
6099{
6100 CPUX86State *saved_env = env;
6101 if (setjmp(env1->jmp_env) == 0)
6102 {
6103 env = env1;
6104 helper_lldt(selector);
6105 env = saved_env;
6106 }
6107 else
6108 {
6109 env = saved_env;
6110#ifdef VBOX_STRICT
6111 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6112#endif
6113 }
6114}
6115
6116int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6117 uint32_t *esp_ptr, int dpl)
6118{
6119 int type, index, shift;
6120
6121 CPUX86State *savedenv = env;
6122 env = env1;
6123
6124 if (!(env->tr.flags & DESC_P_MASK))
6125 cpu_abort(env, "invalid tss");
6126 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6127 if ((type & 7) != 3)
6128 cpu_abort(env, "invalid tss type %d", type);
6129 shift = type >> 3;
6130 index = (dpl * 4 + 2) << shift;
6131 if (index + (4 << shift) - 1 > env->tr.limit)
6132 {
6133 env = savedenv;
6134 return 0;
6135 }
6136 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6137
6138 if (shift == 0) {
6139 *esp_ptr = lduw_kernel(env->tr.base + index);
6140 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6141 } else {
6142 *esp_ptr = ldl_kernel(env->tr.base + index);
6143 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6144 }
6145
6146 env = savedenv;
6147 return 1;
6148}
6149
6150//*****************************************************************************
6151// Needs to be at the bottom of the file (overriding macros)
6152
/**
 * Loads an 80-bit x87 register image from a raw host buffer.
 * @returns The loaded value.
 * @param   ptr     Host pointer to a 10-byte (mantissa + sign/exponent) image.
 */
static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
{
#ifdef USE_X86LDOUBLE
    /* Assemble the value via the union to avoid aliasing the buffer as a
       long double directly. */
    CPU86_LDoubleU tmp;
    tmp.l.lower = *(uint64_t const *)ptr;
    tmp.l.upper = *(uint16_t const *)(ptr + 8);
    return tmp.d;
#else
    /* Only the 80-bit host long double path is supported here. */
# error "Busted FPU saving/restoring!"
    return *(CPU86_LDouble *)ptr;
#endif
}
6165
/**
 * Stores an 80-bit x87 register image to a raw host buffer.
 * The 6 padding bytes following the 10-byte value are zeroed.
 *
 * @param   f       The value to store.
 * @param   ptr     Host pointer to a 16-byte register slot.
 */
static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
{
#ifdef USE_X86LDOUBLE
    CPU86_LDoubleU tmp;
    tmp.d = f;
    *(uint64_t *)(ptr + 0) = tmp.l.lower;
    *(uint16_t *)(ptr + 8) = tmp.l.upper;
    /* Zero the tail so the 16-byte slot is fully initialized. */
    *(uint16_t *)(ptr + 10) = 0;
    *(uint32_t *)(ptr + 12) = 0;
    AssertCompile(sizeof(long double) > 8);
#else
    /* Only the 80-bit host long double path is supported here. */
# error "Busted FPU saving/restoring!"
    *(CPU86_LDouble *)ptr = f;
#endif
}
6181
/* Redefine the store helpers as plain host-memory writes: the raw FPU
   state save/restore code below operates on a host buffer, not on guest
   virtual addresses, so the softmmu versions must not be used. */
#undef stw
#undef stl
#undef stq
#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6188
6189//*****************************************************************************
/**
 * Stores the FPU/SSE state of @a env into the raw buffer at @a ptr.
 *
 * NOTE(review): despite the "restore" name, this writes env state OUT to
 * memory (FXSAVE-style image when CPUID_FXSR is present — ST regs at +0x20,
 * XMM regs at +0xa0 — legacy FSAVE-style X86FPUSTATE otherwise); it is the
 * counterpart of save_raw_fp_state() below which reads the buffer back in.
 *
 * @param   env     CPU environment to read the FPU state from.
 * @param   ptr     Destination host buffer.
 */
void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;
    int data64 = !!(env->hflags & HF_LMA_MASK);

    if (env->cpuid_features & CPUID_FXSR)
    {
        /* FXSAVE-style: merge the top-of-stack index into FSW bits 11-13. */
        fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= (env->fptags[i] << i);
        }
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag ^ 0xff); /* FXSAVE abridged tag: 1 = valid. */

        addr = ptr + 0x20;
        for(i = 0;i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, addr);
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it */
            stl(ptr + 0x18, env->mxcsr); /* mxcsr */
            stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
            /* 16 XMM registers in long mode, 8 otherwise. */
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for(i = 0; i < nb_xmm_regs; i++) {
#if __GNUC__ < 4
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
                stl(addr, env->xmm_regs[i].XMM_L(0));
                stl(addr + 4, env->xmm_regs[i].XMM_L(1));
                stl(addr + 8, env->xmm_regs[i].XMM_L(2));
                stl(addr + 12, env->xmm_regs[i].XMM_L(3));
#endif
                addr += 16;
            }
        }
    }
    else
    {
        /* Legacy FSAVE-style layout via the X86FPUSTATE structure. */
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag; /* NB: shadows the outer fptag; full 2-bit tags here. */

        fp->FCW = env->fpuc;
        fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for (i=7; i>=0; i--) {
            fptag <<= 2;
            if (env->fptags[i]) {
                fptag |= 3; /* 3 = empty */
            } else {
                /* the FPU automatically computes it */
            }
        }
        fp->FTW = fptag;

        for(i = 0;i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
        }
    }
}
6259
6260//*****************************************************************************
/* Likewise, redefine the load helpers as plain host-memory reads for the
   raw FPU state code below (host buffer, not guest virtual addresses). */
#undef lduw
#undef ldl
#undef ldq
#define lduw(a) *(uint16_t *)(a)
#define ldl(a) *(uint32_t *)(a)
#define ldq(a) *(uint64_t *)(a)
6267//*****************************************************************************
6268void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6269{
6270 int i, fpus, fptag, nb_xmm_regs;
6271 CPU86_LDouble tmp;
6272 uint8_t *addr;
6273 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6274
6275 if (env->cpuid_features & CPUID_FXSR)
6276 {
6277 env->fpuc = lduw(ptr);
6278 fpus = lduw(ptr + 2);
6279 fptag = lduw(ptr + 4);
6280 env->fpstt = (fpus >> 11) & 7;
6281 env->fpus = fpus & ~0x3800;
6282 fptag ^= 0xff;
6283 for(i = 0;i < 8; i++) {
6284 env->fptags[i] = ((fptag >> i) & 1);
6285 }
6286
6287 addr = ptr + 0x20;
6288 for(i = 0;i < 8; i++) {
6289 tmp = helper_fldt_raw(addr);
6290 ST(i) = tmp;
6291 addr += 16;
6292 }
6293
6294 if (env->cr[4] & CR4_OSFXSR_MASK) {
6295 /* XXX: finish it, endianness */
6296 env->mxcsr = ldl(ptr + 0x18);
6297 //ldl(ptr + 0x1c);
6298 nb_xmm_regs = 8 << data64;
6299 addr = ptr + 0xa0;
6300 for(i = 0; i < nb_xmm_regs; i++) {
6301#if HC_ARCH_BITS == 32
6302 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6303 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6304 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6305 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6306 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6307#else
6308 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6309 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6310#endif
6311 addr += 16;
6312 }
6313 }
6314 }
6315 else
6316 {
6317 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6318 int fptag, j;
6319
6320 env->fpuc = fp->FCW;
6321 env->fpstt = (fp->FSW >> 11) & 7;
6322 env->fpus = fp->FSW & ~0x3800;
6323 fptag = fp->FTW;
6324 for(i = 0;i < 8; i++) {
6325 env->fptags[i] = ((fptag & 3) == 3);
6326 fptag >>= 2;
6327 }
6328 j = env->fpstt;
6329 for(i = 0;i < 8; i++) {
6330 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6331 ST(i) = tmp;
6332 }
6333 }
6334}
6335//*****************************************************************************
6336//*****************************************************************************
6337
6338#endif /* VBOX */
6339
6340/* Secure Virtual Machine helpers */
6341
6342#if defined(CONFIG_USER_ONLY)
6343
/* CONFIG_USER_ONLY: the SVM (AMD-V) instructions are privileged and not
   reachable from user-mode emulation, so all SVM helpers are no-op stubs. */
void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
6379#else
6380
6381static inline void svm_save_seg(target_phys_addr_t addr,
6382 const SegmentCache *sc)
6383{
6384 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6385 sc->selector);
6386 stq_phys(addr + offsetof(struct vmcb_seg, base),
6387 sc->base);
6388 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6389 sc->limit);
6390 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6391 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6392}
6393
6394static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6395{
6396 unsigned int flags;
6397
6398 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6399 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6400 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6401 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6402 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6403}
6404
6405static inline void svm_load_seg_cache(target_phys_addr_t addr,
6406 CPUState *env, int seg_reg)
6407{
6408 SegmentCache sc1, *sc = &sc1;
6409 svm_load_seg(addr, sc);
6410 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6411 sc->base, sc->limit, sc->flags);
6412}
6413
/**
 * VMRUN: saves the host state into the hsave page, loads the guest state
 * from the VMCB at [R/E]AX, enables intercepts and optionally injects the
 * VMCB's pending event.
 *
 * @param aflag             Address size attribute: 2 = 64-bit (full RAX),
 *                          otherwise the address is truncated to 32 bits.
 * @param next_eip_addend   Length of the VMRUN instruction; added to EIP to
 *                          form the host resume RIP stored in hsave.
 */
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    /* Host resumes after the VMRUN instruction on #VMEXIT. */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    /* Load the guest state from the VMCB save area. */
    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* Virtual interrupt masking: remember the host IF in HF2_HIF. */
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    /* EFER must go in after the CRs so the hidden hflags come out right. */
    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    /* Guest interrupts are enabled from here on (global interrupt flag). */
    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        /* INTR is delivered directly via do_interrupt(); the other types
           set up pending exception state and longjmp out of the TB. */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
6586
/**
 * VMMCALL: only meaningful when intercepted by the hypervisor; otherwise it
 * raises #UD, matching a VMMCALL executed with no intercept armed.
 */
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
6592
/**
 * VMLOAD: loads the "extra" guest state (FS, GS, TR, LDTR and the
 * MSR-backed fields) from the VMCB at [R/E]AX into the CPU state.
 *
 * @param aflag Address size attribute: 2 = 64-bit (full RAX), otherwise
 *              the VMCB address is truncated to 32 bits.
 */
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    /* TR and LDTR only update the cached descriptors, not seg registers. */
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
6627
/**
 * VMSAVE: stores the "extra" guest state (FS, GS, TR, LDTR and the
 * MSR-backed fields) from the CPU state into the VMCB at [R/E]AX.
 * Mirror image of helper_vmload().
 *
 * @param aflag Address size attribute: 2 = 64-bit (full RAX), otherwise
 *              the VMCB address is truncated to 32 bits.
 */
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
6662
/**
 * STGI: sets the global interrupt flag (GIF), re-enabling interrupt
 * delivery after a CLGI.
 */
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
6668
/**
 * CLGI: clears the global interrupt flag (GIF), suppressing interrupt
 * delivery until the next STGI.
 */
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
6674
/**
 * SKINIT: secure init/startup is not emulated; raises #UD after the
 * intercept check.
 */
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
6681
6682void helper_invlpga(int aflag)
6683{
6684 target_ulong addr;
6685 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6686
6687 if (aflag == 2)
6688 addr = EAX;
6689 else
6690 addr = (uint32_t)EAX;
6691
6692 /* XXX: could use the ASID to see if it is needed to do the
6693 flush */
6694 tlb_flush_page(env, addr);
6695}
6696
/**
 * Checks whether @a type is intercepted by the current VMCB settings and
 * performs a #VMEXIT if so; returns normally otherwise.
 *
 * @param type  SVM exit code identifying the event being checked.
 * @param param Exit qualification (exit_info_1); for SVM_EXIT_MSR it is the
 *              access direction bit used to index the MSR permission bitmap.
 */
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
#ifndef VBOX
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            /* Look up ECX in the MSR permission bitmap: t1 is the byte
               offset, t0 the bit offset (2 bits per MSR: read/write). */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* NOTE(review): t1 should arguably be (ECX * 2) / 8 to match
                   the other ranges — TODO verify against upstream QEMU; this
                   branch is unreachable in VBOX builds (see #else below). */
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            /* param selects read (0) or write (1) permission bit. */
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
#else  /* VBOX */
    /* VirtualBox handles SVM intercepts in HM, never through the recompiler. */
    AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
#endif /* VBOX */
}
6768
/**
 * Checks the VMCB I/O permission bitmap for @a port and performs an
 * SVM_EXIT_IOIO #VMEXIT if the access is intercepted.
 *
 * @param port              I/O port being accessed.
 * @param param             IOIO exit_info_1 bits; bits 6:4 encode the access
 *                          size, from which the permission-bit mask is built.
 * @param next_eip_addend   Instruction length; EIP + this is stored as
 *                          exit_info_2 (the resume RIP) before the exit.
 */
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        /* One permission bit per port byte; mask covers the access width. */
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
6784
6785/* Note: currently only 32 bits of exit_code are used */
6786void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6787{
6788 uint32_t int_ctl;
6789
6790 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6791 exit_code, exit_info_1,
6792 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6793 EIP);
6794
6795 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6796 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6797 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6798 } else {
6799 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6800 }
6801
6802 /* Save the VM state in the vmcb */
6803 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6804 &env->segs[R_ES]);
6805 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6806 &env->segs[R_CS]);
6807 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6808 &env->segs[R_SS]);
6809 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6810 &env->segs[R_DS]);
6811
6812 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6813 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6814
6815 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6816 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6817
6818 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6819 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6820 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6821 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6822 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6823
6824 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6825 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6826 int_ctl |= env->v_tpr & V_TPR_MASK;
6827 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6828 int_ctl |= V_IRQ_MASK;
6829 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6830
6831 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6832 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6833 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6834 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6835 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6836 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6837 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6838
6839 /* Reload the host state from vm_hsave */
6840 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6841 env->hflags &= ~HF_SVMI_MASK;
6842 env->intercept = 0;
6843 env->intercept_exceptions = 0;
6844 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6845 env->tsc_offset = 0;
6846
6847 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6848 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6849
6850 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6851 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6852
6853 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6854 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6855 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6856 /* we need to set the efer after the crs so the hidden flags get
6857 set properly */
6858 cpu_load_efer(env,
6859 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6860 env->eflags = 0;
6861 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6862 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6863 CC_OP = CC_OP_EFLAGS;
6864
6865 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6866 env, R_ES);
6867 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6868 env, R_CS);
6869 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6870 env, R_SS);
6871 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6872 env, R_DS);
6873
6874 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6875 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6876 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6877
6878 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6879 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6880
6881 /* other setups */
6882 cpu_x86_set_cpl(env, 0);
6883 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6884 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6885
6886 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6887 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6888 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6889 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6890 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6891
6892 env->hflags2 &= ~HF2_GIF_MASK;
6893 /* FIXME: Resets the current ASID register to zero (host ASID). */
6894
6895 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6896
6897 /* Clears the TSC_OFFSET inside the processor. */
6898
6899 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6900 from the page table indicated the host's CR3. If the PDPEs contain
6901 illegal state, the processor causes a shutdown. */
6902
6903 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6904 env->cr[0] |= CR0_PE_MASK;
6905 env->eflags &= ~VM_MASK;
6906
6907 /* Disables all breakpoints in the host DR7 register. */
6908
6909 /* Checks the reloaded host state for consistency. */
6910
6911 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6912 host's code segment or non-canonical (in the case of long mode), a
6913 #GP fault is delivered inside the host.) */
6914
6915 /* remove any pending exception */
6916 env->exception_index = -1;
6917 env->error_code = 0;
6918 env->old_exception = -1;
6919
6920 cpu_loop_exit();
6921}
6922
6923#endif
6924
6925/* MMX/SSE */
6926/* XXX: optimize by storing fptt and fptags in the static cpu state */
6927void helper_enter_mmx(void)
6928{
6929 env->fpstt = 0;
6930 *(uint32_t *)(env->fptags) = 0;
6931 *(uint32_t *)(env->fptags + 4) = 0;
6932}
6933
6934void helper_emms(void)
6935{
6936 /* set to empty state */
6937 *(uint32_t *)(env->fptags) = 0x01010101;
6938 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6939}
6940
6941/* XXX: suppress */
/* Copy a single 64-bit quantity from *s to *d (MMX/SSE move helper).
   XXX: suppress (kept from original: candidate for inlining in tcg) */
void helper_movq(void *d, void *s)
{
    uint64_t val;

    val = *(uint64_t *)s;
    *(uint64_t *)d = val;
}
6946
6947#define SHIFT 0
6948#include "ops_sse.h"
6949
6950#define SHIFT 1
6951#include "ops_sse.h"
6952
6953#define SHIFT 0
6954#include "helper_template.h"
6955#undef SHIFT
6956
6957#define SHIFT 1
6958#include "helper_template.h"
6959#undef SHIFT
6960
6961#define SHIFT 2
6962#include "helper_template.h"
6963#undef SHIFT
6964
6965#ifdef TARGET_X86_64
6966
6967#define SHIFT 3
6968#include "helper_template.h"
6969#undef SHIFT
6970
6971#endif
6972
6973/* bit operations */
6974target_ulong helper_bsf(target_ulong t0)
6975{
6976 int count;
6977 target_ulong res;
6978
6979 res = t0;
6980 count = 0;
6981 while ((res & 1) == 0) {
6982 count++;
6983 res >>= 1;
6984 }
6985 return count;
6986}
6987
6988target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6989{
6990 int count;
6991 target_ulong res, mask;
6992
6993 if (wordsize > 0 && t0 == 0) {
6994 return wordsize;
6995 }
6996 res = t0;
6997 count = TARGET_LONG_BITS - 1;
6998 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6999 while ((res & mask) == 0) {
7000 count--;
7001 res <<= 1;
7002 }
7003 if (wordsize > 0) {
7004 return wordsize - 1 - count;
7005 }
7006 return count;
7007}
7008
7009target_ulong helper_bsr(target_ulong t0)
7010{
7011 return helper_lzcnt(t0, 0);
7012}
7013
7014static int compute_all_eflags(void)
7015{
7016 return CC_SRC;
7017}
7018
7019static int compute_c_eflags(void)
7020{
7021 return CC_SRC & CC_C;
7022}
7023
/* Lazy-flags materialization: given the pending condition-code operation
 * 'op' (a CC_OP_* value recorded by the translator), recompute the full
 * EFLAGS arithmetic-flag set from CC_SRC/CC_DST by dispatching to the
 * width-specific compute_all_* helper generated from helper_template.h
 * (the B/W/L/Q suffixes select 8/16/32/64-bit operand width; the Q
 * variants exist only for TARGET_X86_64 builds).
 * Returns the combined flag bits; an out-of-range op returns 0. */
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
7094
/* Lazy-flags materialization of just the carry flag (CF) for the pending
 * condition-code operation 'op', dispatching to the compute_c_* helpers
 * generated from helper_template.h.
 * NOTE(review): several widths deliberately share one helper (MUL* all
 * use compute_c_mull, INC*/DEC* use compute_c_incl, SAR* use
 * compute_c_sarl) — presumably because the carry result for those ops
 * does not depend on operand width; confirm against helper_template.h.
 * Returns the CF bit value; an out-of-range op returns 0. */
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette