VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c @ 47713

Last change on this file since 47713 was 47709, checked in by vboxsync, 12 years ago

REM: Attempt at better selector attribute handling, to avoid further weird bits turning up.

1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
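/* For reference: the table is indexed by the low byte of a result and holds
 * CC_P exactly when that byte has an even number of set bits, which is how
 * the x86 parity flag (PF) is defined.  E.g. parity_table[0x00] == CC_P,
 * parity_table[0x01] == 0 and parity_table[0x03] == CC_P. */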
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
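/* For reference: RCL on an 8-bit operand rotates through 9 bits (8 data bits
 * plus CF), so the masked 5-bit shift count is reduced modulo 9; the 16-bit
 * form rotates through 17 bits and is reduced modulo 17.  E.g.
 * rclb_table[9] == 0 and rclw_table[17] == 0. */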
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
 187 /* According to the AMD manual, this should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
 190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
 210 * @param e2 The second dword of the descriptor.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
 224/* return non-zero on error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 /** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /** @todo in theory the iret could fault and we'd still need this. */
236 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
237 {
238 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
239 selector = selector & 0xfffc;
240 }
241#endif /* VBOX */
242
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
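/* Worked example: the flat data descriptor e1=0x0000ffff, e2=0x00cf9300
 * decodes to base 0 (base bits 15..0 come from the top half of e1, bits
 * 23..16 from the low byte of e2, bits 31..24 from the top byte of e2) and a
 * raw limit field of 0xfffff; since DESC_G_MASK is set, get_seg_limit()
 * scales that to 0xffffffff. */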
269
270static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271{
272 sc->base = get_seg_base(e1, e2);
273 sc->limit = get_seg_limit(e1, e2);
274#ifndef VBOX
275 sc->flags = e2;
276#else
277 sc->flags = e2 & DESC_RAW_FLAG_BITS;
278 sc->newselector = 0;
279 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
280#endif
281}
282
283/* init the segment cache in vm86 mode. */
284static inline void load_seg_vm(int seg, int selector)
285{
286 selector &= 0xffff;
287#ifdef VBOX
288 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
289 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
290 flags |= (3 << DESC_DPL_SHIFT);
291
292 cpu_x86_load_seg_cache(env, seg, selector,
293 (selector << 4), 0xffff, flags);
294#else /* VBOX */
295 cpu_x86_load_seg_cache(env, seg, selector,
296 (selector << 4), 0xffff, 0);
297#endif /* VBOX */
298}
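/* For reference: in vm86 mode a segment register simply holds a paragraph
 * number, so the cached base is selector * 16 and the limit is 64 KiB - 1,
 * which is what the (selector << 4) and 0xffff arguments above express. */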
299
300static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
301 uint32_t *esp_ptr, int dpl)
302{
303#ifndef VBOX
304 int type, index, shift;
305#else
306 unsigned int type, index, shift;
307#endif
308
309#if 0
310 {
311 int i;
312 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
313 for(i=0;i<env->tr.limit;i++) {
314 printf("%02x ", env->tr.base[i]);
315 if ((i & 7) == 7) printf("\n");
316 }
317 printf("\n");
318 }
319#endif
320
321 if (!(env->tr.flags & DESC_P_MASK))
322 cpu_abort(env, "invalid tss");
323 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
324 if ((type & 7) != 1)
325 cpu_abort(env, "invalid tss type");
326 shift = type >> 3;
327 index = (dpl * 4 + 2) << shift;
328 if (index + (4 << shift) - 1 > env->tr.limit)
329 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
330 if (shift == 0) {
331 *esp_ptr = lduw_kernel(env->tr.base + index);
332 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
333 } else {
334 *esp_ptr = ldl_kernel(env->tr.base + index);
335 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
336 }
337}
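/* For reference: in a 32-bit TSS (shift == 1) the ring-N stack pointers sit
 * at offset (N * 4 + 2) << 1, i.e. ESP0 at +0x04 and SS0 at +0x08, ESP1 at
 * +0x0c and SS1 at +0x10, and so on; the 16-bit TSS packs SP0/SS0 at
 * +0x02/+0x04.  That is exactly what the index computation above encodes. */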
338
339/* XXX: merge with load_seg() */
340static void tss_load_seg(int seg_reg, int selector)
341{
342 uint32_t e1, e2;
343 int rpl, dpl, cpl;
344
345#ifdef VBOX
346 e1 = e2 = 0; /* gcc warning? */
347 cpl = env->hflags & HF_CPL_MASK;
348 /* Trying to load a selector with CPL=1? */
349 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
350 {
351 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
352 selector = selector & 0xfffc;
353 }
354#endif /* VBOX */
355
356 if ((selector & 0xfffc) != 0) {
357 if (load_segment(&e1, &e2, selector) != 0)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if (!(e2 & DESC_S_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 rpl = selector & 3;
362 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
363 cpl = env->hflags & HF_CPL_MASK;
364 if (seg_reg == R_CS) {
365 if (!(e2 & DESC_CS_MASK))
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 /* XXX: is it correct ? */
368 if (dpl != rpl)
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 if ((e2 & DESC_C_MASK) && dpl > rpl)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372 } else if (seg_reg == R_SS) {
373 /* SS must be writable data */
374 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 if (dpl != cpl || dpl != rpl)
377 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
378 } else {
379 /* not readable code */
380 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
381 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
 382 /* if data or non-conforming code, check the rights */
383 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
384 if (dpl < cpl || dpl < rpl)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386 }
387 }
388 if (!(e2 & DESC_P_MASK))
389 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
390 cpu_x86_load_seg_cache(env, seg_reg, selector,
391 get_seg_base(e1, e2),
392 get_seg_limit(e1, e2),
393 e2);
394 } else {
395 if (seg_reg == R_SS || seg_reg == R_CS)
396 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
397#ifdef VBOX
398# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
399 cpu_x86_load_seg_cache(env, seg_reg, selector,
400 0, 0, 0);
401# endif
402#endif /* VBOX */
403 }
404}
405
406#define SWITCH_TSS_JMP 0
407#define SWITCH_TSS_IRET 1
408#define SWITCH_TSS_CALL 2
409
410/* XXX: restore CPU state in registers (PowerPC case) */
411static void switch_tss(int tss_selector,
412 uint32_t e1, uint32_t e2, int source,
413 uint32_t next_eip)
414{
415 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
416 target_ulong tss_base;
417 uint32_t new_regs[8], new_segs[6];
418 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
419 uint32_t old_eflags, eflags_mask;
420 SegmentCache *dt;
421#ifndef VBOX
422 int index;
423#else
424 unsigned int index;
425#endif
426 target_ulong ptr;
427
428 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
429 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
430
431 /* if task gate, we read the TSS segment and we load it */
432 if (type == 5) {
433 if (!(e2 & DESC_P_MASK))
434 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
435 tss_selector = e1 >> 16;
436 if (tss_selector & 4)
437 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
438 if (load_segment(&e1, &e2, tss_selector) != 0)
439 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
440 if (e2 & DESC_S_MASK)
441 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
442 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
443 if ((type & 7) != 1)
444 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
445 }
446
447 if (!(e2 & DESC_P_MASK))
448 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
449
450 if (type & 8)
451 tss_limit_max = 103;
452 else
453 tss_limit_max = 43;
454 tss_limit = get_seg_limit(e1, e2);
455 tss_base = get_seg_base(e1, e2);
456 if ((tss_selector & 4) != 0 ||
457 tss_limit < tss_limit_max)
458 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
459 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
460 if (old_type & 8)
461 old_tss_limit_max = 103;
462 else
463 old_tss_limit_max = 43;
464
465 /* read all the registers from the new TSS */
466 if (type & 8) {
467 /* 32 bit */
468 new_cr3 = ldl_kernel(tss_base + 0x1c);
469 new_eip = ldl_kernel(tss_base + 0x20);
470 new_eflags = ldl_kernel(tss_base + 0x24);
471 for(i = 0; i < 8; i++)
472 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
473 for(i = 0; i < 6; i++)
474 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
475 new_ldt = lduw_kernel(tss_base + 0x60);
476 new_trap = ldl_kernel(tss_base + 0x64);
477 } else {
478 /* 16 bit */
479 new_cr3 = 0;
480 new_eip = lduw_kernel(tss_base + 0x0e);
481 new_eflags = lduw_kernel(tss_base + 0x10);
482 for(i = 0; i < 8; i++)
483 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
484 for(i = 0; i < 4; i++)
485 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
486 new_ldt = lduw_kernel(tss_base + 0x2a);
487 new_segs[R_FS] = 0;
488 new_segs[R_GS] = 0;
489 new_trap = 0;
490 }
491
 492 /* NOTE: we must avoid memory exceptions during the task switch,
 493 so we make dummy accesses beforehand */
 494 /* XXX: it can still fail in some cases, so a bigger hack is
 495 necessary to validate the TLB after having done the accesses */
496
497 v1 = ldub_kernel(env->tr.base);
498 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
499 stb_kernel(env->tr.base, v1);
500 stb_kernel(env->tr.base + old_tss_limit_max, v2);
501
502 /* clear busy bit (it is restartable) */
503 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
504 target_ulong ptr;
505 uint32_t e2;
506 ptr = env->gdt.base + (env->tr.selector & ~7);
507 e2 = ldl_kernel(ptr + 4);
508 e2 &= ~DESC_TSS_BUSY_MASK;
509 stl_kernel(ptr + 4, e2);
510 }
511 old_eflags = compute_eflags();
512 if (source == SWITCH_TSS_IRET)
513 old_eflags &= ~NT_MASK;
514
515 /* save the current state in the old TSS */
516 if (type & 8) {
517 /* 32 bit */
518 stl_kernel(env->tr.base + 0x20, next_eip);
519 stl_kernel(env->tr.base + 0x24, old_eflags);
520 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
521 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
522 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
523 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
524 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
525 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
526 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
527 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
528 for(i = 0; i < 6; i++)
529 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
530#ifdef VBOX
531 /* Must store the ldt as it gets reloaded and might have been changed. */
532 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
533#endif
534#if defined(VBOX) && defined(DEBUG)
535 printf("TSS 32 bits switch\n");
536 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
537#endif
538 } else {
539 /* 16 bit */
540 stw_kernel(env->tr.base + 0x0e, next_eip);
541 stw_kernel(env->tr.base + 0x10, old_eflags);
542 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
543 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
544 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
545 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
546 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
547 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
548 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
549 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
550 for(i = 0; i < 4; i++)
551 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
552#ifdef VBOX
553 /* Must store the ldt as it gets reloaded and might have been changed. */
554 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
555#endif
556 }
557
 558 /* now if an exception occurs, it will occur in the next task's
 559 context */
560
561 if (source == SWITCH_TSS_CALL) {
562 stw_kernel(tss_base, env->tr.selector);
563 new_eflags |= NT_MASK;
564 }
565
566 /* set busy bit */
567 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
568 target_ulong ptr;
569 uint32_t e2;
570 ptr = env->gdt.base + (tss_selector & ~7);
571 e2 = ldl_kernel(ptr + 4);
572 e2 |= DESC_TSS_BUSY_MASK;
573 stl_kernel(ptr + 4, e2);
574 }
575
576 /* set the new CPU state */
 577 /* from this point, any exception which occurs can cause problems */
578 env->cr[0] |= CR0_TS_MASK;
579 env->hflags |= HF_TS_MASK;
580 env->tr.selector = tss_selector;
581 env->tr.base = tss_base;
582 env->tr.limit = tss_limit;
583 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
584#ifdef VBOX
585 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
586 env->tr.newselector = 0;
587#endif
588
589 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
590 cpu_x86_update_cr3(env, new_cr3);
591 }
592
 593 /* load all registers without raising exceptions, then reload them,
 594 possibly raising exceptions */
595 env->eip = new_eip;
596 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
597 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
598 if (!(type & 8))
599 eflags_mask &= 0xffff;
600 load_eflags(new_eflags, eflags_mask);
601 /* XXX: what to do in 16 bit case ? */
602 EAX = new_regs[0];
603 ECX = new_regs[1];
604 EDX = new_regs[2];
605 EBX = new_regs[3];
606 ESP = new_regs[4];
607 EBP = new_regs[5];
608 ESI = new_regs[6];
609 EDI = new_regs[7];
610 if (new_eflags & VM_MASK) {
611 for(i = 0; i < 6; i++)
612 load_seg_vm(i, new_segs[i]);
613 /* in vm86, CPL is always 3 */
614 cpu_x86_set_cpl(env, 3);
615 } else {
 616 /* CPL is set to the RPL of CS */
617 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
 618 /* load just the selectors first, as the rest may trigger exceptions */
619 for(i = 0; i < 6; i++)
620 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
621 }
622
623 env->ldt.selector = new_ldt & ~4;
624 env->ldt.base = 0;
625 env->ldt.limit = 0;
626 env->ldt.flags = 0;
627#ifdef VBOX
628 env->ldt.flags = DESC_INTEL_UNUSABLE;
629 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
630 env->ldt.newselector = 0;
631#endif
632
633 /* load the LDT */
634 if (new_ldt & 4)
635 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
636
637 if ((new_ldt & 0xfffc) != 0) {
638 dt = &env->gdt;
639 index = new_ldt & ~7;
640 if ((index + 7) > dt->limit)
641 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
642 ptr = dt->base + index;
643 e1 = ldl_kernel(ptr);
644 e2 = ldl_kernel(ptr + 4);
645 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
646 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
647 if (!(e2 & DESC_P_MASK))
648 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
649 load_seg_cache_raw_dt(&env->ldt, e1, e2);
650 }
651
652 /* load the segments */
653 if (!(new_eflags & VM_MASK)) {
654 tss_load_seg(R_CS, new_segs[R_CS]);
655 tss_load_seg(R_SS, new_segs[R_SS]);
656 tss_load_seg(R_ES, new_segs[R_ES]);
657 tss_load_seg(R_DS, new_segs[R_DS]);
658 tss_load_seg(R_FS, new_segs[R_FS]);
659 tss_load_seg(R_GS, new_segs[R_GS]);
660 }
661
662 /* check that EIP is in the CS segment limits */
663 if (new_eip > env->segs[R_CS].limit) {
664 /* XXX: different exception if CALL ? */
665 raise_exception_err(EXCP0D_GPF, 0);
666 }
667
668#ifndef CONFIG_USER_ONLY
669 /* reset local breakpoints */
670 if (env->dr[7] & 0x55) {
671 for (i = 0; i < 4; i++) {
672 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
673 hw_breakpoint_remove(env, i);
674 }
675 env->dr[7] &= ~0x55;
676 }
677#endif
678}
679
 680/* check if port I/O is allowed by the TSS I/O bitmap */
681static inline void check_io(int addr, int size)
682{
683#ifndef VBOX
684 int io_offset, val, mask;
685#else
686 int val, mask;
687 unsigned int io_offset;
688#endif /* VBOX */
689
690 /* TSS must be a valid 32 bit one */
691 if (!(env->tr.flags & DESC_P_MASK) ||
692 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
693 env->tr.limit < 103)
694 goto fail;
695 io_offset = lduw_kernel(env->tr.base + 0x66);
696 io_offset += (addr >> 3);
697 /* Note: the check needs two bytes */
698 if ((io_offset + 1) > env->tr.limit)
699 goto fail;
700 val = lduw_kernel(env->tr.base + io_offset);
701 val >>= (addr & 7);
702 mask = (1 << size) - 1;
703 /* all bits must be zero to allow the I/O */
704 if ((val & mask) != 0) {
705 fail:
706 raise_exception_err(EXCP0D_GPF, 0);
707 }
708}
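/* Worked example: for check_io(0x3f8, 1) with an I/O bitmap starting at TSS
 * offset 0x68, io_offset becomes 0x68 + (0x3f8 >> 3) = 0xe7; the word loaded
 * from there is shifted right by (0x3f8 & 7) = 0 and bit 0 must be clear for
 * the access to be permitted.  Two bytes are read because a multi-byte port
 * access may straddle a byte boundary in the bitmap. */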
709
710#ifdef VBOX
711
712/* Keep in sync with gen_check_external_event() */
713void helper_check_external_event()
714{
715 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
716 | CPU_INTERRUPT_EXTERNAL_EXIT
717 | CPU_INTERRUPT_EXTERNAL_TIMER
718 | CPU_INTERRUPT_EXTERNAL_DMA))
719 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
720 && (env->eflags & IF_MASK)
721 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
722 {
723 helper_external_event();
724 }
725
726}
727
728void helper_sync_seg(uint32_t reg)
729{
730 if (env->segs[reg].newselector)
731 sync_seg(env, reg, env->segs[reg].newselector);
732}
733
734#endif /* VBOX */
735
736void helper_check_iob(uint32_t t0)
737{
738 check_io(t0, 1);
739}
740
741void helper_check_iow(uint32_t t0)
742{
743 check_io(t0, 2);
744}
745
746void helper_check_iol(uint32_t t0)
747{
748 check_io(t0, 4);
749}
750
751void helper_outb(uint32_t port, uint32_t data)
752{
753#ifndef VBOX
754 cpu_outb(port, data & 0xff);
755#else
756 cpu_outb(env, port, data & 0xff);
757#endif
758}
759
760target_ulong helper_inb(uint32_t port)
761{
762#ifndef VBOX
763 return cpu_inb(port);
764#else
765 return cpu_inb(env, port);
766#endif
767}
768
769void helper_outw(uint32_t port, uint32_t data)
770{
771#ifndef VBOX
772 cpu_outw(port, data & 0xffff);
773#else
774 cpu_outw(env, port, data & 0xffff);
775#endif
776}
777
778target_ulong helper_inw(uint32_t port)
779{
780#ifndef VBOX
781 return cpu_inw(port);
782#else
783 return cpu_inw(env, port);
784#endif
785}
786
787void helper_outl(uint32_t port, uint32_t data)
788{
789#ifndef VBOX
790 cpu_outl(port, data);
791#else
792 cpu_outl(env, port, data);
793#endif
794}
795
796target_ulong helper_inl(uint32_t port)
797{
798#ifndef VBOX
799 return cpu_inl(port);
800#else
801 return cpu_inl(env, port);
802#endif
803}
804
805static inline unsigned int get_sp_mask(unsigned int e2)
806{
807 if (e2 & DESC_B_MASK)
808 return 0xffffffff;
809 else
810 return 0xffff;
811}
812
813static int exeption_has_error_code(int intno)
814{
815 switch(intno) {
816 case 8:
817 case 10:
818 case 11:
819 case 12:
820 case 13:
821 case 14:
822 case 17:
823 return 1;
824 }
825 return 0;
826}
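/* For reference: these are the vectors that push an error code, i.e.
 * #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17). */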
827
828#ifdef TARGET_X86_64
829#define SET_ESP(val, sp_mask)\
830do {\
831 if ((sp_mask) == 0xffff)\
832 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
833 else if ((sp_mask) == 0xffffffffLL)\
834 ESP = (uint32_t)(val);\
835 else\
836 ESP = (val);\
837} while (0)
838#else
839#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
840#endif
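/* For reference: with a 16-bit stack segment (sp_mask == 0xffff) SET_ESP only
 * updates SP and preserves the upper bits of ESP/RSP, e.g. ESP = 0x0001fffe
 * followed by SET_ESP(0x10000, 0xffff) yields ESP == 0x00010000; with a
 * 32-bit stack the value is truncated to 32 bits. */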
841
 842/* with a 64-bit target this addition can overflow 32 bits, so this segment
 843 * addition macro can be used to trim the value to 32 bits whenever needed */
844#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
845
 846/* XXX: add an is_user flag to have proper security support */
847#define PUSHW(ssp, sp, sp_mask, val)\
848{\
849 sp -= 2;\
850 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
851}
852
853#define PUSHL(ssp, sp, sp_mask, val)\
854{\
855 sp -= 4;\
856 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
857}
858
859#define POPW(ssp, sp, sp_mask, val)\
860{\
861 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
862 sp += 2;\
863}
864
865#define POPL(ssp, sp, sp_mask, val)\
866{\
867 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
868 sp += 4;\
869}
870
871/* protected mode interrupt */
872static void do_interrupt_protected(int intno, int is_int, int error_code,
873 unsigned int next_eip, int is_hw)
874{
875 SegmentCache *dt;
876 target_ulong ptr, ssp;
877 int type, dpl, selector, ss_dpl, cpl;
878 int has_error_code, new_stack, shift;
879 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
880 uint32_t old_eip, sp_mask;
881
882#ifdef VBOX
883 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
884 cpu_loop_exit();
885#endif
886
887 has_error_code = 0;
888 if (!is_int && !is_hw)
889 has_error_code = exeption_has_error_code(intno);
890 if (is_int)
891 old_eip = next_eip;
892 else
893 old_eip = env->eip;
894
895 dt = &env->idt;
896#ifndef VBOX
897 if (intno * 8 + 7 > dt->limit)
898#else
899 if ((unsigned)intno * 8 + 7 > dt->limit)
900#endif
901 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
902 ptr = dt->base + intno * 8;
903 e1 = ldl_kernel(ptr);
904 e2 = ldl_kernel(ptr + 4);
905 /* check gate type */
906 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
907 switch(type) {
908 case 5: /* task gate */
909#ifdef VBOX
910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
911 cpl = env->hflags & HF_CPL_MASK;
912 /* check privilege if software int */
913 if (is_int && dpl < cpl)
914 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
915#endif
916 /* must do that check here to return the correct error code */
917 if (!(e2 & DESC_P_MASK))
918 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
919 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
920 if (has_error_code) {
921 int type;
922 uint32_t mask;
923 /* push the error code */
924 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
925 shift = type >> 3;
926 if (env->segs[R_SS].flags & DESC_B_MASK)
927 mask = 0xffffffff;
928 else
929 mask = 0xffff;
930 esp = (ESP - (2 << shift)) & mask;
931 ssp = env->segs[R_SS].base + esp;
932 if (shift)
933 stl_kernel(ssp, error_code);
934 else
935 stw_kernel(ssp, error_code);
936 SET_ESP(esp, mask);
937 }
938 return;
939 case 6: /* 286 interrupt gate */
940 case 7: /* 286 trap gate */
941 case 14: /* 386 interrupt gate */
942 case 15: /* 386 trap gate */
943 break;
944 default:
945 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
946 break;
947 }
948 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
949 cpl = env->hflags & HF_CPL_MASK;
950 /* check privilege if software int */
951 if (is_int && dpl < cpl)
952 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
953 /* check valid bit */
954 if (!(e2 & DESC_P_MASK))
955 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
956 selector = e1 >> 16;
957 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
958 if ((selector & 0xfffc) == 0)
959 raise_exception_err(EXCP0D_GPF, 0);
960
961 if (load_segment(&e1, &e2, selector) != 0)
962 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
963#ifdef VBOX /** @todo figure out when this is done one day... */
964 if (!(e2 & DESC_A_MASK))
965 e2 = set_segment_accessed(selector, e2);
966#endif
967 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
968 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
969 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
970 if (dpl > cpl)
971 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
972 if (!(e2 & DESC_P_MASK))
973 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
974 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
975 /* to inner privilege */
976 get_ss_esp_from_tss(&ss, &esp, dpl);
977 if ((ss & 0xfffc) == 0)
978 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
979 if ((ss & 3) != dpl)
980 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
981 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
982 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
983#ifdef VBOX /** @todo figure out when this is done one day... */
984 if (!(ss_e2 & DESC_A_MASK))
985 ss_e2 = set_segment_accessed(ss, ss_e2);
986#endif
987 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
988 if (ss_dpl != dpl)
989 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
990 if (!(ss_e2 & DESC_S_MASK) ||
991 (ss_e2 & DESC_CS_MASK) ||
992 !(ss_e2 & DESC_W_MASK))
993 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
994 if (!(ss_e2 & DESC_P_MASK))
995#ifdef VBOX /* See page 3-477 of 253666.pdf */
996 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
997#else
998 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
999#endif
1000 new_stack = 1;
1001 sp_mask = get_sp_mask(ss_e2);
1002 ssp = get_seg_base(ss_e1, ss_e2);
1003#if defined(VBOX) && defined(DEBUG)
1004 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1005#endif
1006 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1007 /* to same privilege */
1008 if (env->eflags & VM_MASK)
1009 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1010 new_stack = 0;
1011 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1012 ssp = env->segs[R_SS].base;
1013 esp = ESP;
1014 dpl = cpl;
1015 } else {
1016 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1017 new_stack = 0; /* avoid warning */
1018 sp_mask = 0; /* avoid warning */
1019 ssp = 0; /* avoid warning */
1020 esp = 0; /* avoid warning */
1021 }
1022
1023 shift = type >> 3;
1024
1025#if 0
1026 /* XXX: check that enough room is available */
1027 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1028 if (env->eflags & VM_MASK)
1029 push_size += 8;
1030 push_size <<= shift;
1031#endif
1032 if (shift == 1) {
1033 if (new_stack) {
1034 if (env->eflags & VM_MASK) {
1035 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1036 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1037 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1038 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1039 }
1040 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1041 PUSHL(ssp, esp, sp_mask, ESP);
1042 }
1043 PUSHL(ssp, esp, sp_mask, compute_eflags());
1044 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1045 PUSHL(ssp, esp, sp_mask, old_eip);
1046 if (has_error_code) {
1047 PUSHL(ssp, esp, sp_mask, error_code);
1048 }
1049 } else {
1050 if (new_stack) {
1051 if (env->eflags & VM_MASK) {
1052 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1053 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1054 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1055 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1056 }
1057 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1058 PUSHW(ssp, esp, sp_mask, ESP);
1059 }
1060 PUSHW(ssp, esp, sp_mask, compute_eflags());
1061 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1062 PUSHW(ssp, esp, sp_mask, old_eip);
1063 if (has_error_code) {
1064 PUSHW(ssp, esp, sp_mask, error_code);
1065 }
1066 }
1067
1068 if (new_stack) {
1069 if (env->eflags & VM_MASK) {
1070 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1071 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1072 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1073 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1074 }
1075 ss = (ss & ~3) | dpl;
1076 cpu_x86_load_seg_cache(env, R_SS, ss,
1077 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1078 }
1079 SET_ESP(esp, sp_mask);
1080
1081 selector = (selector & ~3) | dpl;
1082 cpu_x86_load_seg_cache(env, R_CS, selector,
1083 get_seg_base(e1, e2),
1084 get_seg_limit(e1, e2),
1085 e2);
1086 cpu_x86_set_cpl(env, dpl);
1087 env->eip = offset;
1088
 1089 /* interrupt gates clear the IF mask */
1090 if ((type & 1) == 0) {
1091 env->eflags &= ~IF_MASK;
1092 }
1093#ifndef VBOX
1094 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1095#else
1096 /*
1097 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1098 * gets confused by seemingly changed EFLAGS. See #3491 and
1099 * public bug #2341.
1100 */
1101 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1102#endif
1103}
1104
1105#ifdef VBOX
1106
1107/* check if VME interrupt redirection is enabled in TSS */
1108DECLINLINE(bool) is_vme_irq_redirected(int intno)
1109{
1110 unsigned int io_offset, intredir_offset;
1111 unsigned char val, mask;
1112
1113 /* TSS must be a valid 32 bit one */
1114 if (!(env->tr.flags & DESC_P_MASK) ||
1115 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1116 env->tr.limit < 103)
1117 goto fail;
1118 io_offset = lduw_kernel(env->tr.base + 0x66);
1119 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1120 if (io_offset < 0x68 + 0x20)
1121 io_offset = 0x68 + 0x20;
1122 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1123 intredir_offset = io_offset - 0x20;
1124
1125 intredir_offset += (intno >> 3);
1126 if ((intredir_offset) > env->tr.limit)
1127 goto fail;
1128
1129 val = ldub_kernel(env->tr.base + intredir_offset);
1130 mask = 1 << (unsigned char)(intno & 7);
1131
1132 /* bit set means no redirection. */
1133 if ((val & mask) != 0) {
1134 return false;
1135 }
1136 return true;
1137
1138fail:
1139 raise_exception_err(EXCP0D_GPF, 0);
1140 return true;
1141}
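/* For reference: with CR4.VME the 32-byte interrupt redirection bitmap sits
 * immediately below the I/O permission bitmap in the TSS, one bit per
 * software interrupt vector; a clear bit means the INT n is redirected to
 * the virtual-8086 interrupt vector table, a set bit forces normal
 * protected-mode delivery. */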
1142
1143/* V86 mode software interrupt with CR4.VME=1 */
1144static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1145{
1146 target_ulong ptr, ssp;
1147 int selector;
1148 uint32_t offset, esp;
1149 uint32_t old_cs, old_eflags;
1150 uint32_t iopl;
1151
1152 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1153
1154 if (!is_vme_irq_redirected(intno))
1155 {
1156 if (iopl == 3)
1157 {
1158 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1159 return;
1160 }
1161 else
1162 raise_exception_err(EXCP0D_GPF, 0);
1163 }
1164
 1165 /* the virtual-8086 mode IDT (real-mode style IVT) is at linear address 0 */
1166 ptr = 0 + intno * 4;
1167 offset = lduw_kernel(ptr);
1168 selector = lduw_kernel(ptr + 2);
1169 esp = ESP;
1170 ssp = env->segs[R_SS].base;
1171 old_cs = env->segs[R_CS].selector;
1172
1173 old_eflags = compute_eflags();
1174 if (iopl < 3)
1175 {
1176 /* copy VIF into IF and set IOPL to 3 */
1177 if (env->eflags & VIF_MASK)
1178 old_eflags |= IF_MASK;
1179 else
1180 old_eflags &= ~IF_MASK;
1181
1182 old_eflags |= (3 << IOPL_SHIFT);
1183 }
1184
1185 /* XXX: use SS segment size ? */
1186 PUSHW(ssp, esp, 0xffff, old_eflags);
1187 PUSHW(ssp, esp, 0xffff, old_cs);
1188 PUSHW(ssp, esp, 0xffff, next_eip);
1189
1190 /* update processor state */
1191 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1192 env->eip = offset;
1193 env->segs[R_CS].selector = selector;
1194 env->segs[R_CS].base = (selector << 4);
1195 env->eflags &= ~(TF_MASK | RF_MASK);
1196
1197 if (iopl < 3)
1198 env->eflags &= ~VIF_MASK;
1199 else
1200 env->eflags &= ~IF_MASK;
1201}
1202
1203#endif /* VBOX */
1204
1205#ifdef TARGET_X86_64
1206
1207#define PUSHQ(sp, val)\
1208{\
1209 sp -= 8;\
1210 stq_kernel(sp, (val));\
1211}
1212
1213#define POPQ(sp, val)\
1214{\
1215 val = ldq_kernel(sp);\
1216 sp += 8;\
1217}
1218
1219static inline target_ulong get_rsp_from_tss(int level)
1220{
1221 int index;
1222
1223#if 0
1224 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1225 env->tr.base, env->tr.limit);
1226#endif
1227
1228 if (!(env->tr.flags & DESC_P_MASK))
1229 cpu_abort(env, "invalid tss");
1230 index = 8 * level + 4;
1231 if ((index + 7) > env->tr.limit)
1232 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1233 return ldq_kernel(env->tr.base + index);
1234}
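/* For reference: in the 64-bit TSS the callers pass level = dpl (0..2) to
 * fetch RSP0..RSP2 at offsets 0x04/0x0c/0x14, or level = ist + 3 (4..10) to
 * fetch IST1..IST7 starting at offset 0x24; index = 8 * level + 4 covers
 * both cases. */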
1235
1236/* 64 bit interrupt */
1237static void do_interrupt64(int intno, int is_int, int error_code,
1238 target_ulong next_eip, int is_hw)
1239{
1240 SegmentCache *dt;
1241 target_ulong ptr;
1242 int type, dpl, selector, cpl, ist;
1243 int has_error_code, new_stack;
1244 uint32_t e1, e2, e3, ss;
1245 target_ulong old_eip, esp, offset;
1246
1247#ifdef VBOX
1248 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1249 cpu_loop_exit();
1250#endif
1251
1252 has_error_code = 0;
1253 if (!is_int && !is_hw)
1254 has_error_code = exeption_has_error_code(intno);
1255 if (is_int)
1256 old_eip = next_eip;
1257 else
1258 old_eip = env->eip;
1259
1260 dt = &env->idt;
1261 if (intno * 16 + 15 > dt->limit)
1262 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1263 ptr = dt->base + intno * 16;
1264 e1 = ldl_kernel(ptr);
1265 e2 = ldl_kernel(ptr + 4);
1266 e3 = ldl_kernel(ptr + 8);
1267 /* check gate type */
1268 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1269 switch(type) {
1270 case 14: /* 386 interrupt gate */
1271 case 15: /* 386 trap gate */
1272 break;
1273 default:
1274 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1275 break;
1276 }
1277 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1278 cpl = env->hflags & HF_CPL_MASK;
1279 /* check privilege if software int */
1280 if (is_int && dpl < cpl)
1281 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1282 /* check valid bit */
1283 if (!(e2 & DESC_P_MASK))
1284 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1285 selector = e1 >> 16;
1286 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1287 ist = e2 & 7;
1288 if ((selector & 0xfffc) == 0)
1289 raise_exception_err(EXCP0D_GPF, 0);
1290
1291 if (load_segment(&e1, &e2, selector) != 0)
1292 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1293 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1294 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1295 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1296 if (dpl > cpl)
1297 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1298 if (!(e2 & DESC_P_MASK))
1299 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1300 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1301 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1302 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1303 /* to inner privilege */
1304 if (ist != 0)
1305 esp = get_rsp_from_tss(ist + 3);
1306 else
1307 esp = get_rsp_from_tss(dpl);
1308 esp &= ~0xfLL; /* align stack */
1309 ss = 0;
1310 new_stack = 1;
1311 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1312 /* to same privilege */
1313 if (env->eflags & VM_MASK)
1314 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1315 new_stack = 0;
1316 if (ist != 0)
1317 esp = get_rsp_from_tss(ist + 3);
1318 else
1319 esp = ESP;
1320 esp &= ~0xfLL; /* align stack */
1321 dpl = cpl;
1322 } else {
1323 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1324 new_stack = 0; /* avoid warning */
1325 esp = 0; /* avoid warning */
1326 }
1327
1328 PUSHQ(esp, env->segs[R_SS].selector);
1329 PUSHQ(esp, ESP);
1330 PUSHQ(esp, compute_eflags());
1331 PUSHQ(esp, env->segs[R_CS].selector);
1332 PUSHQ(esp, old_eip);
1333 if (has_error_code) {
1334 PUSHQ(esp, error_code);
1335 }
1336
1337 if (new_stack) {
1338 ss = 0 | dpl;
1339#ifndef VBOX
1340 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1341#else
1342 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1343#endif
1344 }
1345 ESP = esp;
1346
1347 selector = (selector & ~3) | dpl;
1348 cpu_x86_load_seg_cache(env, R_CS, selector,
1349 get_seg_base(e1, e2),
1350 get_seg_limit(e1, e2),
1351 e2);
1352 cpu_x86_set_cpl(env, dpl);
1353 env->eip = offset;
1354
 1355 /* interrupt gates clear the IF mask */
1356 if ((type & 1) == 0) {
1357 env->eflags &= ~IF_MASK;
1358 }
1359#ifndef VBOX
1360 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1361#else /* VBOX */
1362 /*
1363 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1364 * gets confused by seemingly changed EFLAGS. See #3491 and
1365 * public bug #2341.
1366 */
1367 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1368#endif /* VBOX */
1369}
1370#endif
1371
1372#ifdef TARGET_X86_64
1373#if defined(CONFIG_USER_ONLY)
1374void helper_syscall(int next_eip_addend)
1375{
1376 env->exception_index = EXCP_SYSCALL;
1377 env->exception_next_eip = env->eip + next_eip_addend;
1378 cpu_loop_exit();
1379}
1380#else
1381void helper_syscall(int next_eip_addend)
1382{
1383 int selector;
1384
1385 if (!(env->efer & MSR_EFER_SCE)) {
1386 raise_exception_err(EXCP06_ILLOP, 0);
1387 }
1388 selector = (env->star >> 32) & 0xffff;
1389 if (env->hflags & HF_LMA_MASK) {
1390 int code64;
1391
1392 ECX = env->eip + next_eip_addend;
1393 env->regs[11] = compute_eflags();
1394
1395 code64 = env->hflags & HF_CS64_MASK;
1396
1397 cpu_x86_set_cpl(env, 0);
1398 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1399 0, 0xffffffff,
1400 DESC_G_MASK | DESC_P_MASK |
1401 DESC_S_MASK |
1402 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1403 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1404 0, 0xffffffff,
1405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1406 DESC_S_MASK |
1407 DESC_W_MASK | DESC_A_MASK);
1408 env->eflags &= ~env->fmask;
1409 load_eflags(env->eflags, 0);
1410 if (code64)
1411 env->eip = env->lstar;
1412 else
1413 env->eip = env->cstar;
1414 } else {
1415 ECX = (uint32_t)(env->eip + next_eip_addend);
1416
1417 cpu_x86_set_cpl(env, 0);
1418 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1419 0, 0xffffffff,
1420 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1421 DESC_S_MASK |
1422 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1423 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1424 0, 0xffffffff,
1425 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1426 DESC_S_MASK |
1427 DESC_W_MASK | DESC_A_MASK);
1428 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1429 env->eip = (uint32_t)env->star;
1430 }
1431}
1432#endif
1433#endif
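/* For reference: MSR_STAR packs the SYSCALL CS selector into bits 47:32 (SS
 * is loaded as that selector + 8) and the SYSRET base selector into bits
 * 63:48, while bits 31:0 hold the legacy-mode SYSCALL EIP; this is why
 * helper_syscall() extracts (env->star >> 32) and helper_sysret() extracts
 * (env->star >> 48). */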
1434
1435#ifdef TARGET_X86_64
1436void helper_sysret(int dflag)
1437{
1438 int cpl, selector;
1439
1440 if (!(env->efer & MSR_EFER_SCE)) {
1441 raise_exception_err(EXCP06_ILLOP, 0);
1442 }
1443 cpl = env->hflags & HF_CPL_MASK;
1444 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1445 raise_exception_err(EXCP0D_GPF, 0);
1446 }
1447 selector = (env->star >> 48) & 0xffff;
1448 if (env->hflags & HF_LMA_MASK) {
1449 if (dflag == 2) {
1450 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1451 0, 0xffffffff,
1452 DESC_G_MASK | DESC_P_MASK |
1453 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1454 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1455 DESC_L_MASK);
1456 env->eip = ECX;
1457 } else {
1458 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1459 0, 0xffffffff,
1460 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1461 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1462 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1463 env->eip = (uint32_t)ECX;
1464 }
1465 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1466 0, 0xffffffff,
1467 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1468 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1469 DESC_W_MASK | DESC_A_MASK);
1470 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1471 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1472 cpu_x86_set_cpl(env, 3);
1473 } else {
1474 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1475 0, 0xffffffff,
1476 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1477 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1478 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1479 env->eip = (uint32_t)ECX;
1480 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1481 0, 0xffffffff,
1482 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1483 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1484 DESC_W_MASK | DESC_A_MASK);
1485 env->eflags |= IF_MASK;
1486 cpu_x86_set_cpl(env, 3);
1487 }
1488}
1489#endif
1490
1491#ifdef VBOX
1492
1493/**
1494 * Checks and processes external VMM events.
1495 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1496 */
1497void helper_external_event(void)
1498{
1499# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1500 uintptr_t uSP;
1501# ifdef RT_ARCH_AMD64
1502 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1503# else
1504 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1505# endif
1506 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1507# endif
1508 /* Keep in sync with flags checked by gen_check_external_event() */
1509 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1510 {
1511 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1512 ~CPU_INTERRUPT_EXTERNAL_HARD);
1513 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1514 }
1515 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1516 {
1517 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1518 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1519 cpu_exit(env);
1520 }
1521 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1522 {
1523 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1524 ~CPU_INTERRUPT_EXTERNAL_DMA);
1525 remR3DmaRun(env);
1526 }
1527 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1528 {
1529 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1530 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1531 remR3TimersRun(env);
1532 }
1533 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1534 {
1535 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1536 ~CPU_INTERRUPT_EXTERNAL_HARD);
1537 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1538 }
1539}
1540
1541/* helper for recording call instruction addresses for later scanning */
1542void helper_record_call()
1543{
1544 if ( !(env->state & CPU_RAW_RING0)
1545 && (env->cr[0] & CR0_PG_MASK)
1546 && !(env->eflags & X86_EFL_IF))
1547 remR3RecordCall(env);
1548}
1549
1550#endif /* VBOX */
1551
1552/* real mode interrupt */
1553static void do_interrupt_real(int intno, int is_int, int error_code,
1554 unsigned int next_eip)
1555{
1556 SegmentCache *dt;
1557 target_ulong ptr, ssp;
1558 int selector;
1559 uint32_t offset, esp;
1560 uint32_t old_cs, old_eip;
1561
1562 /* real mode (simpler !) */
1563 dt = &env->idt;
1564#ifndef VBOX
1565 if (intno * 4 + 3 > dt->limit)
1566#else
1567 if ((unsigned)intno * 4 + 3 > dt->limit)
1568#endif
1569 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1570 ptr = dt->base + intno * 4;
1571 offset = lduw_kernel(ptr);
1572 selector = lduw_kernel(ptr + 2);
1573 esp = ESP;
1574 ssp = env->segs[R_SS].base;
1575 if (is_int)
1576 old_eip = next_eip;
1577 else
1578 old_eip = env->eip;
1579 old_cs = env->segs[R_CS].selector;
1580 /* XXX: use SS segment size ? */
1581 PUSHW(ssp, esp, 0xffff, compute_eflags());
1582 PUSHW(ssp, esp, 0xffff, old_cs);
1583 PUSHW(ssp, esp, 0xffff, old_eip);
1584
1585 /* update processor state */
1586 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1587 env->eip = offset;
1588 env->segs[R_CS].selector = selector;
1589 env->segs[R_CS].base = (selector << 4);
1590 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1591}
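/* For reference: in real mode vector n occupies 4 bytes at linear address
 * n * 4 (the 16-bit offset first, then the 16-bit segment), so e.g. the
 * INT 0x10 handler pointer lives at 0x40..0x43; the handler sees FLAGS, CS
 * and IP pushed on the stack in that order, as done above. */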
1592
1593/* fake user mode interrupt */
1594void do_interrupt_user(int intno, int is_int, int error_code,
1595 target_ulong next_eip)
1596{
1597 SegmentCache *dt;
1598 target_ulong ptr;
1599 int dpl, cpl, shift;
1600 uint32_t e2;
1601
1602 dt = &env->idt;
1603 if (env->hflags & HF_LMA_MASK) {
1604 shift = 4;
1605 } else {
1606 shift = 3;
1607 }
1608 ptr = dt->base + (intno << shift);
1609 e2 = ldl_kernel(ptr + 4);
1610
1611 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1612 cpl = env->hflags & HF_CPL_MASK;
1613 /* check privilege if software int */
1614 if (is_int && dpl < cpl)
1615 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1616
 1617 /* Since we emulate only user space, we cannot do more than exit
 1618 the emulation with the suitable exception and error
 1619 code */
1620 if (is_int)
1621 EIP = next_eip;
1622}
1623
1624#if !defined(CONFIG_USER_ONLY)
1625static void handle_even_inj(int intno, int is_int, int error_code,
1626 int is_hw, int rm)
1627{
1628 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1629 if (!(event_inj & SVM_EVTINJ_VALID)) {
1630 int type;
1631 if (is_int)
1632 type = SVM_EVTINJ_TYPE_SOFT;
1633 else
1634 type = SVM_EVTINJ_TYPE_EXEPT;
1635 event_inj = intno | type | SVM_EVTINJ_VALID;
1636 if (!rm && exeption_has_error_code(intno)) {
1637 event_inj |= SVM_EVTINJ_VALID_ERR;
1638 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1639 }
1640 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1641 }
1642}
1643#endif
1644
1645/*
 1646 * Begin execution of an interrupt. is_int is TRUE if coming from
1647 * the int instruction. next_eip is the EIP value AFTER the interrupt
1648 * instruction. It is only relevant if is_int is TRUE.
1649 */
1650void do_interrupt(int intno, int is_int, int error_code,
1651 target_ulong next_eip, int is_hw)
1652{
1653 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1654 if ((env->cr[0] & CR0_PE_MASK)) {
1655 static int count;
1656 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1657 count, intno, error_code, is_int,
1658 env->hflags & HF_CPL_MASK,
1659 env->segs[R_CS].selector, EIP,
1660 (int)env->segs[R_CS].base + EIP,
1661 env->segs[R_SS].selector, ESP);
1662 if (intno == 0x0e) {
1663 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1664 } else {
1665 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1666 }
1667 qemu_log("\n");
1668 log_cpu_state(env, X86_DUMP_CCOP);
1669#if 0
1670 {
1671 int i;
1672 uint8_t *ptr;
1673 qemu_log(" code=");
1674 ptr = env->segs[R_CS].base + env->eip;
1675 for(i = 0; i < 16; i++) {
1676 qemu_log(" %02x", ldub(ptr + i));
1677 }
1678 qemu_log("\n");
1679 }
1680#endif
1681 count++;
1682 }
1683 }
1684#ifdef VBOX
1685 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1686 if (is_int) {
1687 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1688 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1689 } else {
1690 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1691 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1692 }
1693 }
1694#endif
1695 if (env->cr[0] & CR0_PE_MASK) {
1696#if !defined(CONFIG_USER_ONLY)
1697 if (env->hflags & HF_SVMI_MASK)
1698 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1699#endif
1700#ifdef TARGET_X86_64
1701 if (env->hflags & HF_LMA_MASK) {
1702 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1703 } else
1704#endif
1705 {
1706#ifdef VBOX
1707 /* int xx *, v86 code and VME enabled? */
1708 if ( (env->eflags & VM_MASK)
1709 && (env->cr[4] & CR4_VME_MASK)
1710 && is_int
1711 && !is_hw
1712 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1713 )
1714 do_soft_interrupt_vme(intno, error_code, next_eip);
1715 else
1716#endif /* VBOX */
1717 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1718 }
1719 } else {
1720#if !defined(CONFIG_USER_ONLY)
1721 if (env->hflags & HF_SVMI_MASK)
1722 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1723#endif
1724 do_interrupt_real(intno, is_int, error_code, next_eip);
1725 }
1726
1727#if !defined(CONFIG_USER_ONLY)
1728 if (env->hflags & HF_SVMI_MASK) {
1729 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1730 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1731 }
1732#endif
1733}
1734
1735/* This should come from sysemu.h - if we could include it here... */
1736void qemu_system_reset_request(void);
1737
1738/*
1739 * Check nested exceptions and change to double or triple fault if
 1740 * needed. It should only be called if this is not an interrupt.
1741 * Returns the new exception number.
1742 */
1743static int check_exception(int intno, int *error_code)
1744{
1745 int first_contributory = env->old_exception == 0 ||
1746 (env->old_exception >= 10 &&
1747 env->old_exception <= 13);
1748 int second_contributory = intno == 0 ||
1749 (intno >= 10 && intno <= 13);
1750
1751 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1752 env->old_exception, intno);
1753
1754#if !defined(CONFIG_USER_ONLY)
1755 if (env->old_exception == EXCP08_DBLE) {
1756 if (env->hflags & HF_SVMI_MASK)
1757 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1758
1759 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1760
1761# ifndef VBOX
1762 qemu_system_reset_request();
1763# else
 1764 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1765# endif
1766 return EXCP_HLT;
1767 }
1768#endif
1769
1770 if ((first_contributory && second_contributory)
1771 || (env->old_exception == EXCP0E_PAGE &&
1772 (second_contributory || (intno == EXCP0E_PAGE)))) {
1773 intno = EXCP08_DBLE;
1774 *error_code = 0;
1775 }
1776
1777 if (second_contributory || (intno == EXCP0E_PAGE) ||
1778 (intno == EXCP08_DBLE))
1779 env->old_exception = intno;
1780
1781 return intno;
1782}
1783
1784/*
 1785 * Signal an interrupt. It is executed in the main CPU loop.
1786 * is_int is TRUE if coming from the int instruction. next_eip is the
1787 * EIP value AFTER the interrupt instruction. It is only relevant if
1788 * is_int is TRUE.
1789 */
1790static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1791 int next_eip_addend)
1792{
1793#if defined(VBOX) && defined(DEBUG)
1794 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1795#endif
1796 if (!is_int) {
1797 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1798 intno = check_exception(intno, &error_code);
1799 } else {
1800 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1801 }
1802
1803 env->exception_index = intno;
1804 env->error_code = error_code;
1805 env->exception_is_int = is_int;
1806 env->exception_next_eip = env->eip + next_eip_addend;
1807 cpu_loop_exit();
1808}
1809
1810/* shortcuts to generate exceptions */
1811
1812void raise_exception_err(int exception_index, int error_code)
1813{
1814 raise_interrupt(exception_index, 0, error_code, 0);
1815}
1816
1817void raise_exception(int exception_index)
1818{
1819 raise_interrupt(exception_index, 0, 0, 0);
1820}
1821
1822void raise_exception_env(int exception_index, CPUState *nenv)
1823{
1824 env = nenv;
1825 raise_exception(exception_index);
1826}
1827/* SMM support */
1828
1829#if defined(CONFIG_USER_ONLY)
1830
1831void do_smm_enter(void)
1832{
1833}
1834
1835void helper_rsm(void)
1836{
1837}
1838
1839#else
1840
1841#ifdef TARGET_X86_64
1842#define SMM_REVISION_ID 0x00020064
1843#else
1844#define SMM_REVISION_ID 0x00020000
1845#endif
1846
1847void do_smm_enter(void)
1848{
1849 target_ulong sm_state;
1850 SegmentCache *dt;
1851 int i, offset;
1852
1853 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1854 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1855
1856 env->hflags |= HF_SMM_MASK;
1857 cpu_smm_update(env);
1858
1859 sm_state = env->smbase + 0x8000;
1860
1861#ifdef TARGET_X86_64
1862 for(i = 0; i < 6; i++) {
1863 dt = &env->segs[i];
1864 offset = 0x7e00 + i * 16;
1865 stw_phys(sm_state + offset, dt->selector);
1866 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1867 stl_phys(sm_state + offset + 4, dt->limit);
1868 stq_phys(sm_state + offset + 8, dt->base);
1869 }
1870
1871 stq_phys(sm_state + 0x7e68, env->gdt.base);
1872 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1873
1874 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1875 stq_phys(sm_state + 0x7e78, env->ldt.base);
1876 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1877 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1878
1879 stq_phys(sm_state + 0x7e88, env->idt.base);
1880 stl_phys(sm_state + 0x7e84, env->idt.limit);
1881
1882 stw_phys(sm_state + 0x7e90, env->tr.selector);
1883 stq_phys(sm_state + 0x7e98, env->tr.base);
1884 stl_phys(sm_state + 0x7e94, env->tr.limit);
1885 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1886
1887 stq_phys(sm_state + 0x7ed0, env->efer);
1888
1889 stq_phys(sm_state + 0x7ff8, EAX);
1890 stq_phys(sm_state + 0x7ff0, ECX);
1891 stq_phys(sm_state + 0x7fe8, EDX);
1892 stq_phys(sm_state + 0x7fe0, EBX);
1893 stq_phys(sm_state + 0x7fd8, ESP);
1894 stq_phys(sm_state + 0x7fd0, EBP);
1895 stq_phys(sm_state + 0x7fc8, ESI);
1896 stq_phys(sm_state + 0x7fc0, EDI);
1897 for(i = 8; i < 16; i++)
1898 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1899 stq_phys(sm_state + 0x7f78, env->eip);
1900 stl_phys(sm_state + 0x7f70, compute_eflags());
1901 stl_phys(sm_state + 0x7f68, env->dr[6]);
1902 stl_phys(sm_state + 0x7f60, env->dr[7]);
1903
1904 stl_phys(sm_state + 0x7f48, env->cr[4]);
1905 stl_phys(sm_state + 0x7f50, env->cr[3]);
1906 stl_phys(sm_state + 0x7f58, env->cr[0]);
1907
1908 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1909 stl_phys(sm_state + 0x7f00, env->smbase);
1910#else
1911 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1912 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1913 stl_phys(sm_state + 0x7ff4, compute_eflags());
1914 stl_phys(sm_state + 0x7ff0, env->eip);
1915 stl_phys(sm_state + 0x7fec, EDI);
1916 stl_phys(sm_state + 0x7fe8, ESI);
1917 stl_phys(sm_state + 0x7fe4, EBP);
1918 stl_phys(sm_state + 0x7fe0, ESP);
1919 stl_phys(sm_state + 0x7fdc, EBX);
1920 stl_phys(sm_state + 0x7fd8, EDX);
1921 stl_phys(sm_state + 0x7fd4, ECX);
1922 stl_phys(sm_state + 0x7fd0, EAX);
1923 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1924 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1925
1926 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1927 stl_phys(sm_state + 0x7f64, env->tr.base);
1928 stl_phys(sm_state + 0x7f60, env->tr.limit);
1929 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1930
1931 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1932 stl_phys(sm_state + 0x7f80, env->ldt.base);
1933 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1934 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1935
1936 stl_phys(sm_state + 0x7f74, env->gdt.base);
1937 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1938
1939 stl_phys(sm_state + 0x7f58, env->idt.base);
1940 stl_phys(sm_state + 0x7f54, env->idt.limit);
1941
1942 for(i = 0; i < 6; i++) {
1943 dt = &env->segs[i];
1944 if (i < 3)
1945 offset = 0x7f84 + i * 12;
1946 else
1947 offset = 0x7f2c + (i - 3) * 12;
1948 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1949 stl_phys(sm_state + offset + 8, dt->base);
1950 stl_phys(sm_state + offset + 4, dt->limit);
1951 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1952 }
1953 stl_phys(sm_state + 0x7f14, env->cr[4]);
1954
1955 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1956 stl_phys(sm_state + 0x7ef8, env->smbase);
1957#endif
1958 /* init SMM cpu state */
1959
1960#ifdef TARGET_X86_64
1961 cpu_load_efer(env, 0);
1962#endif
1963 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1964 env->eip = 0x00008000;
1965 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1966 0xffffffff, 0);
1967 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1968 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1969 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1970 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1971 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1972
1973 cpu_x86_update_cr0(env,
1974 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1975 cpu_x86_update_cr4(env, 0);
1976 env->dr[7] = 0x00000400;
1977 CC_OP = CC_OP_EFLAGS;
1978}
1979
1980void helper_rsm(void)
1981{
1982#ifdef VBOX
1983 cpu_abort(env, "helper_rsm");
1984#else /* !VBOX */
1985 target_ulong sm_state;
1986 int i, offset;
1987 uint32_t val;
1988
1989 sm_state = env->smbase + 0x8000;
1990#ifdef TARGET_X86_64
1991 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1992
1993 for(i = 0; i < 6; i++) {
1994 offset = 0x7e00 + i * 16;
1995 cpu_x86_load_seg_cache(env, i,
1996 lduw_phys(sm_state + offset),
1997 ldq_phys(sm_state + offset + 8),
1998 ldl_phys(sm_state + offset + 4),
1999 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
2000 }
2001
2002 env->gdt.base = ldq_phys(sm_state + 0x7e68);
2003 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
2004
2005 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
2006 env->ldt.base = ldq_phys(sm_state + 0x7e78);
2007 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
2008 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2009#ifdef VBOX
2010 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2011 env->ldt.newselector = 0;
2012#endif
2013
2014 env->idt.base = ldq_phys(sm_state + 0x7e88);
2015 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2016
2017 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2018 env->tr.base = ldq_phys(sm_state + 0x7e98);
2019 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2020 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2021#ifdef VBOX
2022 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2023 env->tr.newselector = 0;
2024#endif
2025
2026 EAX = ldq_phys(sm_state + 0x7ff8);
2027 ECX = ldq_phys(sm_state + 0x7ff0);
2028 EDX = ldq_phys(sm_state + 0x7fe8);
2029 EBX = ldq_phys(sm_state + 0x7fe0);
2030 ESP = ldq_phys(sm_state + 0x7fd8);
2031 EBP = ldq_phys(sm_state + 0x7fd0);
2032 ESI = ldq_phys(sm_state + 0x7fc8);
2033 EDI = ldq_phys(sm_state + 0x7fc0);
2034 for(i = 8; i < 16; i++)
2035 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2036 env->eip = ldq_phys(sm_state + 0x7f78);
2037 load_eflags(ldl_phys(sm_state + 0x7f70),
2038 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2039 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2040 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2041
2042 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2043 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2044 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2045
2046 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2047 if (val & 0x20000) {
2048 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2049 }
2050#else
2051 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2052 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2053 load_eflags(ldl_phys(sm_state + 0x7ff4),
2054 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2055 env->eip = ldl_phys(sm_state + 0x7ff0);
2056 EDI = ldl_phys(sm_state + 0x7fec);
2057 ESI = ldl_phys(sm_state + 0x7fe8);
2058 EBP = ldl_phys(sm_state + 0x7fe4);
2059 ESP = ldl_phys(sm_state + 0x7fe0);
2060 EBX = ldl_phys(sm_state + 0x7fdc);
2061 EDX = ldl_phys(sm_state + 0x7fd8);
2062 ECX = ldl_phys(sm_state + 0x7fd4);
2063 EAX = ldl_phys(sm_state + 0x7fd0);
2064 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2065 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2066
2067 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2068 env->tr.base = ldl_phys(sm_state + 0x7f64);
2069 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2070 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2071#ifdef VBOX
2072 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2073 env->tr.newselector = 0;
2074#endif
2075
2076 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2077 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2078 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2079 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2080#ifdef VBOX
2081 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2082 env->ldt.newselector = 0;
2083#endif
2084
2085 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2086 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2087
2088 env->idt.base = ldl_phys(sm_state + 0x7f58);
2089 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2090
2091 for(i = 0; i < 6; i++) {
2092 if (i < 3)
2093 offset = 0x7f84 + i * 12;
2094 else
2095 offset = 0x7f2c + (i - 3) * 12;
2096 cpu_x86_load_seg_cache(env, i,
2097 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2098 ldl_phys(sm_state + offset + 8),
2099 ldl_phys(sm_state + offset + 4),
2100 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2101 }
2102 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2103
2104 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2105 if (val & 0x20000) {
2106 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2107 }
2108#endif
2109 CC_OP = CC_OP_EFLAGS;
2110 env->hflags &= ~HF_SMM_MASK;
2111 cpu_smm_update(env);
2112
2113 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2114 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2115#endif /* !VBOX */
2116}
2117
2118#endif /* !CONFIG_USER_ONLY */
2119
2120
2121/* division, flags are undefined */
2122
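/* Unsigned 8-bit DIV: AX is divided by the 8-bit operand; the quotient goes
   to AL and the remainder to AH. A zero divisor or a quotient that does not
   fit in 8 bits raises #DE, matching hardware behaviour. The wider variants
   below follow the same pattern for AX/DX and EAX/EDX. */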
2123void helper_divb_AL(target_ulong t0)
2124{
2125 unsigned int num, den, q, r;
2126
2127 num = (EAX & 0xffff);
2128 den = (t0 & 0xff);
2129 if (den == 0) {
2130 raise_exception(EXCP00_DIVZ);
2131 }
2132 q = (num / den);
2133 if (q > 0xff)
2134 raise_exception(EXCP00_DIVZ);
2135 q &= 0xff;
2136 r = (num % den) & 0xff;
2137 EAX = (EAX & ~0xffff) | (r << 8) | q;
2138}
2139
2140void helper_idivb_AL(target_ulong t0)
2141{
2142 int num, den, q, r;
2143
2144 num = (int16_t)EAX;
2145 den = (int8_t)t0;
2146 if (den == 0) {
2147 raise_exception(EXCP00_DIVZ);
2148 }
2149 q = (num / den);
2150 if (q != (int8_t)q)
2151 raise_exception(EXCP00_DIVZ);
2152 q &= 0xff;
2153 r = (num % den) & 0xff;
2154 EAX = (EAX & ~0xffff) | (r << 8) | q;
2155}
2156
2157void helper_divw_AX(target_ulong t0)
2158{
2159 unsigned int num, den, q, r;
2160
2161 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2162 den = (t0 & 0xffff);
2163 if (den == 0) {
2164 raise_exception(EXCP00_DIVZ);
2165 }
2166 q = (num / den);
2167 if (q > 0xffff)
2168 raise_exception(EXCP00_DIVZ);
2169 q &= 0xffff;
2170 r = (num % den) & 0xffff;
2171 EAX = (EAX & ~0xffff) | q;
2172 EDX = (EDX & ~0xffff) | r;
2173}
2174
2175void helper_idivw_AX(target_ulong t0)
2176{
2177 int num, den, q, r;
2178
2179 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2180 den = (int16_t)t0;
2181 if (den == 0) {
2182 raise_exception(EXCP00_DIVZ);
2183 }
2184 q = (num / den);
2185 if (q != (int16_t)q)
2186 raise_exception(EXCP00_DIVZ);
2187 q &= 0xffff;
2188 r = (num % den) & 0xffff;
2189 EAX = (EAX & ~0xffff) | q;
2190 EDX = (EDX & ~0xffff) | r;
2191}
2192
2193void helper_divl_EAX(target_ulong t0)
2194{
2195 unsigned int den, r;
2196 uint64_t num, q;
2197
2198 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2199 den = t0;
2200 if (den == 0) {
2201 raise_exception(EXCP00_DIVZ);
2202 }
2203 q = (num / den);
2204 r = (num % den);
2205 if (q > 0xffffffff)
2206 raise_exception(EXCP00_DIVZ);
2207 EAX = (uint32_t)q;
2208 EDX = (uint32_t)r;
2209}
2210
2211void helper_idivl_EAX(target_ulong t0)
2212{
2213 int den, r;
2214 int64_t num, q;
2215
2216 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2217 den = t0;
2218 if (den == 0) {
2219 raise_exception(EXCP00_DIVZ);
2220 }
2221 q = (num / den);
2222 r = (num % den);
2223 if (q != (int32_t)q)
2224 raise_exception(EXCP00_DIVZ);
2225 EAX = (uint32_t)q;
2226 EDX = (uint32_t)r;
2227}
2228
2229/* bcd */
2230
2231/* XXX: exception */
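/* AAM divides AL by the immediate base (10 for the plain opcode), storing the
   quotient in AH and the remainder in AL; SF/ZF/PF are set from the new AL.
   The XXX above refers to the missing #DE check for a zero base. */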
2232void helper_aam(int base)
2233{
2234 int al, ah;
2235 al = EAX & 0xff;
2236 ah = al / base;
2237 al = al % base;
2238 EAX = (EAX & ~0xffff) | al | (ah << 8);
2239 CC_DST = al;
2240}
2241
2242void helper_aad(int base)
2243{
2244 int al, ah;
2245 al = EAX & 0xff;
2246 ah = (EAX >> 8) & 0xff;
2247 al = ((ah * base) + al) & 0xff;
2248 EAX = (EAX & ~0xffff) | al;
2249 CC_DST = al;
2250}
2251
2252void helper_aaa(void)
2253{
2254 int icarry;
2255 int al, ah, af;
2256 int eflags;
2257
2258 eflags = helper_cc_compute_all(CC_OP);
2259 af = eflags & CC_A;
2260 al = EAX & 0xff;
2261 ah = (EAX >> 8) & 0xff;
2262
2263 icarry = (al > 0xf9);
2264 if (((al & 0x0f) > 9 ) || af) {
2265 al = (al + 6) & 0x0f;
2266 ah = (ah + 1 + icarry) & 0xff;
2267 eflags |= CC_C | CC_A;
2268 } else {
2269 eflags &= ~(CC_C | CC_A);
2270 al &= 0x0f;
2271 }
2272 EAX = (EAX & ~0xffff) | al | (ah << 8);
2273 CC_SRC = eflags;
2274}
2275
2276void helper_aas(void)
2277{
2278 int icarry;
2279 int al, ah, af;
2280 int eflags;
2281
2282 eflags = helper_cc_compute_all(CC_OP);
2283 af = eflags & CC_A;
2284 al = EAX & 0xff;
2285 ah = (EAX >> 8) & 0xff;
2286
2287 icarry = (al < 6);
2288 if (((al & 0x0f) > 9 ) || af) {
2289 al = (al - 6) & 0x0f;
2290 ah = (ah - 1 - icarry) & 0xff;
2291 eflags |= CC_C | CC_A;
2292 } else {
2293 eflags &= ~(CC_C | CC_A);
2294 al &= 0x0f;
2295 }
2296 EAX = (EAX & ~0xffff) | al | (ah << 8);
2297 CC_SRC = eflags;
2298}
2299
2300void helper_daa(void)
2301{
2302 int al, af, cf;
2303 int eflags;
2304
2305 eflags = helper_cc_compute_all(CC_OP);
2306 cf = eflags & CC_C;
2307 af = eflags & CC_A;
2308 al = EAX & 0xff;
2309
2310 eflags = 0;
2311 if (((al & 0x0f) > 9 ) || af) {
2312 al = (al + 6) & 0xff;
2313 eflags |= CC_A;
2314 }
2315 if ((al > 0x9f) || cf) {
2316 al = (al + 0x60) & 0xff;
2317 eflags |= CC_C;
2318 }
2319 EAX = (EAX & ~0xff) | al;
2320 /* well, speed is not an issue here, so we compute the flags by hand */
2321 eflags |= (al == 0) << 6; /* zf */
2322 eflags |= parity_table[al]; /* pf */
2323 eflags |= (al & 0x80); /* sf */
2324 CC_SRC = eflags;
2325}
2326
2327void helper_das(void)
2328{
2329 int al, al1, af, cf;
2330 int eflags;
2331
2332 eflags = helper_cc_compute_all(CC_OP);
2333 cf = eflags & CC_C;
2334 af = eflags & CC_A;
2335 al = EAX & 0xff;
2336
2337 eflags = 0;
2338 al1 = al;
2339 if (((al & 0x0f) > 9 ) || af) {
2340 eflags |= CC_A;
2341 if (al < 6 || cf)
2342 eflags |= CC_C;
2343 al = (al - 6) & 0xff;
2344 }
2345 if ((al1 > 0x99) || cf) {
2346 al = (al - 0x60) & 0xff;
2347 eflags |= CC_C;
2348 }
2349 EAX = (EAX & ~0xff) | al;
2350 /* well, speed is not an issue here, so we compute the flags by hand */
2351 eflags |= (al == 0) << 6; /* zf */
2352 eflags |= parity_table[al]; /* pf */
2353 eflags |= (al & 0x80); /* sf */
2354 CC_SRC = eflags;
2355}
2356
2357void helper_into(int next_eip_addend)
2358{
2359 int eflags;
2360 eflags = helper_cc_compute_all(CC_OP);
2361 if (eflags & CC_O) {
2362 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2363 }
2364}
2365
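/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match store
   ECX:EBX and set ZF, otherwise load the memory value into EDX:EAX and clear
   ZF. The destination is written in both cases, so the memory access behaves
   identically whether or not the comparison succeeds. */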
2366void helper_cmpxchg8b(target_ulong a0)
2367{
2368 uint64_t d;
2369 int eflags;
2370
2371 eflags = helper_cc_compute_all(CC_OP);
2372 d = ldq(a0);
2373 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2374 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2375 eflags |= CC_Z;
2376 } else {
2377 /* always do the store */
2378 stq(a0, d);
2379 EDX = (uint32_t)(d >> 32);
2380 EAX = (uint32_t)d;
2381 eflags &= ~CC_Z;
2382 }
2383 CC_SRC = eflags;
2384}
2385
2386#ifdef TARGET_X86_64
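/* CMPXCHG16B: same scheme with RDX:RAX / RCX:RBX on a 16-byte operand, which
   must be 16-byte aligned or #GP is raised. */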
2387void helper_cmpxchg16b(target_ulong a0)
2388{
2389 uint64_t d0, d1;
2390 int eflags;
2391
2392 if ((a0 & 0xf) != 0)
2393 raise_exception(EXCP0D_GPF);
2394 eflags = helper_cc_compute_all(CC_OP);
2395 d0 = ldq(a0);
2396 d1 = ldq(a0 + 8);
2397 if (d0 == EAX && d1 == EDX) {
2398 stq(a0, EBX);
2399 stq(a0 + 8, ECX);
2400 eflags |= CC_Z;
2401 } else {
2402 /* always do the store */
2403 stq(a0, d0);
2404 stq(a0 + 8, d1);
2405 EDX = d1;
2406 EAX = d0;
2407 eflags &= ~CC_Z;
2408 }
2409 CC_SRC = eflags;
2410}
2411#endif
2412
2413void helper_single_step(void)
2414{
2415#ifndef CONFIG_USER_ONLY
2416 check_hw_breakpoints(env, 1);
2417 env->dr[6] |= DR6_BS;
2418#endif
2419 raise_exception(EXCP01_DB);
2420}
2421
2422void helper_cpuid(void)
2423{
2424 uint32_t eax, ebx, ecx, edx;
2425
2426 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2427
2428 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2429 EAX = eax;
2430 EBX = ebx;
2431 ECX = ecx;
2432 EDX = edx;
2433}
2434
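/* Helper for ENTER with a non-zero nesting level: copy the (level - 1) saved
   frame pointers from the old frame (addressed via EBP) onto the new stack,
   then push t1, the new frame-pointer value supplied by the translated code. */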
2435void helper_enter_level(int level, int data32, target_ulong t1)
2436{
2437 target_ulong ssp;
2438 uint32_t esp_mask, esp, ebp;
2439
2440 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2441 ssp = env->segs[R_SS].base;
2442 ebp = EBP;
2443 esp = ESP;
2444 if (data32) {
2445 /* 32 bit */
2446 esp -= 4;
2447 while (--level) {
2448 esp -= 4;
2449 ebp -= 4;
2450 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2451 }
2452 esp -= 4;
2453 stl(ssp + (esp & esp_mask), t1);
2454 } else {
2455 /* 16 bit */
2456 esp -= 2;
2457 while (--level) {
2458 esp -= 2;
2459 ebp -= 2;
2460 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2461 }
2462 esp -= 2;
2463 stw(ssp + (esp & esp_mask), t1);
2464 }
2465}
2466
2467#ifdef TARGET_X86_64
2468void helper_enter64_level(int level, int data64, target_ulong t1)
2469{
2470 target_ulong esp, ebp;
2471 ebp = EBP;
2472 esp = ESP;
2473
2474 if (data64) {
2475 /* 64 bit */
2476 esp -= 8;
2477 while (--level) {
2478 esp -= 8;
2479 ebp -= 8;
2480 stq(esp, ldq(ebp));
2481 }
2482 esp -= 8;
2483 stq(esp, t1);
2484 } else {
2485 /* 16 bit */
2486 esp -= 2;
2487 while (--level) {
2488 esp -= 2;
2489 ebp -= 2;
2490 stw(esp, lduw(ebp));
2491 }
2492 esp -= 2;
2493 stw(esp, t1);
2494 }
2495}
2496#endif
2497
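/* LLDT: load the LDT register. A null selector leaves the LDT unusable; any
   other selector must reference a present LDT descriptor (system type 2) in
   the GDT, or #GP/#NP is raised. In long mode the descriptor is 16 bytes and
   supplies the upper half of the base address. */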
2498void helper_lldt(int selector)
2499{
2500 SegmentCache *dt;
2501 uint32_t e1, e2;
2502#ifndef VBOX
2503 int index, entry_limit;
2504#else
2505 unsigned int index, entry_limit;
2506#endif
2507 target_ulong ptr;
2508
2509#ifdef VBOX
2510 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2511 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2512#endif
2513
2514 selector &= 0xffff;
2515 if ((selector & 0xfffc) == 0) {
2516 /* XXX: NULL selector case: invalid LDT */
2517 env->ldt.base = 0;
2518 env->ldt.limit = 0;
2519#ifdef VBOX
2520 env->ldt.flags = DESC_INTEL_UNUSABLE;
2521 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2522 env->ldt.newselector = 0;
2523#endif
2524 } else {
2525 if (selector & 0x4)
2526 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2527 dt = &env->gdt;
2528 index = selector & ~7;
2529#ifdef TARGET_X86_64
2530 if (env->hflags & HF_LMA_MASK)
2531 entry_limit = 15;
2532 else
2533#endif
2534 entry_limit = 7;
2535 if ((index + entry_limit) > dt->limit)
2536 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2537 ptr = dt->base + index;
2538 e1 = ldl_kernel(ptr);
2539 e2 = ldl_kernel(ptr + 4);
2540 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2541 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2542 if (!(e2 & DESC_P_MASK))
2543 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2544#ifdef TARGET_X86_64
2545 if (env->hflags & HF_LMA_MASK) {
2546 uint32_t e3;
2547 e3 = ldl_kernel(ptr + 8);
2548 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2549 env->ldt.base |= (target_ulong)e3 << 32;
2550 } else
2551#endif
2552 {
2553 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2554 }
2555 }
2556 env->ldt.selector = selector;
2557#ifdef VBOX
2558 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2559 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2560#endif
2561}
2562
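/* LTR: load the task register from an available TSS descriptor (type 1 or 9)
   in the GDT; the descriptor is then marked busy. Long mode uses the 16-byte
   descriptor format and requires the upper type field to be zero. */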
2563void helper_ltr(int selector)
2564{
2565 SegmentCache *dt;
2566 uint32_t e1, e2;
2567#ifndef VBOX
2568 int index, type, entry_limit;
2569#else
2570 unsigned int index;
2571 int type, entry_limit;
2572#endif
2573 target_ulong ptr;
2574
2575#ifdef VBOX
2576 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2577 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2578 env->tr.flags, (RTSEL)(selector & 0xffff)));
2579#endif
2580 selector &= 0xffff;
2581 if ((selector & 0xfffc) == 0) {
2582 /* NULL selector case: invalid TR */
2583 env->tr.base = 0;
2584 env->tr.limit = 0;
2585 env->tr.flags = 0;
2586#ifdef VBOX /** @todo can TR really be 0? If so, what're the hidden attributes? */
2587 env->tr.flags = DESC_INTEL_UNUSABLE;
2588 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2589 env->tr.newselector = 0;
2590#endif
2591 } else {
2592 if (selector & 0x4)
2593 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2594 dt = &env->gdt;
2595 index = selector & ~7;
2596#ifdef TARGET_X86_64
2597 if (env->hflags & HF_LMA_MASK)
2598 entry_limit = 15;
2599 else
2600#endif
2601 entry_limit = 7;
2602 if ((index + entry_limit) > dt->limit)
2603 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2604 ptr = dt->base + index;
2605 e1 = ldl_kernel(ptr);
2606 e2 = ldl_kernel(ptr + 4);
2607 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2608 if ((e2 & DESC_S_MASK) ||
2609 (type != 1 && type != 9))
2610 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2611 if (!(e2 & DESC_P_MASK))
2612 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2613#ifdef TARGET_X86_64
2614 if (env->hflags & HF_LMA_MASK) {
2615 uint32_t e3, e4;
2616 e3 = ldl_kernel(ptr + 8);
2617 e4 = ldl_kernel(ptr + 12);
2618 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2619 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2620 load_seg_cache_raw_dt(&env->tr, e1, e2);
2621 env->tr.base |= (target_ulong)e3 << 32;
2622 } else
2623#endif
2624 {
2625 load_seg_cache_raw_dt(&env->tr, e1, e2);
2626 }
2627 e2 |= DESC_TSS_BUSY_MASK;
2628 stl_kernel(ptr + 4, e2);
2629 }
2630 env->tr.selector = selector;
2631#ifdef VBOX
2632 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2633 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2634 env->tr.flags, (RTSEL)(selector & 0xffff)));
2635#endif
2636}
2637
2638/* only works in protected mode, not VM86. seg_reg must be != R_CS */
2639void helper_load_seg(int seg_reg, int selector)
2640{
2641 uint32_t e1, e2;
2642 int cpl, dpl, rpl;
2643 SegmentCache *dt;
2644#ifndef VBOX
2645 int index;
2646#else
2647 unsigned int index;
2648#endif
2649 target_ulong ptr;
2650
2651 selector &= 0xffff;
2652 cpl = env->hflags & HF_CPL_MASK;
2653#ifdef VBOX
2654
2655 /* Trying to load a selector with RPL=1 while at CPL=0 (raw ring-0)? */
2656 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2657 {
2658 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2659 selector = selector & 0xfffc;
2660 }
2661#endif /* VBOX */
2662 if ((selector & 0xfffc) == 0) {
2663 /* null selector case */
2664#ifndef VBOX
2665 if (seg_reg == R_SS
2666#ifdef TARGET_X86_64
2667 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2668#endif
2669 )
2670 raise_exception_err(EXCP0D_GPF, 0);
2671 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2672#else
2673 if (seg_reg == R_SS) {
2674 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2675 raise_exception_err(EXCP0D_GPF, 0);
2676 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2677 } else {
2678 e2 = DESC_INTEL_UNUSABLE;
2679 }
2680 cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
2681#endif
2682 } else {
2683
2684 if (selector & 0x4)
2685 dt = &env->ldt;
2686 else
2687 dt = &env->gdt;
2688 index = selector & ~7;
2689 if ((index + 7) > dt->limit)
2690 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2691 ptr = dt->base + index;
2692 e1 = ldl_kernel(ptr);
2693 e2 = ldl_kernel(ptr + 4);
2694
2695 if (!(e2 & DESC_S_MASK))
2696 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2697 rpl = selector & 3;
2698 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2699 if (seg_reg == R_SS) {
2700 /* must be writable segment */
2701 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2702 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2703 if (rpl != cpl || dpl != cpl)
2704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2705 } else {
2706 /* must be readable segment */
2707 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2708 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2709
2710 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2711 /* if not conforming code, test rights */
2712 if (dpl < cpl || dpl < rpl)
2713 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2714 }
2715 }
2716
2717 if (!(e2 & DESC_P_MASK)) {
2718 if (seg_reg == R_SS)
2719 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2720 else
2721 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2722 }
2723
2724 /* set the access bit if not already set */
2725 if (!(e2 & DESC_A_MASK)) {
2726 e2 |= DESC_A_MASK;
2727 stl_kernel(ptr + 4, e2);
2728 }
2729
2730 cpu_x86_load_seg_cache(env, seg_reg, selector,
2731 get_seg_base(e1, e2),
2732 get_seg_limit(e1, e2),
2733 e2);
2734#if 0
2735 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2736 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2737#endif
2738 }
2739}
2740
2741/* protected mode jump */
2742void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2743 int next_eip_addend)
2744{
2745 int gate_cs, type;
2746 uint32_t e1, e2, cpl, dpl, rpl, limit;
2747 target_ulong next_eip;
2748
2749#ifdef VBOX /** @todo Why do we do this? */
2750 e1 = e2 = 0;
2751#endif
2752 if ((new_cs & 0xfffc) == 0)
2753 raise_exception_err(EXCP0D_GPF, 0);
2754 if (load_segment(&e1, &e2, new_cs) != 0)
2755 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2756 cpl = env->hflags & HF_CPL_MASK;
2757 if (e2 & DESC_S_MASK) {
2758 if (!(e2 & DESC_CS_MASK))
2759 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2760 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2761 if (e2 & DESC_C_MASK) {
2762 /* conforming code segment */
2763 if (dpl > cpl)
2764 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2765 } else {
2766 /* non conforming code segment */
2767 rpl = new_cs & 3;
2768 if (rpl > cpl)
2769 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2770 if (dpl != cpl)
2771 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2772 }
2773 if (!(e2 & DESC_P_MASK))
2774 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2775 limit = get_seg_limit(e1, e2);
2776 if (new_eip > limit &&
2777 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779#ifdef VBOX
2780 if (!(e2 & DESC_A_MASK))
2781 e2 = set_segment_accessed(new_cs, e2);
2782#endif
2783 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2784 get_seg_base(e1, e2), limit, e2);
2785 EIP = new_eip;
2786 } else {
2787 /* jump to call or task gate */
2788 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2789 rpl = new_cs & 3;
2790 cpl = env->hflags & HF_CPL_MASK;
2791 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2792 switch(type) {
2793 case 1: /* 286 TSS */
2794 case 9: /* 386 TSS */
2795 case 5: /* task gate */
2796 if (dpl < cpl || dpl < rpl)
2797 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2798 next_eip = env->eip + next_eip_addend;
2799 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2800 CC_OP = CC_OP_EFLAGS;
2801 break;
2802 case 4: /* 286 call gate */
2803 case 12: /* 386 call gate */
2804 if ((dpl < cpl) || (dpl < rpl))
2805 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2806 if (!(e2 & DESC_P_MASK))
2807 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2808 gate_cs = e1 >> 16;
2809 new_eip = (e1 & 0xffff);
2810 if (type == 12)
2811 new_eip |= (e2 & 0xffff0000);
2812 if (load_segment(&e1, &e2, gate_cs) != 0)
2813 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2814 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2815 /* must be code segment */
2816 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2817 (DESC_S_MASK | DESC_CS_MASK)))
2818 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2819 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2820 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2821 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2822 if (!(e2 & DESC_P_MASK))
2823#ifdef VBOX /* See page 3-514 of 253666.pdf */
2824 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2825#else
2826 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2827#endif
2828 limit = get_seg_limit(e1, e2);
2829 if (new_eip > limit)
2830 raise_exception_err(EXCP0D_GPF, 0);
2831 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2832 get_seg_base(e1, e2), limit, e2);
2833 EIP = new_eip;
2834 break;
2835 default:
2836 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2837 break;
2838 }
2839 }
2840}
2841
2842/* real mode call */
2843void helper_lcall_real(int new_cs, target_ulong new_eip1,
2844 int shift, int next_eip)
2845{
2846 int new_eip;
2847 uint32_t esp, esp_mask;
2848 target_ulong ssp;
2849
2850 new_eip = new_eip1;
2851 esp = ESP;
2852 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2853 ssp = env->segs[R_SS].base;
2854 if (shift) {
2855 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2856 PUSHL(ssp, esp, esp_mask, next_eip);
2857 } else {
2858 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2859 PUSHW(ssp, esp, esp_mask, next_eip);
2860 }
2861
2862 SET_ESP(esp, esp_mask);
2863 env->eip = new_eip;
2864 env->segs[R_CS].selector = new_cs;
2865 env->segs[R_CS].base = (new_cs << 4);
2866}
2867
2868/* protected mode call */
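/* Handles direct far calls to code segments as well as calls through call
   gates (including copying param_count stack words to the inner stack on a
   privilege change) and task gates/TSS descriptors. */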
2869void helper_lcall_protected(int new_cs, target_ulong new_eip,
2870 int shift, int next_eip_addend)
2871{
2872 int new_stack, i;
2873 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2874 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2875 uint32_t val, limit, old_sp_mask;
2876 target_ulong ssp, old_ssp, next_eip;
2877
2878#ifdef VBOX /** @todo Why do we do this? */
2879 e1 = e2 = 0;
2880#endif
2881 next_eip = env->eip + next_eip_addend;
2882 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2883 LOG_PCALL_STATE(env);
2884 if ((new_cs & 0xfffc) == 0)
2885 raise_exception_err(EXCP0D_GPF, 0);
2886 if (load_segment(&e1, &e2, new_cs) != 0)
2887 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2888 cpl = env->hflags & HF_CPL_MASK;
2889 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2890 if (e2 & DESC_S_MASK) {
2891 if (!(e2 & DESC_CS_MASK))
2892 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2893 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2894 if (e2 & DESC_C_MASK) {
2895 /* conforming code segment */
2896 if (dpl > cpl)
2897 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2898 } else {
2899 /* non conforming code segment */
2900 rpl = new_cs & 3;
2901 if (rpl > cpl)
2902 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2903 if (dpl != cpl)
2904 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2905 }
2906 if (!(e2 & DESC_P_MASK))
2907 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2908#ifdef VBOX
2909 if (!(e2 & DESC_A_MASK))
2910 e2 = set_segment_accessed(new_cs, e2);
2911#endif
2912
2913#ifdef TARGET_X86_64
2914 /* XXX: check 16/32 bit cases in long mode */
2915 if (shift == 2) {
2916 target_ulong rsp;
2917 /* 64 bit case */
2918 rsp = ESP;
2919 PUSHQ(rsp, env->segs[R_CS].selector);
2920 PUSHQ(rsp, next_eip);
2921 /* from this point, not restartable */
2922 ESP = rsp;
2923 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2924 get_seg_base(e1, e2),
2925 get_seg_limit(e1, e2), e2);
2926 EIP = new_eip;
2927 } else
2928#endif
2929 {
2930 sp = ESP;
2931 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2932 ssp = env->segs[R_SS].base;
2933 if (shift) {
2934 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2935 PUSHL(ssp, sp, sp_mask, next_eip);
2936 } else {
2937 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2938 PUSHW(ssp, sp, sp_mask, next_eip);
2939 }
2940
2941 limit = get_seg_limit(e1, e2);
2942 if (new_eip > limit)
2943 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2944 /* from this point, not restartable */
2945 SET_ESP(sp, sp_mask);
2946 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2947 get_seg_base(e1, e2), limit, e2);
2948 EIP = new_eip;
2949 }
2950 } else {
2951 /* check gate type */
2952 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2953 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2954 rpl = new_cs & 3;
2955 switch(type) {
2956 case 1: /* available 286 TSS */
2957 case 9: /* available 386 TSS */
2958 case 5: /* task gate */
2959 if (dpl < cpl || dpl < rpl)
2960 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2961 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2962 CC_OP = CC_OP_EFLAGS;
2963 return;
2964 case 4: /* 286 call gate */
2965 case 12: /* 386 call gate */
2966 break;
2967 default:
2968 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2969 break;
2970 }
2971 shift = type >> 3;
2972
2973 if (dpl < cpl || dpl < rpl)
2974 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2975 /* check valid bit */
2976 if (!(e2 & DESC_P_MASK))
2977 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2978 selector = e1 >> 16;
2979 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2980 param_count = e2 & 0x1f;
2981 if ((selector & 0xfffc) == 0)
2982 raise_exception_err(EXCP0D_GPF, 0);
2983
2984 if (load_segment(&e1, &e2, selector) != 0)
2985 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2986 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2987 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2988 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2989 if (dpl > cpl)
2990 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2991 if (!(e2 & DESC_P_MASK))
2992 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2993
2994 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2995 /* to inner privilege */
2996 get_ss_esp_from_tss(&ss, &sp, dpl);
2997 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2998 ss, sp, param_count, ESP);
2999 if ((ss & 0xfffc) == 0)
3000 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3001 if ((ss & 3) != dpl)
3002 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3003 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3004 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3005 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3006 if (ss_dpl != dpl)
3007 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3008 if (!(ss_e2 & DESC_S_MASK) ||
3009 (ss_e2 & DESC_CS_MASK) ||
3010 !(ss_e2 & DESC_W_MASK))
3011 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3012 if (!(ss_e2 & DESC_P_MASK))
3013#ifdef VBOX /* See page 3-99 of 253666.pdf */
3014 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3015#else
3016 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3017#endif
3018
3019 // push_size = ((param_count * 2) + 8) << shift;
3020
3021 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3022 old_ssp = env->segs[R_SS].base;
3023
3024 sp_mask = get_sp_mask(ss_e2);
3025 ssp = get_seg_base(ss_e1, ss_e2);
3026 if (shift) {
3027 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3028 PUSHL(ssp, sp, sp_mask, ESP);
3029 for(i = param_count - 1; i >= 0; i--) {
3030 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3031 PUSHL(ssp, sp, sp_mask, val);
3032 }
3033 } else {
3034 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3035 PUSHW(ssp, sp, sp_mask, ESP);
3036 for(i = param_count - 1; i >= 0; i--) {
3037 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3038 PUSHW(ssp, sp, sp_mask, val);
3039 }
3040 }
3041 new_stack = 1;
3042 } else {
3043 /* to same privilege */
3044 sp = ESP;
3045 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3046 ssp = env->segs[R_SS].base;
3047 // push_size = (4 << shift);
3048 new_stack = 0;
3049 }
3050
3051 if (shift) {
3052 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3053 PUSHL(ssp, sp, sp_mask, next_eip);
3054 } else {
3055 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3056 PUSHW(ssp, sp, sp_mask, next_eip);
3057 }
3058
3059 /* from this point, not restartable */
3060
3061 if (new_stack) {
3062 ss = (ss & ~3) | dpl;
3063 cpu_x86_load_seg_cache(env, R_SS, ss,
3064 ssp,
3065 get_seg_limit(ss_e1, ss_e2),
3066 ss_e2);
3067 }
3068
3069 selector = (selector & ~3) | dpl;
3070 cpu_x86_load_seg_cache(env, R_CS, selector,
3071 get_seg_base(e1, e2),
3072 get_seg_limit(e1, e2),
3073 e2);
3074 cpu_x86_set_cpl(env, dpl);
3075 SET_ESP(sp, sp_mask);
3076 EIP = offset;
3077 }
3078}
3079
3080/* real and vm86 mode iret */
3081void helper_iret_real(int shift)
3082{
3083 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3084 target_ulong ssp;
3085 int eflags_mask;
3086#ifdef VBOX
3087 bool fVME = false;
3088
3089 remR3TrapClear(env->pVM);
3090#endif /* VBOX */
3091
3092 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3093 sp = ESP;
3094 ssp = env->segs[R_SS].base;
3095 if (shift == 1) {
3096 /* 32 bits */
3097 POPL(ssp, sp, sp_mask, new_eip);
3098 POPL(ssp, sp, sp_mask, new_cs);
3099 new_cs &= 0xffff;
3100 POPL(ssp, sp, sp_mask, new_eflags);
3101 } else {
3102 /* 16 bits */
3103 POPW(ssp, sp, sp_mask, new_eip);
3104 POPW(ssp, sp, sp_mask, new_cs);
3105 POPW(ssp, sp, sp_mask, new_eflags);
3106 }
3107#ifdef VBOX
3108 if ( (env->eflags & VM_MASK)
3109 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3110 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3111 {
3112 fVME = true;
3113 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3114 /* if TF will be set -> #GP */
3115 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3116 || (new_eflags & TF_MASK))
3117 raise_exception(EXCP0D_GPF);
3118 }
3119#endif /* VBOX */
3120 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3121 env->segs[R_CS].selector = new_cs;
3122 env->segs[R_CS].base = (new_cs << 4);
3123 env->eip = new_eip;
3124#ifdef VBOX
3125 if (fVME)
3126 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3127 else
3128#endif
3129 if (env->eflags & VM_MASK)
3130 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3131 else
3132 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3133 if (shift == 0)
3134 eflags_mask &= 0xffff;
3135 load_eflags(new_eflags, eflags_mask);
3136 env->hflags2 &= ~HF2_NMI_MASK;
3137#ifdef VBOX
3138 if (fVME)
3139 {
3140 if (new_eflags & IF_MASK)
3141 env->eflags |= VIF_MASK;
3142 else
3143 env->eflags &= ~VIF_MASK;
3144 }
3145#endif /* VBOX */
3146}
3147
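/* On return to an outer privilege level, data and non-conforming code segment
   registers whose DPL is lower than the new CPL are forced to the null
   selector, as real CPUs do on iret/lret. */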
3148static inline void validate_seg(int seg_reg, int cpl)
3149{
3150 int dpl;
3151 uint32_t e2;
3152
3153 /* XXX: on x86_64, we do not want to nullify FS and GS because
3154 they may still contain a valid base. I would be interested to
3155 know how a real x86_64 CPU behaves */
3156 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3157 (env->segs[seg_reg].selector & 0xfffc) == 0)
3158 return;
3159
3160 e2 = env->segs[seg_reg].flags;
3161 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3162 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3163 /* data or non conforming code segment */
3164 if (dpl < cpl) {
3165 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3166 }
3167 }
3168}
3169
3170/* protected mode iret */
3171static inline void helper_ret_protected(int shift, int is_iret, int addend)
3172{
3173 uint32_t new_cs, new_eflags, new_ss;
3174 uint32_t new_es, new_ds, new_fs, new_gs;
3175 uint32_t e1, e2, ss_e1, ss_e2;
3176 int cpl, dpl, rpl, eflags_mask, iopl;
3177 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3178
3179#ifdef VBOX /** @todo Why do we do this? */
3180 ss_e1 = ss_e2 = e1 = e2 = 0;
3181#endif
3182
3183#ifdef TARGET_X86_64
3184 if (shift == 2)
3185 sp_mask = -1;
3186 else
3187#endif
3188 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3189 sp = ESP;
3190 ssp = env->segs[R_SS].base;
3191 new_eflags = 0; /* avoid warning */
3192#ifdef TARGET_X86_64
3193 if (shift == 2) {
3194 POPQ(sp, new_eip);
3195 POPQ(sp, new_cs);
3196 new_cs &= 0xffff;
3197 if (is_iret) {
3198 POPQ(sp, new_eflags);
3199 }
3200 } else
3201#endif
3202 if (shift == 1) {
3203 /* 32 bits */
3204 POPL(ssp, sp, sp_mask, new_eip);
3205 POPL(ssp, sp, sp_mask, new_cs);
3206 new_cs &= 0xffff;
3207 if (is_iret) {
3208 POPL(ssp, sp, sp_mask, new_eflags);
3209#define LOG_GROUP LOG_GROUP_REM
3210#if defined(VBOX) && defined(DEBUG)
3211 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3212 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3213 Log(("iret: new EFLAGS %08X\n", new_eflags));
3214 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3215#endif
3216 if (new_eflags & VM_MASK)
3217 goto return_to_vm86;
3218 }
3219#ifdef VBOX
3220 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3221 {
3222 if ( !EMIsRawRing1Enabled(env->pVM)
3223 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3224 {
3225 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3226 new_cs = new_cs & 0xfffc;
3227 }
3228 else
3229 {
3230 /* Ugly assumption: assume a genuine switch to ring-1. */
3231 Log(("Genuine switch to ring-1 (iret)\n"));
3232 }
3233 }
3234 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3235 {
3236 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3237 new_cs = (new_cs & 0xfffc) | 1;
3238 }
3239#endif
3240 } else {
3241 /* 16 bits */
3242 POPW(ssp, sp, sp_mask, new_eip);
3243 POPW(ssp, sp, sp_mask, new_cs);
3244 if (is_iret)
3245 POPW(ssp, sp, sp_mask, new_eflags);
3246 }
3247 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3248 new_cs, new_eip, shift, addend);
3249 LOG_PCALL_STATE(env);
3250 if ((new_cs & 0xfffc) == 0)
3251 {
3252#if defined(VBOX) && defined(DEBUG)
3253 Log(("(new_cs & 0xfffc) == 0\n"));
3254#endif
3255 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3256 }
3257 if (load_segment(&e1, &e2, new_cs) != 0)
3258 {
3259#if defined(VBOX) && defined(DEBUG)
3260 Log(("load_segment failed\n"));
3261#endif
3262 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3263 }
3264 if (!(e2 & DESC_S_MASK) ||
3265 !(e2 & DESC_CS_MASK))
3266 {
3267#if defined(VBOX) && defined(DEBUG)
3268 Log(("e2 mask %08x\n", e2));
3269#endif
3270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3271 }
3272 cpl = env->hflags & HF_CPL_MASK;
3273 rpl = new_cs & 3;
3274 if (rpl < cpl)
3275 {
3276#if defined(VBOX) && defined(DEBUG)
3277 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3278#endif
3279 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3280 }
3281 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3282
3283 if (e2 & DESC_C_MASK) {
3284 if (dpl > rpl)
3285 {
3286#if defined(VBOX) && defined(DEBUG)
3287 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3288#endif
3289 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3290 }
3291 } else {
3292 if (dpl != rpl)
3293 {
3294#if defined(VBOX) && defined(DEBUG)
3295 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3296#endif
3297 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3298 }
3299 }
3300 if (!(e2 & DESC_P_MASK))
3301 {
3302#if defined(VBOX) && defined(DEBUG)
3303 Log(("DESC_P_MASK e2=%08x\n", e2));
3304#endif
3305 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3306 }
3307
3308 sp += addend;
3309 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3310 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3311 /* return to same privilege level */
3312#ifdef VBOX
3313 if (!(e2 & DESC_A_MASK))
3314 e2 = set_segment_accessed(new_cs, e2);
3315#endif
3316 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3317 get_seg_base(e1, e2),
3318 get_seg_limit(e1, e2),
3319 e2);
3320 } else {
3321 /* return to different privilege level */
3322#ifdef TARGET_X86_64
3323 if (shift == 2) {
3324 POPQ(sp, new_esp);
3325 POPQ(sp, new_ss);
3326 new_ss &= 0xffff;
3327 } else
3328#endif
3329 if (shift == 1) {
3330 /* 32 bits */
3331 POPL(ssp, sp, sp_mask, new_esp);
3332 POPL(ssp, sp, sp_mask, new_ss);
3333 new_ss &= 0xffff;
3334 } else {
3335 /* 16 bits */
3336 POPW(ssp, sp, sp_mask, new_esp);
3337 POPW(ssp, sp, sp_mask, new_ss);
3338 }
3339 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3340 new_ss, new_esp);
3341 if ((new_ss & 0xfffc) == 0) {
3342#ifdef TARGET_X86_64
3343 /* NULL ss is allowed in long mode if cpl != 3 */
3344# ifndef VBOX
3345 /* XXX: test CS64 ? */
3346 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3347 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3348 0, 0xffffffff,
3349 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3350 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3351 DESC_W_MASK | DESC_A_MASK);
3352 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3353 } else
3354# else /* VBOX */
3355 if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
3356 if (!(e2 & DESC_A_MASK))
3357 e2 = set_segment_accessed(new_cs, e2);
3358 cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
3359 0, 0xffffffff,
3360 DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
3361 ss_e2 = DESC_B_MASK; /* not really used */
3362 } else
3363# endif
3364#endif
3365 {
3366#if defined(VBOX) && defined(DEBUG)
3367 Log(("NULL ss, rpl=%d\n", rpl));
3368#endif
3369 raise_exception_err(EXCP0D_GPF, 0);
3370 }
3371 } else {
3372 if ((new_ss & 3) != rpl)
3373 {
3374#if defined(VBOX) && defined(DEBUG)
3375 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3376#endif
3377 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3378 }
3379 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3380 {
3381#if defined(VBOX) && defined(DEBUG)
3382 Log(("new_ss=%x load error\n", new_ss));
3383#endif
3384 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3385 }
3386 if (!(ss_e2 & DESC_S_MASK) ||
3387 (ss_e2 & DESC_CS_MASK) ||
3388 !(ss_e2 & DESC_W_MASK))
3389 {
3390#if defined(VBOX) && defined(DEBUG)
3391 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3392#endif
3393 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3394 }
3395 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3396 if (dpl != rpl)
3397 {
3398#if defined(VBOX) && defined(DEBUG)
3399 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3400#endif
3401 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3402 }
3403 if (!(ss_e2 & DESC_P_MASK))
3404 {
3405#if defined(VBOX) && defined(DEBUG)
3406 Log(("new_ss=%#x #NP\n", new_ss));
3407#endif
3408 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3409 }
3410#ifdef VBOX
3411 if (!(e2 & DESC_A_MASK))
3412 e2 = set_segment_accessed(new_cs, e2);
3413 if (!(ss_e2 & DESC_A_MASK))
3414 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3415#endif
3416 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3417 get_seg_base(ss_e1, ss_e2),
3418 get_seg_limit(ss_e1, ss_e2),
3419 ss_e2);
3420 }
3421
3422 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3423 get_seg_base(e1, e2),
3424 get_seg_limit(e1, e2),
3425 e2);
3426 cpu_x86_set_cpl(env, rpl);
3427 sp = new_esp;
3428#ifdef TARGET_X86_64
3429 if (env->hflags & HF_CS64_MASK)
3430 sp_mask = -1;
3431 else
3432#endif
3433 sp_mask = get_sp_mask(ss_e2);
3434
3435 /* validate data segments */
3436 validate_seg(R_ES, rpl);
3437 validate_seg(R_DS, rpl);
3438 validate_seg(R_FS, rpl);
3439 validate_seg(R_GS, rpl);
3440
3441 sp += addend;
3442 }
3443 SET_ESP(sp, sp_mask);
3444 env->eip = new_eip;
3445 if (is_iret) {
3446 /* NOTE: 'cpl' is the _old_ CPL */
3447 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3448 if (cpl == 0)
3449#ifdef VBOX
3450 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3451#else
3452 eflags_mask |= IOPL_MASK;
3453#endif
3454 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3455 if (cpl <= iopl)
3456 eflags_mask |= IF_MASK;
3457 if (shift == 0)
3458 eflags_mask &= 0xffff;
3459 load_eflags(new_eflags, eflags_mask);
3460 }
3461 return;
3462
3463 return_to_vm86:
3464 POPL(ssp, sp, sp_mask, new_esp);
3465 POPL(ssp, sp, sp_mask, new_ss);
3466 POPL(ssp, sp, sp_mask, new_es);
3467 POPL(ssp, sp, sp_mask, new_ds);
3468 POPL(ssp, sp, sp_mask, new_fs);
3469 POPL(ssp, sp, sp_mask, new_gs);
3470
3471 /* modify processor state */
3472 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3473 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3474 load_seg_vm(R_CS, new_cs & 0xffff);
3475 cpu_x86_set_cpl(env, 3);
3476 load_seg_vm(R_SS, new_ss & 0xffff);
3477 load_seg_vm(R_ES, new_es & 0xffff);
3478 load_seg_vm(R_DS, new_ds & 0xffff);
3479 load_seg_vm(R_FS, new_fs & 0xffff);
3480 load_seg_vm(R_GS, new_gs & 0xffff);
3481
3482 env->eip = new_eip & 0xffff;
3483 ESP = new_esp;
3484}
3485
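/* Protected mode IRET: if EFLAGS.NT is set, this is a task return through the
   back-link selector stored in the current TSS; otherwise it is handled by
   helper_ret_protected(). */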
3486void helper_iret_protected(int shift, int next_eip)
3487{
3488 int tss_selector, type;
3489 uint32_t e1, e2;
3490
3491#ifdef VBOX
3492 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3493 e1 = e2 = 0; /** @todo Why do we do this? */
3494 remR3TrapClear(env->pVM);
3495#endif
3496
3497 /* specific case for TSS */
3498 if (env->eflags & NT_MASK) {
3499#ifdef TARGET_X86_64
3500 if (env->hflags & HF_LMA_MASK)
3501 {
3502#if defined(VBOX) && defined(DEBUG)
3503 Log(("eflags.NT=1 on iret in long mode\n"));
3504#endif
3505 raise_exception_err(EXCP0D_GPF, 0);
3506 }
3507#endif
3508 tss_selector = lduw_kernel(env->tr.base + 0);
3509 if (tss_selector & 4)
3510 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3511 if (load_segment(&e1, &e2, tss_selector) != 0)
3512 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3513 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3514 /* NOTE: we check both segment and busy TSS */
3515 if (type != 3)
3516 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3517 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3518 } else {
3519 helper_ret_protected(shift, 1, 0);
3520 }
3521 env->hflags2 &= ~HF2_NMI_MASK;
3522}
3523
3524void helper_lret_protected(int shift, int addend)
3525{
3526 helper_ret_protected(shift, 0, addend);
3527}
3528
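/* SYSENTER: enter CPL 0 using flat CS/SS selectors derived from the
   IA32_SYSENTER_CS MSR (CS = MSR, SS = MSR + 8) and the target ESP/EIP taken
   from the SYSENTER_ESP/EIP MSRs; VM and IF are cleared. #GP(0) is raised if
   SYSENTER_CS is zero. */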
3529void helper_sysenter(void)
3530{
3531 if (env->sysenter_cs == 0) {
3532 raise_exception_err(EXCP0D_GPF, 0);
3533 }
3534 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3535 cpu_x86_set_cpl(env, 0);
3536
3537#ifdef TARGET_X86_64
3538 if (env->hflags & HF_LMA_MASK) {
3539 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3540 0, 0xffffffff,
3541 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3542 DESC_S_MASK |
3543 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3544 } else
3545#endif
3546 {
3547 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3548 0, 0xffffffff,
3549 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3550 DESC_S_MASK |
3551 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3552 }
3553 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3554 0, 0xffffffff,
3555 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3556 DESC_S_MASK |
3557 DESC_W_MASK | DESC_A_MASK);
3558 ESP = env->sysenter_esp;
3559 EIP = env->sysenter_eip;
3560}
3561
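/* SYSEXIT: return to CPL 3 with CS = SYSENTER_CS + 16 and SS = SYSENTER_CS + 24
   (or + 32 and + 40 for a 64-bit return), EIP taken from EDX and ESP from ECX.
   #GP(0) is raised if SYSENTER_CS is zero or the caller is not at CPL 0. */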
3562void helper_sysexit(int dflag)
3563{
3564 int cpl;
3565
3566 cpl = env->hflags & HF_CPL_MASK;
3567 if (env->sysenter_cs == 0 || cpl != 0) {
3568 raise_exception_err(EXCP0D_GPF, 0);
3569 }
3570 cpu_x86_set_cpl(env, 3);
3571#ifdef TARGET_X86_64
3572 if (dflag == 2) {
3573 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3574 0, 0xffffffff,
3575 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3576 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3577 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3578 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3579 0, 0xffffffff,
3580 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3581 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3582 DESC_W_MASK | DESC_A_MASK);
3583 } else
3584#endif
3585 {
3586 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3587 0, 0xffffffff,
3588 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3589 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3590 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3591 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3592 0, 0xffffffff,
3593 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3594 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3595 DESC_W_MASK | DESC_A_MASK);
3596 }
3597 ESP = ECX;
3598 EIP = EDX;
3599}
3600
3601#if defined(CONFIG_USER_ONLY)
3602target_ulong helper_read_crN(int reg)
3603{
3604 return 0;
3605}
3606
3607void helper_write_crN(int reg, target_ulong t0)
3608{
3609}
3610
3611void helper_movl_drN_T0(int reg, target_ulong t0)
3612{
3613}
3614#else
3615target_ulong helper_read_crN(int reg)
3616{
3617 target_ulong val;
3618
3619 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3620 switch(reg) {
3621 default:
3622 val = env->cr[reg];
3623 break;
3624 case 8:
3625 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3626#ifndef VBOX
3627 val = cpu_get_apic_tpr(env->apic_state);
3628#else /* VBOX */
3629 val = cpu_get_apic_tpr(env);
3630#endif /* VBOX */
3631 } else {
3632 val = env->v_tpr;
3633 }
3634 break;
3635 }
3636 return val;
3637}
3638
3639void helper_write_crN(int reg, target_ulong t0)
3640{
3641 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3642 switch(reg) {
3643 case 0:
3644 cpu_x86_update_cr0(env, t0);
3645 break;
3646 case 3:
3647 cpu_x86_update_cr3(env, t0);
3648 break;
3649 case 4:
3650 cpu_x86_update_cr4(env, t0);
3651 break;
3652 case 8:
3653 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3654#ifndef VBOX
3655 cpu_set_apic_tpr(env->apic_state, t0);
3656#else /* VBOX */
3657 cpu_set_apic_tpr(env, t0);
3658#endif /* VBOX */
3659 }
3660 env->v_tpr = t0 & 0x0f;
3661 break;
3662 default:
3663 env->cr[reg] = t0;
3664 break;
3665 }
3666}
3667
3668void helper_movl_drN_T0(int reg, target_ulong t0)
3669{
3670 int i;
3671
3672 if (reg < 4) {
3673 hw_breakpoint_remove(env, reg);
3674 env->dr[reg] = t0;
3675 hw_breakpoint_insert(env, reg);
3676# ifndef VBOX
3677 } else if (reg == 7) {
3678# else
3679 } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
3680 if (t0 & X86_DR7_MBZ_MASK)
3681 raise_exception_err(EXCP0D_GPF, 0);
3682 t0 |= X86_DR7_RA1_MASK;
3683 t0 &= ~X86_DR7_RAZ_MASK;
3684# endif
3685 for (i = 0; i < 4; i++)
3686 hw_breakpoint_remove(env, i);
3687 env->dr[7] = t0;
3688 for (i = 0; i < 4; i++)
3689 hw_breakpoint_insert(env, i);
3690 } else {
3691# ifndef VBOX
3692 env->dr[reg] = t0;
3693# else
3694 if (t0 & X86_DR6_MBZ_MASK)
3695 raise_exception_err(EXCP0D_GPF, 0);
3696 t0 |= X86_DR6_RA1_MASK;
3697 t0 &= ~X86_DR6_RAZ_MASK;
3698 env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
3699# endif
3700 }
3701}
3702#endif
3703
3704void helper_lmsw(target_ulong t0)
3705{
3706 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3707 if already set to one. */
3708 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3709 helper_write_crN(0, t0);
3710}
3711
3712void helper_clts(void)
3713{
3714 env->cr[0] &= ~CR0_TS_MASK;
3715 env->hflags &= ~HF_TS_MASK;
3716}
3717
3718void helper_invlpg(target_ulong addr)
3719{
3720 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3721 tlb_flush_page(env, addr);
3722}
3723
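/* RDTSC: read the virtual TSC (including any tsc_offset) into EDX:EAX.
   Raises #GP if CR4.TSD is set and the caller is not at CPL 0. */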
3724void helper_rdtsc(void)
3725{
3726 uint64_t val;
3727
3728 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3729 raise_exception(EXCP0D_GPF);
3730 }
3731 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3732
3733 val = cpu_get_tsc(env) + env->tsc_offset;
3734 EAX = (uint32_t)(val);
3735 EDX = (uint32_t)(val >> 32);
3736}
3737
3738void helper_rdtscp(void)
3739{
3740 helper_rdtsc();
3741#ifndef VBOX
3742 ECX = (uint32_t)(env->tsc_aux);
3743#else /* VBOX */
3744 uint64_t val;
3745 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3746 ECX = (uint32_t)(val);
3747 else
3748 ECX = 0;
3749#endif /* VBOX */
3750}
3751
3752void helper_rdpmc(void)
3753{
3754#ifdef VBOX
3755 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3756 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3757 raise_exception(EXCP0D_GPF);
3758 }
3759 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3760 EAX = 0;
3761 EDX = 0;
3762#else /* !VBOX */
3763 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3764 raise_exception(EXCP0D_GPF);
3765 }
3766 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3767
3768 /* currently unimplemented */
3769 raise_exception_err(EXCP06_ILLOP, 0);
3770#endif /* !VBOX */
3771}
3772
3773#if defined(CONFIG_USER_ONLY)
3774void helper_wrmsr(void)
3775{
3776}
3777
3778void helper_rdmsr(void)
3779{
3780}
3781#else
3782void helper_wrmsr(void)
3783{
3784 uint64_t val;
3785
3786 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3787
3788 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3789
3790 switch((uint32_t)ECX) {
3791 case MSR_IA32_SYSENTER_CS:
3792 env->sysenter_cs = val & 0xffff;
3793 break;
3794 case MSR_IA32_SYSENTER_ESP:
3795 env->sysenter_esp = val;
3796 break;
3797 case MSR_IA32_SYSENTER_EIP:
3798 env->sysenter_eip = val;
3799 break;
3800 case MSR_IA32_APICBASE:
3801# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3802 cpu_set_apic_base(env->apic_state, val);
3803# endif
3804 break;
3805 case MSR_EFER:
3806 {
3807 uint64_t update_mask;
3808 update_mask = 0;
3809 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3810 update_mask |= MSR_EFER_SCE;
3811 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3812 update_mask |= MSR_EFER_LME;
3813 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3814 update_mask |= MSR_EFER_FFXSR;
3815 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3816 update_mask |= MSR_EFER_NXE;
3817 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3818 update_mask |= MSR_EFER_SVME;
3819 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3820 update_mask |= MSR_EFER_FFXSR;
3821 cpu_load_efer(env, (env->efer & ~update_mask) |
3822 (val & update_mask));
3823 }
3824 break;
3825 case MSR_STAR:
3826 env->star = val;
3827 break;
3828 case MSR_PAT:
3829 env->pat = val;
3830 break;
3831 case MSR_VM_HSAVE_PA:
3832 env->vm_hsave = val;
3833 break;
3834#ifdef TARGET_X86_64
3835 case MSR_LSTAR:
3836 env->lstar = val;
3837 break;
3838 case MSR_CSTAR:
3839 env->cstar = val;
3840 break;
3841 case MSR_FMASK:
3842 env->fmask = val;
3843 break;
3844 case MSR_FSBASE:
3845 env->segs[R_FS].base = val;
3846 break;
3847 case MSR_GSBASE:
3848 env->segs[R_GS].base = val;
3849 break;
3850 case MSR_KERNELGSBASE:
3851 env->kernelgsbase = val;
3852 break;
3853#endif
3854# ifndef VBOX
3855 case MSR_MTRRphysBase(0):
3856 case MSR_MTRRphysBase(1):
3857 case MSR_MTRRphysBase(2):
3858 case MSR_MTRRphysBase(3):
3859 case MSR_MTRRphysBase(4):
3860 case MSR_MTRRphysBase(5):
3861 case MSR_MTRRphysBase(6):
3862 case MSR_MTRRphysBase(7):
3863 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3864 break;
3865 case MSR_MTRRphysMask(0):
3866 case MSR_MTRRphysMask(1):
3867 case MSR_MTRRphysMask(2):
3868 case MSR_MTRRphysMask(3):
3869 case MSR_MTRRphysMask(4):
3870 case MSR_MTRRphysMask(5):
3871 case MSR_MTRRphysMask(6):
3872 case MSR_MTRRphysMask(7):
3873 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3874 break;
3875 case MSR_MTRRfix64K_00000:
3876 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3877 break;
3878 case MSR_MTRRfix16K_80000:
3879 case MSR_MTRRfix16K_A0000:
3880 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3881 break;
3882 case MSR_MTRRfix4K_C0000:
3883 case MSR_MTRRfix4K_C8000:
3884 case MSR_MTRRfix4K_D0000:
3885 case MSR_MTRRfix4K_D8000:
3886 case MSR_MTRRfix4K_E0000:
3887 case MSR_MTRRfix4K_E8000:
3888 case MSR_MTRRfix4K_F0000:
3889 case MSR_MTRRfix4K_F8000:
3890 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3891 break;
3892 case MSR_MTRRdefType:
3893 env->mtrr_deftype = val;
3894 break;
3895 case MSR_MCG_STATUS:
3896 env->mcg_status = val;
3897 break;
3898 case MSR_MCG_CTL:
3899 if ((env->mcg_cap & MCG_CTL_P)
3900 && (val == 0 || val == ~(uint64_t)0))
3901 env->mcg_ctl = val;
3902 break;
3903 case MSR_TSC_AUX:
3904 env->tsc_aux = val;
3905 break;
3906# endif /* !VBOX */
3907 default:
3908# ifndef VBOX
3909 if ((uint32_t)ECX >= MSR_MC0_CTL
3910 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3911 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3912 if ((offset & 0x3) != 0
3913 || (val == 0 || val == ~(uint64_t)0))
3914 env->mce_banks[offset] = val;
3915 break;
3916 }
3917 /* XXX: exception ? */
3918# endif
3919 break;
3920 }
3921
3922# ifdef VBOX
3923 /* call CPUM. */
3924 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3925 {
3926 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3927 }
3928# endif
3929}
3930
3931void helper_rdmsr(void)
3932{
3933 uint64_t val;
3934
3935 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3936
3937 switch((uint32_t)ECX) {
3938 case MSR_IA32_SYSENTER_CS:
3939 val = env->sysenter_cs;
3940 break;
3941 case MSR_IA32_SYSENTER_ESP:
3942 val = env->sysenter_esp;
3943 break;
3944 case MSR_IA32_SYSENTER_EIP:
3945 val = env->sysenter_eip;
3946 break;
3947 case MSR_IA32_APICBASE:
3948#ifndef VBOX
3949 val = cpu_get_apic_base(env->apic_state);
3950#else /* VBOX */
3951 val = cpu_get_apic_base(env);
3952#endif /* VBOX */
3953 break;
3954 case MSR_EFER:
3955 val = env->efer;
3956 break;
3957 case MSR_STAR:
3958 val = env->star;
3959 break;
3960 case MSR_PAT:
3961 val = env->pat;
3962 break;
3963 case MSR_VM_HSAVE_PA:
3964 val = env->vm_hsave;
3965 break;
3966# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3967 case MSR_IA32_PERF_STATUS:
3968 /* tsc_increment_by_tick */
3969 val = 1000ULL;
3970 /* CPU multiplier */
3971 val |= (((uint64_t)4ULL) << 40);
3972 break;
3973# endif /* !VBOX */
3974#ifdef TARGET_X86_64
3975 case MSR_LSTAR:
3976 val = env->lstar;
3977 break;
3978 case MSR_CSTAR:
3979 val = env->cstar;
3980 break;
3981 case MSR_FMASK:
3982 val = env->fmask;
3983 break;
3984 case MSR_FSBASE:
3985 val = env->segs[R_FS].base;
3986 break;
3987 case MSR_GSBASE:
3988 val = env->segs[R_GS].base;
3989 break;
3990 case MSR_KERNELGSBASE:
3991 val = env->kernelgsbase;
3992 break;
3993# ifndef VBOX
3994 case MSR_TSC_AUX:
3995 val = env->tsc_aux;
3996 break;
3997# endif /*!VBOX*/
3998#endif
3999# ifndef VBOX
4000 case MSR_MTRRphysBase(0):
4001 case MSR_MTRRphysBase(1):
4002 case MSR_MTRRphysBase(2):
4003 case MSR_MTRRphysBase(3):
4004 case MSR_MTRRphysBase(4):
4005 case MSR_MTRRphysBase(5):
4006 case MSR_MTRRphysBase(6):
4007 case MSR_MTRRphysBase(7):
4008 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
4009 break;
4010 case MSR_MTRRphysMask(0):
4011 case MSR_MTRRphysMask(1):
4012 case MSR_MTRRphysMask(2):
4013 case MSR_MTRRphysMask(3):
4014 case MSR_MTRRphysMask(4):
4015 case MSR_MTRRphysMask(5):
4016 case MSR_MTRRphysMask(6):
4017 case MSR_MTRRphysMask(7):
4018 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
4019 break;
4020 case MSR_MTRRfix64K_00000:
4021 val = env->mtrr_fixed[0];
4022 break;
4023 case MSR_MTRRfix16K_80000:
4024 case MSR_MTRRfix16K_A0000:
4025 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
4026 break;
4027 case MSR_MTRRfix4K_C0000:
4028 case MSR_MTRRfix4K_C8000:
4029 case MSR_MTRRfix4K_D0000:
4030 case MSR_MTRRfix4K_D8000:
4031 case MSR_MTRRfix4K_E0000:
4032 case MSR_MTRRfix4K_E8000:
4033 case MSR_MTRRfix4K_F0000:
4034 case MSR_MTRRfix4K_F8000:
4035 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4036 break;
4037 case MSR_MTRRdefType:
4038 val = env->mtrr_deftype;
4039 break;
4040 case MSR_MTRRcap:
4041 if (env->cpuid_features & CPUID_MTRR)
4042 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4043 else
4044 /* XXX: exception ? */
4045 val = 0;
4046 break;
4047 case MSR_MCG_CAP:
4048 val = env->mcg_cap;
4049 break;
4050 case MSR_MCG_CTL:
4051 if (env->mcg_cap & MCG_CTL_P)
4052 val = env->mcg_ctl;
4053 else
4054 val = 0;
4055 break;
4056 case MSR_MCG_STATUS:
4057 val = env->mcg_status;
4058 break;
4059# endif /* !VBOX */
4060 default:
4061# ifndef VBOX
4062 if ((uint32_t)ECX >= MSR_MC0_CTL
4063 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
4064 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4065 val = env->mce_banks[offset];
4066 break;
4067 }
4068 /* XXX: exception ? */
4069 val = 0;
4070# else /* VBOX */
4071 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4072 {
4073 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4074 val = 0;
4075 }
4076# endif /* VBOX */
4077 break;
4078 }
4079 EAX = (uint32_t)(val);
4080 EDX = (uint32_t)(val >> 32);
4081
4082# ifdef VBOX_STRICT
4083 if ((uint32_t)ECX != MSR_IA32_TSC) {
4084 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4085 val = 0;
4086 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4087 }
4088# endif
4089}
4090#endif
4091
4092target_ulong helper_lsl(target_ulong selector1)
4093{
4094 unsigned int limit;
4095 uint32_t e1, e2, eflags, selector;
4096 int rpl, dpl, cpl, type;
4097
4098 selector = selector1 & 0xffff;
4099 eflags = helper_cc_compute_all(CC_OP);
4100 if ((selector & 0xfffc) == 0)
4101 goto fail;
4102 if (load_segment(&e1, &e2, selector) != 0)
4103 goto fail;
4104 rpl = selector & 3;
4105 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4106 cpl = env->hflags & HF_CPL_MASK;
4107 if (e2 & DESC_S_MASK) {
4108 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4109 /* conforming */
4110 } else {
4111 if (dpl < cpl || dpl < rpl)
4112 goto fail;
4113 }
4114 } else {
4115 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
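        /* LSL only accepts these system descriptor types: 1/3 = 16-bit TSS (available/busy),
           2 = LDT, 9/11 = 32-bit TSS (available/busy). */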
4116 switch(type) {
4117 case 1:
4118 case 2:
4119 case 3:
4120 case 9:
4121 case 11:
4122 break;
4123 default:
4124 goto fail;
4125 }
4126 if (dpl < cpl || dpl < rpl) {
4127 fail:
4128 CC_SRC = eflags & ~CC_Z;
4129 return 0;
4130 }
4131 }
4132 limit = get_seg_limit(e1, e2);
4133 CC_SRC = eflags | CC_Z;
4134 return limit;
4135}
4136
4137target_ulong helper_lar(target_ulong selector1)
4138{
4139 uint32_t e1, e2, eflags, selector;
4140 int rpl, dpl, cpl, type;
4141
4142 selector = selector1 & 0xffff;
4143 eflags = helper_cc_compute_all(CC_OP);
4144 if ((selector & 0xfffc) == 0)
4145 goto fail;
4146 if (load_segment(&e1, &e2, selector) != 0)
4147 goto fail;
4148 rpl = selector & 3;
4149 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4150 cpl = env->hflags & HF_CPL_MASK;
4151 if (e2 & DESC_S_MASK) {
4152 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4153 /* conforming */
4154 } else {
4155 if (dpl < cpl || dpl < rpl)
4156 goto fail;
4157 }
4158 } else {
4159 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
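        /* LAR additionally accepts gates: TSS (1,3,9,11), LDT (2), call gates (4,12) and the task gate (5). */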
4160 switch(type) {
4161 case 1:
4162 case 2:
4163 case 3:
4164 case 4:
4165 case 5:
4166 case 9:
4167 case 11:
4168 case 12:
4169 break;
4170 default:
4171 goto fail;
4172 }
4173 if (dpl < cpl || dpl < rpl) {
4174 fail:
4175 CC_SRC = eflags & ~CC_Z;
4176 return 0;
4177 }
4178 }
4179 CC_SRC = eflags | CC_Z;
4180 #ifdef VBOX /* AMD says 0x00ffff00, while Intel says 0x00fxff00. Bochs and IEM do as AMD says (x=f). */
4181 return e2 & 0x00ffff00;
4182#else
4183 return e2 & 0x00f0ff00;
4184#endif
4185}
4186
4187void helper_verr(target_ulong selector1)
4188{
4189 uint32_t e1, e2, eflags, selector;
4190 int rpl, dpl, cpl;
4191
4192 selector = selector1 & 0xffff;
4193 eflags = helper_cc_compute_all(CC_OP);
4194 if ((selector & 0xfffc) == 0)
4195 goto fail;
4196 if (load_segment(&e1, &e2, selector) != 0)
4197 goto fail;
4198 if (!(e2 & DESC_S_MASK))
4199 goto fail;
4200 rpl = selector & 3;
4201 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4202 cpl = env->hflags & HF_CPL_MASK;
4203 if (e2 & DESC_CS_MASK) {
4204 if (!(e2 & DESC_R_MASK))
4205 goto fail;
4206 if (!(e2 & DESC_C_MASK)) {
4207 if (dpl < cpl || dpl < rpl)
4208 goto fail;
4209 }
4210 } else {
4211 if (dpl < cpl || dpl < rpl) {
4212 fail:
4213 CC_SRC = eflags & ~CC_Z;
4214 return;
4215 }
4216 }
4217 CC_SRC = eflags | CC_Z;
4218}
4219
4220void helper_verw(target_ulong selector1)
4221{
4222 uint32_t e1, e2, eflags, selector;
4223 int rpl, dpl, cpl;
4224
4225 selector = selector1 & 0xffff;
4226 eflags = helper_cc_compute_all(CC_OP);
4227 if ((selector & 0xfffc) == 0)
4228 goto fail;
4229 if (load_segment(&e1, &e2, selector) != 0)
4230 goto fail;
4231 if (!(e2 & DESC_S_MASK))
4232 goto fail;
4233 rpl = selector & 3;
4234 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4235 cpl = env->hflags & HF_CPL_MASK;
4236 if (e2 & DESC_CS_MASK) {
4237 goto fail;
4238 } else {
4239 if (dpl < cpl || dpl < rpl)
4240 goto fail;
4241 if (!(e2 & DESC_W_MASK)) {
4242 fail:
4243 CC_SRC = eflags & ~CC_Z;
4244 return;
4245 }
4246 }
4247 CC_SRC = eflags | CC_Z;
4248}
4249
4250/* x87 FPU helpers */
4251
4252static void fpu_set_exception(int mask)
4253{
4254 env->fpus |= mask;
4255 if (env->fpus & (~env->fpuc & FPUC_EM))
4256 env->fpus |= FPUS_SE | FPUS_B;
4257}
4258
4259static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4260{
4261 if (b == 0.0)
4262 fpu_set_exception(FPUS_ZE);
4263 return a / b;
4264}
4265
4266static void fpu_raise_exception(void)
4267{
4268 if (env->cr[0] & CR0_NE_MASK) {
4269 raise_exception(EXCP10_COPR);
4270 }
4271#if !defined(CONFIG_USER_ONLY)
4272 else {
4273 cpu_set_ferr(env);
4274 }
4275#endif
4276}
4277
4278void helper_flds_FT0(uint32_t val)
4279{
4280 union {
4281 float32 f;
4282 uint32_t i;
4283 } u;
4284 u.i = val;
4285 FT0 = float32_to_floatx(u.f, &env->fp_status);
4286}
4287
4288void helper_fldl_FT0(uint64_t val)
4289{
4290 union {
4291 float64 f;
4292 uint64_t i;
4293 } u;
4294 u.i = val;
4295 FT0 = float64_to_floatx(u.f, &env->fp_status);
4296}
4297
4298void helper_fildl_FT0(int32_t val)
4299{
4300 FT0 = int32_to_floatx(val, &env->fp_status);
4301}
4302
4303void helper_flds_ST0(uint32_t val)
4304{
4305 int new_fpstt;
4306 union {
4307 float32 f;
4308 uint32_t i;
4309 } u;
4310 new_fpstt = (env->fpstt - 1) & 7;
4311 u.i = val;
4312 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4313 env->fpstt = new_fpstt;
4314 env->fptags[new_fpstt] = 0; /* validate stack entry */
4315}
4316
4317void helper_fldl_ST0(uint64_t val)
4318{
4319 int new_fpstt;
4320 union {
4321 float64 f;
4322 uint64_t i;
4323 } u;
4324 new_fpstt = (env->fpstt - 1) & 7;
4325 u.i = val;
4326 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4327 env->fpstt = new_fpstt;
4328 env->fptags[new_fpstt] = 0; /* validate stack entry */
4329}
4330
4331void helper_fildl_ST0(int32_t val)
4332{
4333 int new_fpstt;
4334 new_fpstt = (env->fpstt - 1) & 7;
4335 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4336 env->fpstt = new_fpstt;
4337 env->fptags[new_fpstt] = 0; /* validate stack entry */
4338}
4339
4340void helper_fildll_ST0(int64_t val)
4341{
4342 int new_fpstt;
4343 new_fpstt = (env->fpstt - 1) & 7;
4344 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4345 env->fpstt = new_fpstt;
4346 env->fptags[new_fpstt] = 0; /* validate stack entry */
4347}
4348
4349#ifndef VBOX
4350uint32_t helper_fsts_ST0(void)
4351#else
4352RTCCUINTREG helper_fsts_ST0(void)
4353#endif
4354{
4355 union {
4356 float32 f;
4357 uint32_t i;
4358 } u;
4359 u.f = floatx_to_float32(ST0, &env->fp_status);
4360 return u.i;
4361}
4362
4363uint64_t helper_fstl_ST0(void)
4364{
4365 union {
4366 float64 f;
4367 uint64_t i;
4368 } u;
4369 u.f = floatx_to_float64(ST0, &env->fp_status);
4370 return u.i;
4371}
4372
4373#ifndef VBOX
4374int32_t helper_fist_ST0(void)
4375#else
4376RTCCINTREG helper_fist_ST0(void)
4377#endif
4378{
4379 int32_t val;
4380 val = floatx_to_int32(ST0, &env->fp_status);
4381 if (val != (int16_t)val)
4382 val = -32768;
4383 return val;
4384}
4385
4386#ifndef VBOX
4387int32_t helper_fistl_ST0(void)
4388#else
4389RTCCINTREG helper_fistl_ST0(void)
4390#endif
4391{
4392 int32_t val;
4393 val = floatx_to_int32(ST0, &env->fp_status);
4394 return val;
4395}
4396
4397int64_t helper_fistll_ST0(void)
4398{
4399 int64_t val;
4400 val = floatx_to_int64(ST0, &env->fp_status);
4401 return val;
4402}
4403
4404#ifndef VBOX
4405int32_t helper_fistt_ST0(void)
4406#else
4407RTCCINTREG helper_fistt_ST0(void)
4408#endif
4409{
4410 int32_t val;
4411 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4412 if (val != (int16_t)val)
4413 val = -32768;
4414 return val;
4415}
4416
4417#ifndef VBOX
4418int32_t helper_fisttl_ST0(void)
4419#else
4420RTCCINTREG helper_fisttl_ST0(void)
4421#endif
4422{
4423 int32_t val;
4424 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4425 return val;
4426}
4427
4428int64_t helper_fisttll_ST0(void)
4429{
4430 int64_t val;
4431 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4432 return val;
4433}
4434
4435void helper_fldt_ST0(target_ulong ptr)
4436{
4437 int new_fpstt;
4438 new_fpstt = (env->fpstt - 1) & 7;
4439 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4440 env->fpstt = new_fpstt;
4441 env->fptags[new_fpstt] = 0; /* validate stack entry */
4442}
4443
4444void helper_fstt_ST0(target_ulong ptr)
4445{
4446 helper_fstt(ST0, ptr);
4447}
4448
4449void helper_fpush(void)
4450{
4451 fpush();
4452}
4453
4454void helper_fpop(void)
4455{
4456 fpop();
4457}
4458
4459void helper_fdecstp(void)
4460{
4461 env->fpstt = (env->fpstt - 1) & 7;
4462 env->fpus &= (~0x4700);
4463}
4464
4465void helper_fincstp(void)
4466{
4467 env->fpstt = (env->fpstt + 1) & 7;
4468 env->fpus &= (~0x4700);
4469}
4470
4471/* FPU move */
4472
4473void helper_ffree_STN(int st_index)
4474{
4475 env->fptags[(env->fpstt + st_index) & 7] = 1;
4476}
4477
4478void helper_fmov_ST0_FT0(void)
4479{
4480 ST0 = FT0;
4481}
4482
4483void helper_fmov_FT0_STN(int st_index)
4484{
4485 FT0 = ST(st_index);
4486}
4487
4488void helper_fmov_ST0_STN(int st_index)
4489{
4490 ST0 = ST(st_index);
4491}
4492
4493void helper_fmov_STN_ST0(int st_index)
4494{
4495 ST(st_index) = ST0;
4496}
4497
4498void helper_fxchg_ST0_STN(int st_index)
4499{
4500 CPU86_LDouble tmp;
4501 tmp = ST(st_index);
4502 ST(st_index) = ST0;
4503 ST0 = tmp;
4504}
4505
4506/* FPU operations */
4507
4508static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
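/* Indexed with (compare result + 1): softfloat returns -1 (less), 0 (equal), 1 (greater), 2 (unordered).
   The values set the x87 condition codes: less -> C0, equal -> C3, greater -> none, unordered -> C3|C2|C0. */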
4509
4510void helper_fcom_ST0_FT0(void)
4511{
4512 int ret;
4513
4514 ret = floatx_compare(ST0, FT0, &env->fp_status);
4515 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4516}
4517
4518void helper_fucom_ST0_FT0(void)
4519{
4520 int ret;
4521
4522 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4523 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4524}
4525
4526static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
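/* Same indexing for FCOMI/FUCOMI, which report the result in EFLAGS instead:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */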
4527
4528void helper_fcomi_ST0_FT0(void)
4529{
4530 int eflags;
4531 int ret;
4532
4533 ret = floatx_compare(ST0, FT0, &env->fp_status);
4534 eflags = helper_cc_compute_all(CC_OP);
4535 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4536 CC_SRC = eflags;
4537}
4538
4539void helper_fucomi_ST0_FT0(void)
4540{
4541 int eflags;
4542 int ret;
4543
4544 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4545 eflags = helper_cc_compute_all(CC_OP);
4546 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4547 CC_SRC = eflags;
4548}
4549
4550void helper_fadd_ST0_FT0(void)
4551{
4552 ST0 += FT0;
4553}
4554
4555void helper_fmul_ST0_FT0(void)
4556{
4557 ST0 *= FT0;
4558}
4559
4560void helper_fsub_ST0_FT0(void)
4561{
4562 ST0 -= FT0;
4563}
4564
4565void helper_fsubr_ST0_FT0(void)
4566{
4567 ST0 = FT0 - ST0;
4568}
4569
4570void helper_fdiv_ST0_FT0(void)
4571{
4572 ST0 = helper_fdiv(ST0, FT0);
4573}
4574
4575void helper_fdivr_ST0_FT0(void)
4576{
4577 ST0 = helper_fdiv(FT0, ST0);
4578}
4579
4580/* fp operations between STN and ST0 */
4581
4582void helper_fadd_STN_ST0(int st_index)
4583{
4584 ST(st_index) += ST0;
4585}
4586
4587void helper_fmul_STN_ST0(int st_index)
4588{
4589 ST(st_index) *= ST0;
4590}
4591
4592void helper_fsub_STN_ST0(int st_index)
4593{
4594 ST(st_index) -= ST0;
4595}
4596
4597void helper_fsubr_STN_ST0(int st_index)
4598{
4599 CPU86_LDouble *p;
4600 p = &ST(st_index);
4601 *p = ST0 - *p;
4602}
4603
4604void helper_fdiv_STN_ST0(int st_index)
4605{
4606 CPU86_LDouble *p;
4607 p = &ST(st_index);
4608 *p = helper_fdiv(*p, ST0);
4609}
4610
4611void helper_fdivr_STN_ST0(int st_index)
4612{
4613 CPU86_LDouble *p;
4614 p = &ST(st_index);
4615 *p = helper_fdiv(ST0, *p);
4616}
4617
4618/* misc FPU operations */
4619void helper_fchs_ST0(void)
4620{
4621 ST0 = floatx_chs(ST0);
4622}
4623
4624void helper_fabs_ST0(void)
4625{
4626 ST0 = floatx_abs(ST0);
4627}
4628
4629void helper_fld1_ST0(void)
4630{
4631 ST0 = f15rk[1];
4632}
4633
4634void helper_fldl2t_ST0(void)
4635{
4636 ST0 = f15rk[6];
4637}
4638
4639void helper_fldl2e_ST0(void)
4640{
4641 ST0 = f15rk[5];
4642}
4643
4644void helper_fldpi_ST0(void)
4645{
4646 ST0 = f15rk[2];
4647}
4648
4649void helper_fldlg2_ST0(void)
4650{
4651 ST0 = f15rk[3];
4652}
4653
4654void helper_fldln2_ST0(void)
4655{
4656 ST0 = f15rk[4];
4657}
4658
4659void helper_fldz_ST0(void)
4660{
4661 ST0 = f15rk[0];
4662}
4663
4664void helper_fldz_FT0(void)
4665{
4666 FT0 = f15rk[0];
4667}
4668
4669#ifndef VBOX
4670uint32_t helper_fnstsw(void)
4671#else
4672RTCCUINTREG helper_fnstsw(void)
4673#endif
4674{
4675 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4676}
4677
4678#ifndef VBOX
4679uint32_t helper_fnstcw(void)
4680#else
4681RTCCUINTREG helper_fnstcw(void)
4682#endif
4683{
4684 return env->fpuc;
4685}
4686
4687static void update_fp_status(void)
4688{
4689 int rnd_type;
4690
4691 /* set rounding mode */
4692 switch(env->fpuc & RC_MASK) {
4693 default:
4694 case RC_NEAR:
4695 rnd_type = float_round_nearest_even;
4696 break;
4697 case RC_DOWN:
4698 rnd_type = float_round_down;
4699 break;
4700 case RC_UP:
4701 rnd_type = float_round_up;
4702 break;
4703 case RC_CHOP:
4704 rnd_type = float_round_to_zero;
4705 break;
4706 }
4707 set_float_rounding_mode(rnd_type, &env->fp_status);
4708#ifdef FLOATX80
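    /* FPUC bits 9..8 are the precision control (PC) field: 0 = single (24-bit), 2 = double (53-bit),
       3 = extended (64-bit); value 1 is reserved. */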
4709 switch((env->fpuc >> 8) & 3) {
4710 case 0:
4711 rnd_type = 32;
4712 break;
4713 case 2:
4714 rnd_type = 64;
4715 break;
4716 case 3:
4717 default:
4718 rnd_type = 80;
4719 break;
4720 }
4721 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4722#endif
4723}
4724
4725void helper_fldcw(uint32_t val)
4726{
4727 env->fpuc = val;
4728 update_fp_status();
4729}
4730
4731void helper_fclex(void)
4732{
4733 env->fpus &= 0x7f00;
4734}
4735
4736void helper_fwait(void)
4737{
4738 if (env->fpus & FPUS_SE)
4739 fpu_raise_exception();
4740}
4741
4742void helper_fninit(void)
4743{
4744 env->fpus = 0;
4745 env->fpstt = 0;
4746 env->fpuc = 0x37f;
4747 env->fptags[0] = 1;
4748 env->fptags[1] = 1;
4749 env->fptags[2] = 1;
4750 env->fptags[3] = 1;
4751 env->fptags[4] = 1;
4752 env->fptags[5] = 1;
4753 env->fptags[6] = 1;
4754 env->fptags[7] = 1;
4755}
4756
4757/* BCD ops */
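/* FBLD/FBSTP use a 10-byte packed BCD operand: bytes 0..8 hold 18 BCD digits (two per byte,
   least significant first) and bit 7 of byte 9 is the sign. */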
4758
4759void helper_fbld_ST0(target_ulong ptr)
4760{
4761 CPU86_LDouble tmp;
4762 uint64_t val;
4763 unsigned int v;
4764 int i;
4765
4766 val = 0;
4767 for(i = 8; i >= 0; i--) {
4768 v = ldub(ptr + i);
4769 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4770 }
4771 tmp = val;
4772 if (ldub(ptr + 9) & 0x80)
4773 tmp = -tmp;
4774 fpush();
4775 ST0 = tmp;
4776}
4777
4778void helper_fbst_ST0(target_ulong ptr)
4779{
4780 int v;
4781 target_ulong mem_ref, mem_end;
4782 int64_t val;
4783
4784 val = floatx_to_int64(ST0, &env->fp_status);
4785 mem_ref = ptr;
4786 mem_end = mem_ref + 9;
4787 if (val < 0) {
4788 stb(mem_end, 0x80);
4789 val = -val;
4790 } else {
4791 stb(mem_end, 0x00);
4792 }
4793 while (mem_ref < mem_end) {
4794 if (val == 0)
4795 break;
4796 v = val % 100;
4797 val = val / 100;
4798 v = ((v / 10) << 4) | (v % 10);
4799 stb(mem_ref++, v);
4800 }
4801 while (mem_ref < mem_end) {
4802 stb(mem_ref++, 0);
4803 }
4804}
4805
4806void helper_f2xm1(void)
4807{
4808 ST0 = pow(2.0,ST0) - 1.0;
4809}
4810
4811void helper_fyl2x(void)
4812{
4813 CPU86_LDouble fptemp;
4814
4815 fptemp = ST0;
4816 if (fptemp>0.0){
4817 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4818 ST1 *= fptemp;
4819 fpop();
4820 } else {
4821 env->fpus &= (~0x4700);
4822 env->fpus |= 0x400;
4823 }
4824}
4825
4826void helper_fptan(void)
4827{
4828 CPU86_LDouble fptemp;
4829
4830 fptemp = ST0;
4831 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4832 env->fpus |= 0x400;
4833 } else {
4834 ST0 = tan(fptemp);
4835 fpush();
4836 ST0 = 1.0;
4837 env->fpus &= (~0x400); /* C2 <-- 0 */
4838 /* the above code is for |arg| < 2**52 only */
4839 }
4840}
4841
4842void helper_fpatan(void)
4843{
4844 CPU86_LDouble fptemp, fpsrcop;
4845
4846 fpsrcop = ST1;
4847 fptemp = ST0;
4848 ST1 = atan2(fpsrcop,fptemp);
4849 fpop();
4850}
4851
4852void helper_fxtract(void)
4853{
4854 CPU86_LDoubleU temp;
4855 unsigned int expdif;
4856
4857 temp.d = ST0;
4858 expdif = EXPD(temp) - EXPBIAS;
4859 /*DP exponent bias*/
4860 ST0 = expdif;
4861 fpush();
4862 BIASEXPONENT(temp);
4863 ST0 = temp.d;
4864}
4865
4866void helper_fprem1(void)
4867{
4868 CPU86_LDouble dblq, fpsrcop, fptemp;
4869 CPU86_LDoubleU fpsrcop1, fptemp1;
4870 int expdif;
4871 signed long long int q;
4872
4873#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4874 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4875#else
4876 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4877#endif
4878 ST0 = 0.0 / 0.0; /* NaN */
4879 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4880 return;
4881 }
4882
4883 fpsrcop = ST0;
4884 fptemp = ST1;
4885 fpsrcop1.d = fpsrcop;
4886 fptemp1.d = fptemp;
4887 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4888
4889 if (expdif < 0) {
4890 /* optimisation? taken from the AMD docs */
4891 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4892 /* ST0 is unchanged */
4893 return;
4894 }
4895
4896 if (expdif < 53) {
4897 dblq = fpsrcop / fptemp;
4898 /* round dblq towards nearest integer */
4899 dblq = rint(dblq);
4900 ST0 = fpsrcop - fptemp * dblq;
4901
4902 /* convert dblq to q by truncating towards zero */
4903 if (dblq < 0.0)
4904 q = (signed long long int)(-dblq);
4905 else
4906 q = (signed long long int)dblq;
4907
4908 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4909 /* (C0,C3,C1) <-- (q2,q1,q0) */
4910 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4911 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4912 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4913 } else {
4914 env->fpus |= 0x400; /* C2 <-- 1 */
4915 fptemp = pow(2.0, expdif - 50);
4916 fpsrcop = (ST0 / ST1) / fptemp;
4917 /* fpsrcop = integer obtained by chopping */
4918 fpsrcop = (fpsrcop < 0.0) ?
4919 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4920 ST0 -= (ST1 * fpsrcop * fptemp);
4921 }
4922}
4923
4924void helper_fprem(void)
4925{
4926 CPU86_LDouble dblq, fpsrcop, fptemp;
4927 CPU86_LDoubleU fpsrcop1, fptemp1;
4928 int expdif;
4929 signed long long int q;
4930
4931#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4932 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4933#else
4934 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4935#endif
4936 ST0 = 0.0 / 0.0; /* NaN */
4937 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4938 return;
4939 }
4940
4941 fpsrcop = (CPU86_LDouble)ST0;
4942 fptemp = (CPU86_LDouble)ST1;
4943 fpsrcop1.d = fpsrcop;
4944 fptemp1.d = fptemp;
4945 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4946
4947 if (expdif < 0) {
4948 /* optimisation? taken from the AMD docs */
4949 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4950 /* ST0 is unchanged */
4951 return;
4952 }
4953
4954 if ( expdif < 53 ) {
4955 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4956 /* round dblq towards zero */
4957 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4958 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4959
4960 /* convert dblq to q by truncating towards zero */
4961 if (dblq < 0.0)
4962 q = (signed long long int)(-dblq);
4963 else
4964 q = (signed long long int)dblq;
4965
4966 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4967 /* (C0,C3,C1) <-- (q2,q1,q0) */
4968 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4969 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4970 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4971 } else {
4972 int N = 32 + (expdif % 32); /* as per AMD docs */
4973 env->fpus |= 0x400; /* C2 <-- 1 */
4974 fptemp = pow(2.0, (double)(expdif - N));
4975 fpsrcop = (ST0 / ST1) / fptemp;
4976 /* fpsrcop = integer obtained by chopping */
4977 fpsrcop = (fpsrcop < 0.0) ?
4978 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4979 ST0 -= (ST1 * fpsrcop * fptemp);
4980 }
4981}
4982
4983void helper_fyl2xp1(void)
4984{
4985 CPU86_LDouble fptemp;
4986
4987 fptemp = ST0;
4988 if ((fptemp+1.0)>0.0) {
4989 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4990 ST1 *= fptemp;
4991 fpop();
4992 } else {
4993 env->fpus &= (~0x4700);
4994 env->fpus |= 0x400;
4995 }
4996}
4997
4998void helper_fsqrt(void)
4999{
5000 CPU86_LDouble fptemp;
5001
5002 fptemp = ST0;
5003 if (fptemp<0.0) {
5004 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5005 env->fpus |= 0x400;
5006 }
5007 ST0 = sqrt(fptemp);
5008}
5009
5010void helper_fsincos(void)
5011{
5012 CPU86_LDouble fptemp;
5013
5014 fptemp = ST0;
5015 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5016 env->fpus |= 0x400;
5017 } else {
5018 ST0 = sin(fptemp);
5019 fpush();
5020 ST0 = cos(fptemp);
5021 env->fpus &= (~0x400); /* C2 <-- 0 */
5022 /* the above code is for |arg| < 2**63 only */
5023 }
5024}
5025
5026void helper_frndint(void)
5027{
5028 ST0 = floatx_round_to_int(ST0, &env->fp_status);
5029}
5030
5031void helper_fscale(void)
5032{
5033 ST0 = ldexp (ST0, (int)(ST1));
5034}
5035
5036void helper_fsin(void)
5037{
5038 CPU86_LDouble fptemp;
5039
5040 fptemp = ST0;
5041 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5042 env->fpus |= 0x400;
5043 } else {
5044 ST0 = sin(fptemp);
5045 env->fpus &= (~0x400); /* C2 <-- 0 */
5046 /* the above code is for |arg| < 2**53 only */
5047 }
5048}
5049
5050void helper_fcos(void)
5051{
5052 CPU86_LDouble fptemp;
5053
5054 fptemp = ST0;
5055 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5056 env->fpus |= 0x400;
5057 } else {
5058 ST0 = cos(fptemp);
5059 env->fpus &= (~0x400); /* C2 <-- 0 */
5060 /* the above code is for |arg| < 2**63 only */
5061 }
5062}
5063
5064void helper_fxam_ST0(void)
5065{
5066 CPU86_LDoubleU temp;
5067 int expdif;
5068
5069 temp.d = ST0;
5070
5071 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5072 if (SIGND(temp))
5073 env->fpus |= 0x200; /* C1 <-- 1 */
5074
5075 /* XXX: test fptags too */
5076 expdif = EXPD(temp);
5077 if (expdif == MAXEXPD) {
5078#ifdef USE_X86LDOUBLE
5079 if (MANTD(temp) == 0x8000000000000000ULL)
5080#else
5081 if (MANTD(temp) == 0)
5082#endif
5083 env->fpus |= 0x500 /*Infinity*/;
5084 else
5085 env->fpus |= 0x100 /*NaN*/;
5086 } else if (expdif == 0) {
5087 if (MANTD(temp) == 0)
5088 env->fpus |= 0x4000 /*Zero*/;
5089 else
5090 env->fpus |= 0x4400 /*Denormal*/;
5091 } else {
5092 env->fpus |= 0x400;
5093 }
5094}
5095
5096void helper_fstenv(target_ulong ptr, int data32)
5097{
5098 int fpus, fptag, exp, i;
5099 uint64_t mant;
5100 CPU86_LDoubleU tmp;
5101
5102 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5103 fptag = 0;
5104 for (i=7; i>=0; i--) {
5105 fptag <<= 2;
5106 if (env->fptags[i]) {
5107 fptag |= 3;
5108 } else {
5109 tmp.d = env->fpregs[i].d;
5110 exp = EXPD(tmp);
5111 mant = MANTD(tmp);
5112 if (exp == 0 && mant == 0) {
5113 /* zero */
5114 fptag |= 1;
5115 } else if (exp == 0 || exp == MAXEXPD
5116#ifdef USE_X86LDOUBLE
5117 || (mant & (1LL << 63)) == 0
5118#endif
5119 ) {
5120 /* NaNs, infinity, denormal */
5121 fptag |= 2;
5122 }
5123 }
5124 }
5125 if (data32) {
5126 /* 32 bit */
5127 stl(ptr, env->fpuc);
5128 stl(ptr + 4, fpus);
5129 stl(ptr + 8, fptag);
5130 stl(ptr + 12, 0); /* fpip */
5131 stl(ptr + 16, 0); /* fpcs */
5132 stl(ptr + 20, 0); /* fpoo */
5133 stl(ptr + 24, 0); /* fpos */
5134 } else {
5135 /* 16 bit */
5136 stw(ptr, env->fpuc);
5137 stw(ptr + 2, fpus);
5138 stw(ptr + 4, fptag);
5139 stw(ptr + 6, 0);
5140 stw(ptr + 8, 0);
5141 stw(ptr + 10, 0);
5142 stw(ptr + 12, 0);
5143 }
5144}
5145
5146void helper_fldenv(target_ulong ptr, int data32)
5147{
5148 int i, fpus, fptag;
5149
5150 if (data32) {
5151 env->fpuc = lduw(ptr);
5152 fpus = lduw(ptr + 4);
5153 fptag = lduw(ptr + 8);
5154 }
5155 else {
5156 env->fpuc = lduw(ptr);
5157 fpus = lduw(ptr + 2);
5158 fptag = lduw(ptr + 4);
5159 }
5160 env->fpstt = (fpus >> 11) & 7;
5161 env->fpus = fpus & ~0x3800;
5162 for(i = 0;i < 8; i++) {
5163 env->fptags[i] = ((fptag & 3) == 3);
5164 fptag >>= 2;
5165 }
5166}
5167
5168void helper_fsave(target_ulong ptr, int data32)
5169{
5170 CPU86_LDouble tmp;
5171 int i;
5172
5173 helper_fstenv(ptr, data32);
5174
5175 ptr += (14 << data32);
5176 for(i = 0;i < 8; i++) {
5177 tmp = ST(i);
5178 helper_fstt(tmp, ptr);
5179 ptr += 10;
5180 }
5181
5182 /* fninit */
5183 env->fpus = 0;
5184 env->fpstt = 0;
5185 env->fpuc = 0x37f;
5186 env->fptags[0] = 1;
5187 env->fptags[1] = 1;
5188 env->fptags[2] = 1;
5189 env->fptags[3] = 1;
5190 env->fptags[4] = 1;
5191 env->fptags[5] = 1;
5192 env->fptags[6] = 1;
5193 env->fptags[7] = 1;
5194}
5195
5196void helper_frstor(target_ulong ptr, int data32)
5197{
5198 CPU86_LDouble tmp;
5199 int i;
5200
5201 helper_fldenv(ptr, data32);
5202 ptr += (14 << data32);
5203
5204 for(i = 0;i < 8; i++) {
5205 tmp = helper_fldt(ptr);
5206 ST(i) = tmp;
5207 ptr += 10;
5208 }
5209}
5210
5211void helper_fxsave(target_ulong ptr, int data64)
5212{
5213 int fpus, fptag, i, nb_xmm_regs;
5214 CPU86_LDouble tmp;
5215 target_ulong addr;
5216
5217 /* The operand must be 16 byte aligned */
5218 if (ptr & 0xf) {
5219 raise_exception(EXCP0D_GPF);
5220 }
5221
5222 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5223 fptag = 0;
5224 for(i = 0; i < 8; i++) {
5225 fptag |= (env->fptags[i] << i);
5226 }
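    /* FXSAVE stores an abridged tag word: one bit per register, 1 = valid. env->fptags uses
       1 = empty, hence the XOR with 0xff below. */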
5227 stw(ptr, env->fpuc);
5228 stw(ptr + 2, fpus);
5229 stw(ptr + 4, fptag ^ 0xff);
5230#ifdef TARGET_X86_64
5231 if (data64) {
5232 stq(ptr + 0x08, 0); /* rip */
5233 stq(ptr + 0x10, 0); /* rdp */
5234 } else
5235#endif
5236 {
5237 stl(ptr + 0x08, 0); /* eip */
5238 stl(ptr + 0x0c, 0); /* sel */
5239 stl(ptr + 0x10, 0); /* dp */
5240 stl(ptr + 0x14, 0); /* sel */
5241 }
5242
5243 addr = ptr + 0x20;
5244 for(i = 0;i < 8; i++) {
5245 tmp = ST(i);
5246 helper_fstt(tmp, addr);
5247 addr += 16;
5248 }
5249
5250 if (env->cr[4] & CR4_OSFXSR_MASK) {
5251 /* XXX: finish it */
5252 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5253 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5254 if (env->hflags & HF_CS64_MASK)
5255 nb_xmm_regs = 16;
5256 else
5257 nb_xmm_regs = 8;
5258 addr = ptr + 0xa0;
5259 /* Fast FXSAVE leaves out the XMM registers */
5260 if (!(env->efer & MSR_EFER_FFXSR)
5261 || (env->hflags & HF_CPL_MASK)
5262 || !(env->hflags & HF_LMA_MASK)) {
5263 for(i = 0; i < nb_xmm_regs; i++) {
5264 stq(addr, env->xmm_regs[i].XMM_Q(0));
5265 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5266 addr += 16;
5267 }
5268 }
5269 }
5270}
5271
5272void helper_fxrstor(target_ulong ptr, int data64)
5273{
5274 int i, fpus, fptag, nb_xmm_regs;
5275 CPU86_LDouble tmp;
5276 target_ulong addr;
5277
5278 /* The operand must be 16 byte aligned */
5279 if (ptr & 0xf) {
5280 raise_exception(EXCP0D_GPF);
5281 }
5282
5283 env->fpuc = lduw(ptr);
5284 fpus = lduw(ptr + 2);
5285 fptag = lduw(ptr + 4);
5286 env->fpstt = (fpus >> 11) & 7;
5287 env->fpus = fpus & ~0x3800;
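    /* Convert the abridged FXSAVE tag word (1 = valid) back to the internal convention (1 = empty). */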
5288 fptag ^= 0xff;
5289 for(i = 0;i < 8; i++) {
5290 env->fptags[i] = ((fptag >> i) & 1);
5291 }
5292
5293 addr = ptr + 0x20;
5294 for(i = 0;i < 8; i++) {
5295 tmp = helper_fldt(addr);
5296 ST(i) = tmp;
5297 addr += 16;
5298 }
5299
5300 if (env->cr[4] & CR4_OSFXSR_MASK) {
5301 /* XXX: finish it */
5302 env->mxcsr = ldl(ptr + 0x18);
5303 //ldl(ptr + 0x1c);
5304 if (env->hflags & HF_CS64_MASK)
5305 nb_xmm_regs = 16;
5306 else
5307 nb_xmm_regs = 8;
5308 addr = ptr + 0xa0;
5309 /* Fast FXRESTORE leaves out the XMM registers */
5310 if (!(env->efer & MSR_EFER_FFXSR)
5311 || (env->hflags & HF_CPL_MASK)
5312 || !(env->hflags & HF_LMA_MASK)) {
5313 for(i = 0; i < nb_xmm_regs; i++) {
5314#if !defined(VBOX) || __GNUC__ < 4
5315 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5316 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5317#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5318# if 1
5319 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5320 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5321 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5322 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5323# else
5324 /* this works fine on Mac OS X, gcc 4.0.1 */
5325 uint64_t u64 = ldq(addr);
5326 env->xmm_regs[i].XMM_Q(0) = u64;
5327 u64 = ldq(addr + 8);
5328 env->xmm_regs[i].XMM_Q(1) = u64;
5329# endif
5330#endif
5331 addr += 16;
5332 }
5333 }
5334 }
5335}
5336
5337#ifndef USE_X86LDOUBLE
5338
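/* Without host 80-bit long double support, guest extended-precision values are approximated by
   doubles: cpu_get_fp80() widens a double to the 80-bit format (mantissa shifted up 11 bits with
   the explicit integer bit set, exponent rebias from 1023 to 16383), and cpu_set_fp80() narrows
   it back. */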
5339void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5340{
5341 CPU86_LDoubleU temp;
5342 int e;
5343
5344 temp.d = f;
5345 /* mantissa */
5346 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5347 /* exponent + sign */
5348 e = EXPD(temp) - EXPBIAS + 16383;
5349 e |= SIGND(temp) >> 16;
5350 *pexp = e;
5351}
5352
5353CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5354{
5355 CPU86_LDoubleU temp;
5356 int e;
5357 uint64_t ll;
5358
5359 /* XXX: handle overflow ? */
5360 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5361 e |= (upper >> 4) & 0x800; /* sign */
5362 ll = (mant >> 11) & ((1LL << 52) - 1);
5363#ifdef __arm__
5364 temp.l.upper = (e << 20) | (ll >> 32);
5365 temp.l.lower = ll;
5366#else
5367 temp.ll = ll | ((uint64_t)e << 52);
5368#endif
5369 return temp.d;
5370}
5371
5372#else
5373
5374void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5375{
5376 CPU86_LDoubleU temp;
5377
5378 temp.d = f;
5379 *pmant = temp.l.lower;
5380 *pexp = temp.l.upper;
5381}
5382
5383CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5384{
5385 CPU86_LDoubleU temp;
5386
5387 temp.l.upper = upper;
5388 temp.l.lower = mant;
5389 return temp.d;
5390}
5391#endif
5392
5393#ifdef TARGET_X86_64
5394
5395//#define DEBUG_MULDIV
5396
5397static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5398{
5399 *plow += a;
5400 /* carry test */
5401 if (*plow < a)
5402 (*phigh)++;
5403 *phigh += b;
5404}
5405
5406static void neg128(uint64_t *plow, uint64_t *phigh)
5407{
5408 *plow = ~ *plow;
5409 *phigh = ~ *phigh;
5410 add128(plow, phigh, 1, 0);
5411}
5412
5413/* return TRUE if overflow */
5414static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5415{
5416 uint64_t q, r, a1, a0;
5417 int i, qb, ab;
5418
5419 a0 = *plow;
5420 a1 = *phigh;
5421 if (a1 == 0) {
5422 q = a0 / b;
5423 r = a0 % b;
5424 *plow = q;
5425 *phigh = r;
5426 } else {
5427 if (a1 >= b)
5428 return 1;
5429 /* XXX: use a better algorithm */
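        /* Restoring division, one bit at a time: shift the 128-bit dividend left, subtract b from
           the high half whenever it fits and shift the quotient bit into a0; after 64 steps a0
           holds the quotient and a1 the remainder. */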
5430 for(i = 0; i < 64; i++) {
5431 ab = a1 >> 63;
5432 a1 = (a1 << 1) | (a0 >> 63);
5433 if (ab || a1 >= b) {
5434 a1 -= b;
5435 qb = 1;
5436 } else {
5437 qb = 0;
5438 }
5439 a0 = (a0 << 1) | qb;
5440 }
5441#if defined(DEBUG_MULDIV)
5442 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5443 *phigh, *plow, b, a0, a1);
5444#endif
5445 *plow = a0;
5446 *phigh = a1;
5447 }
5448 return 0;
5449}
5450
5451/* return TRUE if overflow */
5452static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5453{
5454 int sa, sb;
5455 sa = ((int64_t)*phigh < 0);
5456 if (sa)
5457 neg128(plow, phigh);
5458 sb = (b < 0);
5459 if (sb)
5460 b = -b;
5461 if (div64(plow, phigh, b) != 0)
5462 return 1;
5463 if (sa ^ sb) {
5464 if (*plow > (1ULL << 63))
5465 return 1;
5466 *plow = - *plow;
5467 } else {
5468 if (*plow >= (1ULL << 63))
5469 return 1;
5470 }
5471 if (sa)
5472 *phigh = - *phigh;
5473 return 0;
5474}
5475
5476void helper_mulq_EAX_T0(target_ulong t0)
5477{
5478 uint64_t r0, r1;
5479
5480 mulu64(&r0, &r1, EAX, t0);
5481 EAX = r0;
5482 EDX = r1;
5483 CC_DST = r0;
5484 CC_SRC = r1;
5485}
5486
5487void helper_imulq_EAX_T0(target_ulong t0)
5488{
5489 uint64_t r0, r1;
5490
5491 muls64(&r0, &r1, EAX, t0);
5492 EAX = r0;
5493 EDX = r1;
5494 CC_DST = r0;
5495 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5496}
5497
5498target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5499{
5500 uint64_t r0, r1;
5501
5502 muls64(&r0, &r1, t0, t1);
5503 CC_DST = r0;
5504 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5505 return r0;
5506}
5507
5508void helper_divq_EAX(target_ulong t0)
5509{
5510 uint64_t r0, r1;
5511 if (t0 == 0) {
5512 raise_exception(EXCP00_DIVZ);
5513 }
5514 r0 = EAX;
5515 r1 = EDX;
5516 if (div64(&r0, &r1, t0))
5517 raise_exception(EXCP00_DIVZ);
5518 EAX = r0;
5519 EDX = r1;
5520}
5521
5522void helper_idivq_EAX(target_ulong t0)
5523{
5524 uint64_t r0, r1;
5525 if (t0 == 0) {
5526 raise_exception(EXCP00_DIVZ);
5527 }
5528 r0 = EAX;
5529 r1 = EDX;
5530 if (idiv64(&r0, &r1, t0))
5531 raise_exception(EXCP00_DIVZ);
5532 EAX = r0;
5533 EDX = r1;
5534}
5535#endif
5536
5537static void do_hlt(void)
5538{
5539 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5540 env->halted = 1;
5541 env->exception_index = EXCP_HLT;
5542 cpu_loop_exit();
5543}
5544
5545void helper_hlt(int next_eip_addend)
5546{
5547 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5548 EIP += next_eip_addend;
5549
5550 do_hlt();
5551}
5552
5553void helper_monitor(target_ulong ptr)
5554{
5555#ifdef VBOX
5556 if ((uint32_t)ECX > 1)
5557 raise_exception(EXCP0D_GPF);
5558#else /* !VBOX */
5559 if ((uint32_t)ECX != 0)
5560 raise_exception(EXCP0D_GPF);
5561#endif /* !VBOX */
5562 /* XXX: store address ? */
5563 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5564}
5565
5566void helper_mwait(int next_eip_addend)
5567{
5568 if ((uint32_t)ECX != 0)
5569 raise_exception(EXCP0D_GPF);
5570#ifdef VBOX
5571 helper_hlt(next_eip_addend);
5572#else /* !VBOX */
5573 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5574 EIP += next_eip_addend;
5575
5576 /* XXX: not complete but not completely erroneous */
5577 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5578 /* more than one CPU: do not sleep because another CPU may
5579 wake this one */
5580 } else {
5581 do_hlt();
5582 }
5583#endif /* !VBOX */
5584}
5585
5586void helper_debug(void)
5587{
5588 env->exception_index = EXCP_DEBUG;
5589 cpu_loop_exit();
5590}
5591
5592void helper_reset_rf(void)
5593{
5594 env->eflags &= ~RF_MASK;
5595}
5596
5597void helper_raise_interrupt(int intno, int next_eip_addend)
5598{
5599 raise_interrupt(intno, 1, 0, next_eip_addend);
5600}
5601
5602void helper_raise_exception(int exception_index)
5603{
5604 raise_exception(exception_index);
5605}
5606
5607void helper_cli(void)
5608{
5609 env->eflags &= ~IF_MASK;
5610}
5611
5612void helper_sti(void)
5613{
5614 env->eflags |= IF_MASK;
5615}
5616
5617#ifdef VBOX
5618void helper_cli_vme(void)
5619{
5620 env->eflags &= ~VIF_MASK;
5621}
5622
5623void helper_sti_vme(void)
5624{
5625 /* First check, then change eflags according to the AMD manual */
5626 if (env->eflags & VIP_MASK) {
5627 raise_exception(EXCP0D_GPF);
5628 }
5629 env->eflags |= VIF_MASK;
5630}
5631#endif /* VBOX */
5632
5633#if 0
5634/* vm86plus instructions */
5635void helper_cli_vm(void)
5636{
5637 env->eflags &= ~VIF_MASK;
5638}
5639
5640void helper_sti_vm(void)
5641{
5642 env->eflags |= VIF_MASK;
5643 if (env->eflags & VIP_MASK) {
5644 raise_exception(EXCP0D_GPF);
5645 }
5646}
5647#endif
5648
5649void helper_set_inhibit_irq(void)
5650{
5651 env->hflags |= HF_INHIBIT_IRQ_MASK;
5652}
5653
5654void helper_reset_inhibit_irq(void)
5655{
5656 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5657}
5658
5659void helper_boundw(target_ulong a0, int v)
5660{
5661 int low, high;
5662 low = ldsw(a0);
5663 high = ldsw(a0 + 2);
5664 v = (int16_t)v;
5665 if (v < low || v > high) {
5666 raise_exception(EXCP05_BOUND);
5667 }
5668}
5669
5670void helper_boundl(target_ulong a0, int v)
5671{
5672 int low, high;
5673 low = ldl(a0);
5674 high = ldl(a0 + 4);
5675 if (v < low || v > high) {
5676 raise_exception(EXCP05_BOUND);
5677 }
5678}
5679
5680static float approx_rsqrt(float a)
5681{
5682 return 1.0 / sqrt(a);
5683}
5684
5685static float approx_rcp(float a)
5686{
5687 return 1.0 / a;
5688}
5689
5690#if !defined(CONFIG_USER_ONLY)
5691
5692#define MMUSUFFIX _mmu
5693
5694#define SHIFT 0
5695#include "softmmu_template.h"
5696
5697#define SHIFT 1
5698#include "softmmu_template.h"
5699
5700#define SHIFT 2
5701#include "softmmu_template.h"
5702
5703#define SHIFT 3
5704#include "softmmu_template.h"
5705
5706#endif
5707
5708#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5709 /* This code assumes that a real physical address always fits into a host CPU register,
5710    which is wrong in general but true for our current use cases. */
5711RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5712{
5713 return remR3PhysReadS8(addr);
5714}
5715RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5716{
5717 return remR3PhysReadU8(addr);
5718}
5719void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5720{
5721 remR3PhysWriteU8(addr, val);
5722}
5723RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5724{
5725 return remR3PhysReadS16(addr);
5726}
5727RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5728{
5729 return remR3PhysReadU16(addr);
5730}
5731void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5732{
5733 remR3PhysWriteU16(addr, val);
5734}
5735RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5736{
5737 return remR3PhysReadS32(addr);
5738}
5739RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5740{
5741 return remR3PhysReadU32(addr);
5742}
5743void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5744{
5745 remR3PhysWriteU32(addr, val);
5746}
5747uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5748{
5749 return remR3PhysReadU64(addr);
5750}
5751void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5752{
5753 remR3PhysWriteU64(addr, val);
5754}
5755#endif /* VBOX */
5756
5757#if !defined(CONFIG_USER_ONLY)
5758 /* try to fill the TLB and raise an exception on error. If retaddr is
5759 NULL, it means that the function was called in C code (i.e. not
5760 from generated code or from helper.c) */
5761/* XXX: fix it to restore all registers */
5762void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5763{
5764 TranslationBlock *tb;
5765 int ret;
5766 uintptr_t pc;
5767 CPUX86State *saved_env;
5768
5769 /* XXX: hack to restore env in all cases, even if not called from
5770 generated code */
5771 saved_env = env;
5772 env = cpu_single_env;
5773
5774 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5775 if (ret) {
5776 if (retaddr) {
5777 /* now we have a real cpu fault */
5778 pc = (uintptr_t)retaddr;
5779 tb = tb_find_pc(pc);
5780 if (tb) {
5781 /* the PC is inside the translated code. It means that we have
5782 a virtual CPU fault */
5783 cpu_restore_state(tb, env, pc, NULL);
5784 }
5785 }
5786 raise_exception_err(env->exception_index, env->error_code);
5787 }
5788 env = saved_env;
5789}
5790#endif
5791
5792#ifdef VBOX
5793
5794/**
5795 * Correctly computes the eflags.
5796 * @returns eflags.
5797 * @param env1 CPU environment.
5798 */
5799uint32_t raw_compute_eflags(CPUX86State *env1)
5800{
5801 CPUX86State *savedenv = env;
5802 uint32_t efl;
5803 env = env1;
5804 efl = compute_eflags();
5805 env = savedenv;
5806 return efl;
5807}
5808
5809/**
5810 * Reads a byte from a virtual address in the guest memory area.
5811 * XXX: does it work for all addresses? swapped out pages?
5812 * @returns read data byte.
5813 * @param env1 CPU environment.
5814 * @param pvAddr GC Virtual address.
5815 */
5816uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5817{
5818 CPUX86State *savedenv = env;
5819 uint8_t u8;
5820 env = env1;
5821 u8 = ldub_kernel(addr);
5822 env = savedenv;
5823 return u8;
5824}
5825
5826/**
5827 * Reads a word from a virtual address in the guest memory area.
5828 * XXX: does it work for all addresses? swapped out pages?
5829 * @returns read data word.
5830 * @param env1 CPU environment.
5831 * @param pvAddr GC Virtual address.
5832 */
5833uint16_t read_word(CPUX86State *env1, target_ulong addr)
5834{
5835 CPUX86State *savedenv = env;
5836 uint16_t u16;
5837 env = env1;
5838 u16 = lduw_kernel(addr);
5839 env = savedenv;
5840 return u16;
5841}
5842
5843/**
5844 * Reads a dword from a virtual address in the guest memory area.
5845 * XXX: does it work for all addresses? swapped out pages?
5846 * @returns read data dword.
5847 * @param env1 CPU environment.
5848 * @param pvAddr GC Virtual address.
5849 */
5850uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5851{
5852 CPUX86State *savedenv = env;
5853 uint32_t u32;
5854 env = env1;
5855 u32 = ldl_kernel(addr);
5856 env = savedenv;
5857 return u32;
5858}
5859
5860/**
5861 * Writes a byte to a virtual address in the guest memory area.
5862 * XXX: does it work for all addresses? swapped out pages?
5864 * @param env1 CPU environment.
5865 * @param pvAddr GC Virtual address.
5866 * @param val byte value
5867 */
5868void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5869{
5870 CPUX86State *savedenv = env;
5871 env = env1;
5872 stb(addr, val);
5873 env = savedenv;
5874}
5875
5876void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5877{
5878 CPUX86State *savedenv = env;
5879 env = env1;
5880 stw(addr, val);
5881 env = savedenv;
5882}
5883
5884void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5885{
5886 CPUX86State *savedenv = env;
5887 env = env1;
5888 stl(addr, val);
5889 env = savedenv;
5890}
5891
5892/**
5893 * Correctly loads selector into segment register with updating internal
5894 * qemu data/caches.
5895 * @param env1 CPU environment.
5896 * @param seg_reg Segment register.
5897 * @param selector Selector to load.
5898 */
5899void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5900{
5901 CPUX86State *savedenv = env;
5902#ifdef FORCE_SEGMENT_SYNC
5903 jmp_buf old_buf;
5904#endif
5905
5906 env = env1;
5907
5908 if ( env->eflags & X86_EFL_VM
5909 || !(env->cr[0] & X86_CR0_PE))
5910 {
5911 load_seg_vm(seg_reg, selector);
5912
5913 env = savedenv;
5914
5915 /* Successful sync. */
5916 Assert(env1->segs[seg_reg].newselector == 0);
5917 }
5918 else
5919 {
5920 /* For some reason this works even without saving/restoring the jump buffer, and as this
5921    code is time critical we skip that. */
5922#ifdef FORCE_SEGMENT_SYNC
5923 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5924#endif
5925 if (setjmp(env1->jmp_env) == 0)
5926 {
5927 if (seg_reg == R_CS)
5928 {
5929 uint32_t e1, e2;
5930 e1 = e2 = 0;
5931 load_segment(&e1, &e2, selector);
5932 cpu_x86_load_seg_cache(env, R_CS, selector,
5933 get_seg_base(e1, e2),
5934 get_seg_limit(e1, e2),
5935 e2);
5936 }
5937 else
5938 helper_load_seg(seg_reg, selector);
5939 /* We used to use tss_load_seg(seg_reg, selector), which for some reason ignored
5940    loading of 0 selectors; that in turn led to subtle problems like #3588. */
5941
5942 env = savedenv;
5943
5944 /* Successful sync. */
5945 Assert(env1->segs[seg_reg].newselector == 0);
5946 }
5947 else
5948 {
5949 env = savedenv;
5950
5951 /* Postpone sync until the guest uses the selector. */
5952 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5953 env1->segs[seg_reg].newselector = selector;
5954 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5955 env1->exception_index = -1;
5956 env1->error_code = 0;
5957 env1->old_exception = -1;
5958 }
5959#ifdef FORCE_SEGMENT_SYNC
5960 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5961#endif
5962 }
5963
5964}
5965
5966DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5967{
5968 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5969}
5970
5971
5972int emulate_single_instr(CPUX86State *env1)
5973{
5974 TranslationBlock *tb;
5975 TranslationBlock *current;
5976 int flags;
5977 uint8_t *tc_ptr;
5978 target_ulong old_eip;
5979
5980 /* ensures env is loaded! */
5981 CPUX86State *savedenv = env;
5982 env = env1;
5983
5984 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5985
5986 current = env->current_tb;
5987 env->current_tb = NULL;
5988 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5989
5990 /*
5991 * Translate only one instruction.
5992 */
5993 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5994 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5995 env->segs[R_CS].base, flags, 0);
5996
5997 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5998
5999
6000 /* tb_link_phys: */
6001 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
6002 tb->jmp_next[0] = NULL;
6003 tb->jmp_next[1] = NULL;
6004 Assert(tb->jmp_next[0] == NULL);
6005 Assert(tb->jmp_next[1] == NULL);
6006 if (tb->tb_next_offset[0] != 0xffff)
6007 tb_reset_jump(tb, 0);
6008 if (tb->tb_next_offset[1] != 0xffff)
6009 tb_reset_jump(tb, 1);
6010
6011 /*
6012 * Execute it using emulation
6013 */
6014 old_eip = env->eip;
6015 env->current_tb = tb;
6016
6017 /*
6018 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
6019 * Perhaps not a very safe hack.
6020 */
6021 while (old_eip == env->eip)
6022 {
6023 tc_ptr = tb->tc_ptr;
6024
6025#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
6026 int fake_ret;
6027 tcg_qemu_tb_exec(tc_ptr, fake_ret);
6028#else
6029 tcg_qemu_tb_exec(tc_ptr);
6030#endif
6031
6032 /*
6033 * Exit once we detect an external interrupt and interrupts are enabled
6034 */
6035 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
6036 || ( (env->eflags & IF_MASK)
6037 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6038 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6039 )
6040 {
6041 break;
6042 }
6043 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6044 tlb_flush(env, true);
6045 }
6046 }
6047 env->current_tb = current;
6048
6049 tb_phys_invalidate(tb, -1);
6050 tb_free(tb);
6051/*
6052 Assert(tb->tb_next_offset[0] == 0xffff);
6053 Assert(tb->tb_next_offset[1] == 0xffff);
6054 Assert(tb->tb_next[0] == 0xffff);
6055 Assert(tb->tb_next[1] == 0xffff);
6056 Assert(tb->jmp_next[0] == NULL);
6057 Assert(tb->jmp_next[1] == NULL);
6058 Assert(tb->jmp_first == NULL); */
6059
6060 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6061
6062 /*
6063 * Execute the next instruction when we encounter instruction fusing.
6064 */
6065 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6066 {
6067 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6068 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6069 emulate_single_instr(env);
6070 }
6071
6072 env = savedenv;
6073 return 0;
6074}
6075
6076/**
6077 * Correctly loads a new ldtr selector.
6078 *
6079 * @param env1 CPU environment.
6080 * @param selector Selector to load.
6081 */
6082void sync_ldtr(CPUX86State *env1, int selector)
6083{
6084 CPUX86State *saved_env = env;
6085 if (setjmp(env1->jmp_env) == 0)
6086 {
6087 env = env1;
6088 helper_lldt(selector);
6089 env = saved_env;
6090 }
6091 else
6092 {
6093 env = saved_env;
6094#ifdef VBOX_STRICT
6095 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6096#endif
6097 }
6098}
6099
6100int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6101 uint32_t *esp_ptr, int dpl)
6102{
6103 int type, index, shift;
6104
6105 CPUX86State *savedenv = env;
6106 env = env1;
6107
6108 if (!(env->tr.flags & DESC_P_MASK))
6109 cpu_abort(env, "invalid tss");
6110 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6111 if ((type & 7) != 1)
6112 cpu_abort(env, "invalid tss type %d", type);
6113 shift = type >> 3;
6114 index = (dpl * 4 + 2) << shift;
6115 if (index + (4 << shift) - 1 > env->tr.limit)
6116 {
6117 env = savedenv;
6118 return 0;
6119 }
6120 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6121
6122 if (shift == 0) {
6123 *esp_ptr = lduw_kernel(env->tr.base + index);
6124 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6125 } else {
6126 *esp_ptr = ldl_kernel(env->tr.base + index);
6127 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6128 }
6129
6130 env = savedenv;
6131 return 1;
6132}
6133
6134//*****************************************************************************
6135// Needs to be at the bottom of the file (overriding macros)
6136
6137static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6138{
6139#ifdef USE_X86LDOUBLE
6140 CPU86_LDoubleU tmp;
6141 tmp.l.lower = *(uint64_t const *)ptr;
6142 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6143 return tmp.d;
6144#else
6145# error "Busted FPU saving/restoring!"
6146 return *(CPU86_LDouble *)ptr;
6147#endif
6148}
6149
6150static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6151{
6152#ifdef USE_X86LDOUBLE
6153 CPU86_LDoubleU tmp;
6154 tmp.d = f;
6155 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6156 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6157 *(uint16_t *)(ptr + 10) = 0;
6158 *(uint32_t *)(ptr + 12) = 0;
6159 AssertCompile(sizeof(long double) > 8);
6160#else
6161# error "Busted FPU saving/restoring!"
6162 *(CPU86_LDouble *)ptr = f;
6163#endif
6164}
6165
6166#undef stw
6167#undef stl
6168#undef stq
6169#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6170#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6171#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6172
6173//*****************************************************************************
6174void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6175{
6176 int fpus, fptag, i, nb_xmm_regs;
6177 CPU86_LDouble tmp;
6178 uint8_t *addr;
6179 int data64 = !!(env->hflags & HF_LMA_MASK);
6180
6181 if (env->cpuid_features & CPUID_FXSR)
6182 {
6183 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6184 fptag = 0;
6185 for(i = 0; i < 8; i++) {
6186 fptag |= (env->fptags[i] << i);
6187 }
6188 stw(ptr, env->fpuc);
6189 stw(ptr + 2, fpus);
6190 stw(ptr + 4, fptag ^ 0xff);
6191
6192 addr = ptr + 0x20;
6193 for(i = 0;i < 8; i++) {
6194 tmp = ST(i);
6195 helper_fstt_raw(tmp, addr);
6196 addr += 16;
6197 }
6198
6199 if (env->cr[4] & CR4_OSFXSR_MASK) {
6200 /* XXX: finish it */
6201 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6202 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6203 nb_xmm_regs = 8 << data64;
6204 addr = ptr + 0xa0;
6205 for(i = 0; i < nb_xmm_regs; i++) {
6206#if __GNUC__ < 4
6207 stq(addr, env->xmm_regs[i].XMM_Q(0));
6208 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6209#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6210 stl(addr, env->xmm_regs[i].XMM_L(0));
6211 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6212 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6213 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6214#endif
6215 addr += 16;
6216 }
6217 }
6218 }
6219 else
6220 {
6221 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6222 int fptag;
6223
6224 fp->FCW = env->fpuc;
6225 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6226 fptag = 0;
6227 for (i=7; i>=0; i--) {
6228 fptag <<= 2;
6229 if (env->fptags[i]) {
6230 fptag |= 3;
6231 } else {
6232 /* the FPU automatically computes it */
6233 }
6234 }
6235 fp->FTW = fptag;
6236
6237 for(i = 0;i < 8; i++) {
6238 tmp = ST(i);
6239 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6240 }
6241 }
6242}
6243
6244//*****************************************************************************
6245#undef lduw
6246#undef ldl
6247#undef ldq
6248#define lduw(a) *(uint16_t *)(a)
6249#define ldl(a) *(uint32_t *)(a)
6250#define ldq(a) *(uint64_t *)(a)
6251//*****************************************************************************
6252void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6253{
6254 int i, fpus, fptag, nb_xmm_regs;
6255 CPU86_LDouble tmp;
6256 uint8_t *addr;
6257 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6258
6259 if (env->cpuid_features & CPUID_FXSR)
6260 {
6261 env->fpuc = lduw(ptr);
6262 fpus = lduw(ptr + 2);
6263 fptag = lduw(ptr + 4);
6264 env->fpstt = (fpus >> 11) & 7;
6265 env->fpus = fpus & ~0x3800;
6266 fptag ^= 0xff;
6267 for(i = 0;i < 8; i++) {
6268 env->fptags[i] = ((fptag >> i) & 1);
6269 }
6270
6271 addr = ptr + 0x20;
6272 for(i = 0;i < 8; i++) {
6273 tmp = helper_fldt_raw(addr);
6274 ST(i) = tmp;
6275 addr += 16;
6276 }
6277
6278 if (env->cr[4] & CR4_OSFXSR_MASK) {
6279 /* XXX: finish it, endianness */
6280 env->mxcsr = ldl(ptr + 0x18);
6281 //ldl(ptr + 0x1c);
6282 nb_xmm_regs = 8 << data64;
6283 addr = ptr + 0xa0;
6284 for(i = 0; i < nb_xmm_regs; i++) {
6285#if HC_ARCH_BITS == 32
6286 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6287 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6288 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6289 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6290 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6291#else
6292 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6293 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6294#endif
6295 addr += 16;
6296 }
6297 }
6298 }
6299 else
6300 {
6301 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6302 int fptag, j;
6303
6304 env->fpuc = fp->FCW;
6305 env->fpstt = (fp->FSW >> 11) & 7;
6306 env->fpus = fp->FSW & ~0x3800;
6307 fptag = fp->FTW;
6308 for(i = 0;i < 8; i++) {
6309 env->fptags[i] = ((fptag & 3) == 3);
6310 fptag >>= 2;
6311 }
6312 j = env->fpstt;
6313 for(i = 0;i < 8; i++) {
6314 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6315 ST(i) = tmp;
6316 }
6317 }
6318}
6319//*****************************************************************************
6320//*****************************************************************************
6321
6322#endif /* VBOX */
6323
6324/* Secure Virtual Machine helpers */
6325
6326#if defined(CONFIG_USER_ONLY)
6327
6328void helper_vmrun(int aflag, int next_eip_addend)
6329{
6330}
6331void helper_vmmcall(void)
6332{
6333}
6334void helper_vmload(int aflag)
6335{
6336}
6337void helper_vmsave(int aflag)
6338{
6339}
6340void helper_stgi(void)
6341{
6342}
6343void helper_clgi(void)
6344{
6345}
6346void helper_skinit(void)
6347{
6348}
6349void helper_invlpga(int aflag)
6350{
6351}
6352void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6353{
6354}
6355void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6356{
6357}
6358
6359void helper_svm_check_io(uint32_t port, uint32_t param,
6360 uint32_t next_eip_addend)
6361{
6362}
6363#else
6364
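/* The VMCB keeps segment attributes in a packed 12-bit form: bits 0-7 hold
   descriptor byte 5 (type, S, DPL, P) and bits 8-11 the AVL/L/D.B/G nibble.
   SegmentCache.flags keeps those same bits at positions 8-15 and 20-23,
   hence the shift-and-mask conversions below. */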
6365static inline void svm_save_seg(target_phys_addr_t addr,
6366 const SegmentCache *sc)
6367{
6368 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6369 sc->selector);
6370 stq_phys(addr + offsetof(struct vmcb_seg, base),
6371 sc->base);
6372 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6373 sc->limit);
6374 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6375 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6376}
6377
6378static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6379{
6380 unsigned int flags;
6381
6382 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6383 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6384 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6385 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6386 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6387}
6388
6389static inline void svm_load_seg_cache(target_phys_addr_t addr,
6390 CPUState *env, int seg_reg)
6391{
6392 SegmentCache sc1, *sc = &sc1;
6393 svm_load_seg(addr, sc);
6394 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6395 sc->base, sc->limit, sc->flags);
6396}
6397
6398void helper_vmrun(int aflag, int next_eip_addend)
6399{
6400 target_ulong addr;
6401 uint32_t event_inj;
6402 uint32_t int_ctl;
6403
6404 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6405
6406 if (aflag == 2)
6407 addr = EAX;
6408 else
6409 addr = (uint32_t)EAX;
6410
6411 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6412
6413 env->vm_vmcb = addr;
6414
6415 /* save the current CPU state in the hsave page */
6416 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6417 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6418
6419 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6420 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6421
6422 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6423 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6424 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6425 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6426 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6427 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6428
6429 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6430 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6431
6432 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6433 &env->segs[R_ES]);
6434 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6435 &env->segs[R_CS]);
6436 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6437 &env->segs[R_SS]);
6438 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6439 &env->segs[R_DS]);
6440
6441 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6442 EIP + next_eip_addend);
6443 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6444 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6445
6446 /* load the interception bitmaps so we do not need to access the
6447 vmcb in svm mode */
6448 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6449 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6450 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6451 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6452 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6453 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6454
6455 /* enable intercepts */
6456 env->hflags |= HF_SVMI_MASK;
6457
6458 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6459
6460 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6461 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6462
6463 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6464 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6465
6466 /* clear exit_info_2 so we behave like the real hardware */
6467 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6468
6469 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6470 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6471 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6472 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6473 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6474 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
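 /* With V_INTR_MASKING the TPR is virtualized and the guest's IF only gates
    virtual interrupts; remember the host's IF in HF2_HIF so physical
    interrupts keep being delivered according to the host state. */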
6475 if (int_ctl & V_INTR_MASKING_MASK) {
6476 env->v_tpr = int_ctl & V_TPR_MASK;
6477 env->hflags2 |= HF2_VINTR_MASK;
6478 if (env->eflags & IF_MASK)
6479 env->hflags2 |= HF2_HIF_MASK;
6480 }
6481
6482 cpu_load_efer(env,
6483 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6484 env->eflags = 0;
6485 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6486 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6487 CC_OP = CC_OP_EFLAGS;
6488
6489 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6490 env, R_ES);
6491 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6492 env, R_CS);
6493 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6494 env, R_SS);
6495 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6496 env, R_DS);
6497
6498 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6499 env->eip = EIP;
6500 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6501 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6502 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6503 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6504 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6505
6506 /* FIXME: guest state consistency checks */
6507
6508 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6509 case TLB_CONTROL_DO_NOTHING:
6510 break;
6511 case TLB_CONTROL_FLUSH_ALL_ASID:
6512 /* FIXME: this is not 100% correct but should work for now */
6513 tlb_flush(env, 1);
6514 break;
6515 }
6516
6517 env->hflags2 |= HF2_GIF_MASK;
6518
6519 if (int_ctl & V_IRQ_MASK) {
6520 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6521 }
6522
6523 /* maybe we need to inject an event */
6524 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6525 if (event_inj & SVM_EVTINJ_VALID) {
6526 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6527 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6528 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6529
6530 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6531 /* FIXME: need to implement valid_err */
6532 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6533 case SVM_EVTINJ_TYPE_INTR:
6534 env->exception_index = vector;
6535 env->error_code = event_inj_err;
6536 env->exception_is_int = 0;
6537 env->exception_next_eip = -1;
6538 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6539 /* XXX: is it always correct ? */
6540 do_interrupt(vector, 0, 0, 0, 1);
6541 break;
6542 case SVM_EVTINJ_TYPE_NMI:
6543 env->exception_index = EXCP02_NMI;
6544 env->error_code = event_inj_err;
6545 env->exception_is_int = 0;
6546 env->exception_next_eip = EIP;
6547 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6548 cpu_loop_exit();
6549 break;
6550 case SVM_EVTINJ_TYPE_EXEPT:
6551 env->exception_index = vector;
6552 env->error_code = event_inj_err;
6553 env->exception_is_int = 0;
6554 env->exception_next_eip = -1;
6555 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6556 cpu_loop_exit();
6557 break;
6558 case SVM_EVTINJ_TYPE_SOFT:
6559 env->exception_index = vector;
6560 env->error_code = event_inj_err;
6561 env->exception_is_int = 1;
6562 env->exception_next_eip = EIP;
6563 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6564 cpu_loop_exit();
6565 break;
6566 }
6567 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6568 }
6569}
6570
6571void helper_vmmcall(void)
6572{
6573 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6574 raise_exception(EXCP06_ILLOP);
6575}
6576
6577void helper_vmload(int aflag)
6578{
6579 target_ulong addr;
6580 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6581
6582 if (aflag == 2)
6583 addr = EAX;
6584 else
6585 addr = (uint32_t)EAX;
6586
6587 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6588 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6589 env->segs[R_FS].base);
6590
6591 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6592 env, R_FS);
6593 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6594 env, R_GS);
6595 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6596 &env->tr);
6597 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6598 &env->ldt);
6599
6600#ifdef TARGET_X86_64
6601 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6602 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6603 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6604 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6605#endif
6606 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6607 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6608 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6609 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6610}
6611
6612void helper_vmsave(int aflag)
6613{
6614 target_ulong addr;
6615 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6616
6617 if (aflag == 2)
6618 addr = EAX;
6619 else
6620 addr = (uint32_t)EAX;
6621
6622 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6623 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6624 env->segs[R_FS].base);
6625
6626 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6627 &env->segs[R_FS]);
6628 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6629 &env->segs[R_GS]);
6630 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6631 &env->tr);
6632 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6633 &env->ldt);
6634
6635#ifdef TARGET_X86_64
6636 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6637 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6638 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6639 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6640#endif
6641 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6642 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6643 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6644 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6645}
6646
6647void helper_stgi(void)
6648{
6649 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6650 env->hflags2 |= HF2_GIF_MASK;
6651}
6652
6653void helper_clgi(void)
6654{
6655 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6656 env->hflags2 &= ~HF2_GIF_MASK;
6657}
6658
6659void helper_skinit(void)
6660{
6661 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6662 /* XXX: not implemented */
6663 raise_exception(EXCP06_ILLOP);
6664}
6665
6666void helper_invlpga(int aflag)
6667{
6668 target_ulong addr;
6669 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6670
6671 if (aflag == 2)
6672 addr = EAX;
6673 else
6674 addr = (uint32_t)EAX;
6675
6676 /* XXX: could use the ASID to decide whether the flush is
6677 needed at all */
6678 tlb_flush_page(env, addr);
6679}
6680
6681void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6682{
6683 if (likely(!(env->hflags & HF_SVMI_MASK)))
6684 return;
6685#ifndef VBOX
6686 switch(type) {
6687 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6688 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6689 helper_vmexit(type, param);
6690 }
6691 break;
6692 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6693 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6694 helper_vmexit(type, param);
6695 }
6696 break;
6697 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6698 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6699 helper_vmexit(type, param);
6700 }
6701 break;
6702 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6703 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6704 helper_vmexit(type, param);
6705 }
6706 break;
6707 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6708 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6709 helper_vmexit(type, param);
6710 }
6711 break;
6712 case SVM_EXIT_MSR:
6713 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6714 /* FIXME: this should be read in at vmrun (faster this way?) */
6715 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6716 uint32_t t0, t1;
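 /* The MSR permission map uses two bits per MSR (read intercept, then write
    intercept) and covers three ranges in 2 KB blocks: 0x0-0x1fff,
    0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  t1 becomes the byte
    offset into the map, t0 the bit offset within that byte; 'param' selects
    the read (0) or write (1) bit. */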
6717 switch((uint32_t)ECX) {
6718 case 0 ... 0x1fff:
6719 t0 = (ECX * 2) % 8;
6720 t1 = (ECX * 2) / 8;
6721 break;
6722 case 0xc0000000 ... 0xc0001fff:
6723 t0 = (8192 + ECX - 0xc0000000) * 2;
6724 t1 = (t0 / 8);
6725 t0 %= 8;
6726 break;
6727 case 0xc0010000 ... 0xc0011fff:
6728 t0 = (16384 + ECX - 0xc0010000) * 2;
6729 t1 = (t0 / 8);
6730 t0 %= 8;
6731 break;
6732 default:
6733 helper_vmexit(type, param);
6734 t0 = 0;
6735 t1 = 0;
6736 break;
6737 }
6738 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6739 helper_vmexit(type, param);
6740 }
6741 break;
6742 default:
6743 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6744 helper_vmexit(type, param);
6745 }
6746 break;
6747 }
6748#else /* VBOX */
6749 AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
6750#endif /* VBOX */
6751}
6752
6753void helper_svm_check_io(uint32_t port, uint32_t param,
6754 uint32_t next_eip_addend)
6755{
6756 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6757 /* FIXME: this should be read in at vmrun (faster this way?) */
6758 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6759 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
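 /* The I/O permission map has one bit per port; bits 4-6 of 'param' encode
    the access size in bytes (IOIO exit-info format), so 'mask' covers every
    port byte touched by the access. */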
6760 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6761 /* next EIP */
6762 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6763 env->eip + next_eip_addend);
6764 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6765 }
6766 }
6767}
6768
6769/* Note: currently only 32 bits of exit_code are used */
6770void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6771{
6772 uint32_t int_ctl;
6773
6774 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6775 exit_code, exit_info_1,
6776 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6777 EIP);
6778
6779 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6780 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6781 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6782 } else {
6783 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6784 }
6785
6786 /* Save the VM state in the vmcb */
6787 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6788 &env->segs[R_ES]);
6789 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6790 &env->segs[R_CS]);
6791 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6792 &env->segs[R_SS]);
6793 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6794 &env->segs[R_DS]);
6795
6796 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6797 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6798
6799 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6800 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6801
6802 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6803 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6804 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6805 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6806 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6807
6808 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6809 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6810 int_ctl |= env->v_tpr & V_TPR_MASK;
6811 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6812 int_ctl |= V_IRQ_MASK;
6813 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6814
6815 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6816 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6817 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6818 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6819 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6820 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6821 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6822
6823 /* Reload the host state from vm_hsave */
6824 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6825 env->hflags &= ~HF_SVMI_MASK;
6826 env->intercept = 0;
6827 env->intercept_exceptions = 0;
6828 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6829 env->tsc_offset = 0;
6830
6831 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6832 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6833
6834 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6835 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6836
6837 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6838 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6839 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6840 /* we need to set the efer after the crs so the hidden flags get
6841 set properly */
6842 cpu_load_efer(env,
6843 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6844 env->eflags = 0;
6845 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6846 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6847 CC_OP = CC_OP_EFLAGS;
6848
6849 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6850 env, R_ES);
6851 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6852 env, R_CS);
6853 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6854 env, R_SS);
6855 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6856 env, R_DS);
6857
6858 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6859 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6860 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6861
6862 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6863 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6864
6865 /* other setups */
6866 cpu_x86_set_cpl(env, 0);
6867 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6868 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6869
6870 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6871 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6872 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6873 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6874 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6875
6876 env->hflags2 &= ~HF2_GIF_MASK;
6877 /* FIXME: Resets the current ASID register to zero (host ASID). */
6878
6879 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6880
6881 /* Clears the TSC_OFFSET inside the processor. */
6882
6883 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6884 from the page table indicated by the host's CR3. If the PDPEs contain
6885 illegal state, the processor causes a shutdown. */
6886
6887 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6888 env->cr[0] |= CR0_PE_MASK;
6889 env->eflags &= ~VM_MASK;
6890
6891 /* Disables all breakpoints in the host DR7 register. */
6892
6893 /* Checks the reloaded host state for consistency. */
6894
6895 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6896 host's code segment or non-canonical (in the case of long mode), a
6897 #GP fault is delivered inside the host. */
6898
6899 /* remove any pending exception */
6900 env->exception_index = -1;
6901 env->error_code = 0;
6902 env->old_exception = -1;
6903
6904 cpu_loop_exit();
6905}
6906
6907#endif
6908
6909/* MMX/SSE */
6910/* XXX: optimize by storing fptt and fptags in the static cpu state */
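/* Entering MMX mode marks all eight FP registers valid and resets the FPU
   top-of-stack; EMMS marks them all empty again.  The 32-bit stores update
   all eight one-byte tags at once. */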
6911void helper_enter_mmx(void)
6912{
6913 env->fpstt = 0;
6914 *(uint32_t *)(env->fptags) = 0;
6915 *(uint32_t *)(env->fptags + 4) = 0;
6916}
6917
6918void helper_emms(void)
6919{
6920 /* set to empty state */
6921 *(uint32_t *)(env->fptags) = 0x01010101;
6922 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6923}
6924
6925/* XXX: suppress */
6926void helper_movq(void *d, void *s)
6927{
6928 *(uint64_t *)d = *(uint64_t *)s;
6929}
6930
6931#define SHIFT 0
6932#include "ops_sse.h"
6933
6934#define SHIFT 1
6935#include "ops_sse.h"
6936
6937#define SHIFT 0
6938#include "helper_template.h"
6939#undef SHIFT
6940
6941#define SHIFT 1
6942#include "helper_template.h"
6943#undef SHIFT
6944
6945#define SHIFT 2
6946#include "helper_template.h"
6947#undef SHIFT
6948
6949#ifdef TARGET_X86_64
6950
6951#define SHIFT 3
6952#include "helper_template.h"
6953#undef SHIFT
6954
6955#endif
6956
6957/* bit operations */
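/* Bit scan forward: return the index of the least significant set bit.  The
   translator is expected to test for a zero source before calling this (BSF
   leaves the destination undefined in that case), so the loop may assume
   t0 != 0. */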
6958target_ulong helper_bsf(target_ulong t0)
6959{
6960 int count;
6961 target_ulong res;
6962
6963 res = t0;
6964 count = 0;
6965 while ((res & 1) == 0) {
6966 count++;
6967 res >>= 1;
6968 }
6969 return count;
6970}
6971
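/* For wordsize > 0 this implements LZCNT semantics: the number of leading
   zero bits in a wordsize-bit value.  For wordsize == 0 it returns the index
   of the most significant set bit, which is what helper_bsr needs. */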
6972target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6973{
6974 int count;
6975 target_ulong res, mask;
6976
6977 if (wordsize > 0 && t0 == 0) {
6978 return wordsize;
6979 }
6980 res = t0;
6981 count = TARGET_LONG_BITS - 1;
6982 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6983 while ((res & mask) == 0) {
6984 count--;
6985 res <<= 1;
6986 }
6987 if (wordsize > 0) {
6988 return wordsize - 1 - count;
6989 }
6990 return count;
6991}
6992
6993target_ulong helper_bsr(target_ulong t0)
6994{
6995 return helper_lzcnt(t0, 0);
6996}
6997
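/* Lazy condition-code evaluation: CC_OP records the last flag-setting
   operation and CC_SRC/CC_DST its operands/result, so EFLAGS are only
   materialized here when actually needed.  For CC_OP_EFLAGS the flags are
   already stored in CC_SRC. */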
6998static int compute_all_eflags(void)
6999{
7000 return CC_SRC;
7001}
7002
7003static int compute_c_eflags(void)
7004{
7005 return CC_SRC & CC_C;
7006}
7007
7008uint32_t helper_cc_compute_all(int op)
7009{
7010 switch (op) {
7011 default: /* should never happen */ return 0;
7012
7013 case CC_OP_EFLAGS: return compute_all_eflags();
7014
7015 case CC_OP_MULB: return compute_all_mulb();
7016 case CC_OP_MULW: return compute_all_mulw();
7017 case CC_OP_MULL: return compute_all_mull();
7018
7019 case CC_OP_ADDB: return compute_all_addb();
7020 case CC_OP_ADDW: return compute_all_addw();
7021 case CC_OP_ADDL: return compute_all_addl();
7022
7023 case CC_OP_ADCB: return compute_all_adcb();
7024 case CC_OP_ADCW: return compute_all_adcw();
7025 case CC_OP_ADCL: return compute_all_adcl();
7026
7027 case CC_OP_SUBB: return compute_all_subb();
7028 case CC_OP_SUBW: return compute_all_subw();
7029 case CC_OP_SUBL: return compute_all_subl();
7030
7031 case CC_OP_SBBB: return compute_all_sbbb();
7032 case CC_OP_SBBW: return compute_all_sbbw();
7033 case CC_OP_SBBL: return compute_all_sbbl();
7034
7035 case CC_OP_LOGICB: return compute_all_logicb();
7036 case CC_OP_LOGICW: return compute_all_logicw();
7037 case CC_OP_LOGICL: return compute_all_logicl();
7038
7039 case CC_OP_INCB: return compute_all_incb();
7040 case CC_OP_INCW: return compute_all_incw();
7041 case CC_OP_INCL: return compute_all_incl();
7042
7043 case CC_OP_DECB: return compute_all_decb();
7044 case CC_OP_DECW: return compute_all_decw();
7045 case CC_OP_DECL: return compute_all_decl();
7046
7047 case CC_OP_SHLB: return compute_all_shlb();
7048 case CC_OP_SHLW: return compute_all_shlw();
7049 case CC_OP_SHLL: return compute_all_shll();
7050
7051 case CC_OP_SARB: return compute_all_sarb();
7052 case CC_OP_SARW: return compute_all_sarw();
7053 case CC_OP_SARL: return compute_all_sarl();
7054
7055#ifdef TARGET_X86_64
7056 case CC_OP_MULQ: return compute_all_mulq();
7057
7058 case CC_OP_ADDQ: return compute_all_addq();
7059
7060 case CC_OP_ADCQ: return compute_all_adcq();
7061
7062 case CC_OP_SUBQ: return compute_all_subq();
7063
7064 case CC_OP_SBBQ: return compute_all_sbbq();
7065
7066 case CC_OP_LOGICQ: return compute_all_logicq();
7067
7068 case CC_OP_INCQ: return compute_all_incq();
7069
7070 case CC_OP_DECQ: return compute_all_decq();
7071
7072 case CC_OP_SHLQ: return compute_all_shlq();
7073
7074 case CC_OP_SARQ: return compute_all_sarq();
7075#endif
7076 }
7077}
7078
7079uint32_t helper_cc_compute_c(int op)
7080{
7081 switch (op) {
7082 default: /* should never happen */ return 0;
7083
7084 case CC_OP_EFLAGS: return compute_c_eflags();
7085
7086 case CC_OP_MULB: return compute_c_mull();
7087 case CC_OP_MULW: return compute_c_mull();
7088 case CC_OP_MULL: return compute_c_mull();
7089
7090 case CC_OP_ADDB: return compute_c_addb();
7091 case CC_OP_ADDW: return compute_c_addw();
7092 case CC_OP_ADDL: return compute_c_addl();
7093
7094 case CC_OP_ADCB: return compute_c_adcb();
7095 case CC_OP_ADCW: return compute_c_adcw();
7096 case CC_OP_ADCL: return compute_c_adcl();
7097
7098 case CC_OP_SUBB: return compute_c_subb();
7099 case CC_OP_SUBW: return compute_c_subw();
7100 case CC_OP_SUBL: return compute_c_subl();
7101
7102 case CC_OP_SBBB: return compute_c_sbbb();
7103 case CC_OP_SBBW: return compute_c_sbbw();
7104 case CC_OP_SBBL: return compute_c_sbbl();
7105
7106 case CC_OP_LOGICB: return compute_c_logicb();
7107 case CC_OP_LOGICW: return compute_c_logicw();
7108 case CC_OP_LOGICL: return compute_c_logicl();
7109
7110 case CC_OP_INCB: return compute_c_incl();
7111 case CC_OP_INCW: return compute_c_incl();
7112 case CC_OP_INCL: return compute_c_incl();
7113
7114 case CC_OP_DECB: return compute_c_incl();
7115 case CC_OP_DECW: return compute_c_incl();
7116 case CC_OP_DECL: return compute_c_incl();
7117
7118 case CC_OP_SHLB: return compute_c_shlb();
7119 case CC_OP_SHLW: return compute_c_shlw();
7120 case CC_OP_SHLL: return compute_c_shll();
7121
7122 case CC_OP_SARB: return compute_c_sarl();
7123 case CC_OP_SARW: return compute_c_sarl();
7124 case CC_OP_SARL: return compute_c_sarl();
7125
7126#ifdef TARGET_X86_64
7127 case CC_OP_MULQ: return compute_c_mull();
7128
7129 case CC_OP_ADDQ: return compute_c_addq();
7130
7131 case CC_OP_ADCQ: return compute_c_adcq();
7132
7133 case CC_OP_SUBQ: return compute_c_subq();
7134
7135 case CC_OP_SBBQ: return compute_c_sbbq();
7136
7137 case CC_OP_LOGICQ: return compute_c_logicq();
7138
7139 case CC_OP_INCQ: return compute_c_incl();
7140
7141 case CC_OP_DECQ: return compute_c_incl();
7142
7143 case CC_OP_SHLQ: return compute_c_shlq();
7144
7145 case CC_OP_SARQ: return compute_c_sarl();
7146#endif
7147 }
7148}